File: net/if_veb.c
Warning: line 1060, column 2: Value stored to 'eh' is never read
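The flagged statement is in veb_port_input(): the Ethernet header pointer is refreshed after the pf and IPsec hooks (which may hand back a different mbuf), but nothing dereferences it afterwards, because the source and destination addresses were already extracted into uint64_t values. The following is a condensed sketch of the reported pattern, trimmed from veb_port_input(); it is not a verbatim excerpt of the file.

    struct ether_header *eh;
    uint64_t src;	/* dst arrives as a function argument */

    eh = mtod(m, struct ether_header *);
    src = ether_addr_to_e64((struct ether_addr *)eh->ether_shost);
    /* ... pf/IPsec hooks may replace m with a different mbuf ... */
    eh = mtod(m, struct ether_header *);	/* line 1060: the stored value is never read */
    if (ISSET(p->p_bif_flags, IFBIF_LEARNING))
        etherbridge_map(&sc->sc_eb, p, src);	/* only src and dst are used from here on */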
1 | /* $OpenBSD: if_veb.c,v 1.25 2022/01/04 06:32:39 yasuoka Exp $ */ |
2 | |
3 | /* |
4 | * Copyright (c) 2021 David Gwynne <dlg@openbsd.org> |
5 | * |
6 | * Permission to use, copy, modify, and distribute this software for any |
7 | * purpose with or without fee is hereby granted, provided that the above |
8 | * copyright notice and this permission notice appear in all copies. |
9 | * |
10 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES |
11 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF |
12 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR |
13 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES |
14 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN |
15 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF |
16 | * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. |
17 | */ |
18 | |
19 | #include "bpfilter.h" |
20 | #include "pf.h" |
21 | #include "vlan.h" |
22 | |
23 | #include <sys/param.h> |
24 | #include <sys/kernel.h> |
25 | #include <sys/malloc.h> |
26 | #include <sys/mbuf.h> |
27 | #include <sys/queue.h> |
28 | #include <sys/socket.h> |
29 | #include <sys/sockio.h> |
30 | #include <sys/systm.h> |
31 | #include <sys/syslog.h> |
32 | #include <sys/rwlock.h> |
33 | #include <sys/percpu.h> |
34 | #include <sys/smr.h> |
35 | #include <sys/task.h> |
36 | #include <sys/pool.h> |
37 | |
38 | #include <net/if.h> |
39 | #include <net/if_dl.h> |
40 | #include <net/if_types.h> |
41 | |
42 | #include <netinet/in.h> |
43 | #include <netinet/ip.h> |
44 | #include <netinet/if_ether.h> |
45 | |
46 | #ifdef INET6 |
47 | #include <netinet6/in6_var.h> |
48 | #include <netinet/ip6.h> |
49 | #include <netinet6/ip6_var.h> |
50 | #endif |
51 | |
52 | #if 0 && defined(IPSEC) |
53 | /* |
54 | * IPsec handling is disabled in veb until getting and using tdbs is mpsafe. |
55 | */ |
56 | #include <netinet/ip_ipsp.h> |
57 | #include <net/if_enc.h> |
58 | #endif |
59 | |
60 | #include <net/if_bridge.h> |
61 | #include <net/if_etherbridge.h> |
62 | |
63 | #if NBPFILTER > 0 |
64 | #include <net/bpf.h> |
65 | #endif |
66 | |
67 | #if NPF > 0 |
68 | #include <net/pfvar.h> |
69 | #endif |
70 | |
71 | #if NVLAN > 0 |
72 | #include <net/if_vlan_var.h> |
73 | #endif |
74 | |
75 | /* SIOCBRDGSIFFLGS, SIOCBRDGGIFFLGS */ |
76 | #define VEB_IFBIF_FLAGS	(IFBIF_LEARNING|IFBIF_DISCOVER|IFBIF_BLOCKNONIP) |
77 | |
78 | struct veb_rule { |
79 | 	TAILQ_ENTRY(veb_rule)		vr_entry; |
80 | 	SMR_TAILQ_ENTRY(veb_rule)	vr_lentry[2]; |
81 | |
82 | 	uint16_t			vr_flags; |
83 | #define VEB_R_F_IN			(1U << 0) |
84 | #define VEB_R_F_OUT			(1U << 1) |
85 | #define VEB_R_F_SRC			(1U << 2) |
86 | #define VEB_R_F_DST			(1U << 3) |
87 | |
88 | #define VEB_R_F_ARP			(1U << 4) |
89 | #define VEB_R_F_RARP			(1U << 5) |
90 | #define VEB_R_F_SHA			(1U << 6) |
91 | #define VEB_R_F_SPA			(1U << 7) |
92 | #define VEB_R_F_THA			(1U << 8) |
93 | #define VEB_R_F_TPA			(1U << 9) |
94 | 	uint16_t			vr_arp_op; |
95 | |
96 | 	uint64_t			vr_src; |
97 | 	uint64_t			vr_dst; |
98 | 	struct ether_addr		vr_arp_sha; |
99 | 	struct ether_addr		vr_arp_tha; |
100 | 	struct in_addr			vr_arp_spa; |
101 | 	struct in_addr			vr_arp_tpa; |
102 | |
103 | 	unsigned int			vr_action; |
104 | #define VEB_R_MATCH			0 |
105 | #define VEB_R_PASS			1 |
106 | #define VEB_R_BLOCK			2 |
107 | |
108 | 	int				vr_pftag; |
109 | }; |
110 | |
111 | TAILQ_HEAD(veb_rules, veb_rule); |
112 | SMR_TAILQ_HEAD(veb_rule_list, veb_rule); |
113 | |
114 | struct veb_softc; |
115 | |
116 | struct veb_port { |
117 | struct ifnet *p_ifp0; |
118 | struct refcnt p_refs; |
119 | |
120 | int (*p_enqueue)(struct ifnet *, struct mbuf *); |
121 | |
122 | int (*p_ioctl)(struct ifnet *, u_long, caddr_t); |
123 | int (*p_output)(struct ifnet *, struct mbuf *, struct sockaddr *, |
124 | struct rtentry *); |
125 | |
126 | struct task p_ltask; |
127 | struct task p_dtask; |
128 | |
129 | struct veb_softc *p_veb; |
130 | |
131 | struct ether_brport p_brport; |
132 | |
133 | unsigned int p_link_state; |
134 | unsigned int p_bif_flags; |
135 | uint32_t p_protected; |
136 | |
137 | struct veb_rules p_vrl; |
138 | unsigned int p_nvrl; |
139 | struct veb_rule_list p_vr_list[2]; |
140 | #define VEB_RULE_LIST_OUT	0 |
141 | #define VEB_RULE_LIST_IN	1 |
142 | |
143 | 	SMR_TAILQ_ENTRY(veb_port)	p_entry; |
144 | }; |
145 | |
146 | struct veb_ports { |
147 | 	SMR_TAILQ_HEAD(, veb_port)	l_list; |
148 | unsigned int l_count; |
149 | }; |
150 | |
151 | struct veb_softc { |
152 | struct ifnet sc_if; |
153 | unsigned int sc_dead; |
154 | |
155 | struct etherbridge sc_eb; |
156 | |
157 | struct rwlock sc_rule_lock; |
158 | struct veb_ports sc_ports; |
159 | struct veb_ports sc_spans; |
160 | }; |
161 | |
162 | #define DPRINTF(_sc, fmt...)	do { \ |
163 | 	if (ISSET((_sc)->sc_if.if_flags, IFF_DEBUG)) \ |
164 | 		printf(fmt); \ |
165 | } while (0) |
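/* DPRINTF() output only appears while IFF_DEBUG is set on the veb interface ("ifconfig vebN debug"); with the flag clear the printf is skipped at runtime. */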
166 | |
167 | static int veb_clone_create(struct if_clone *, int); |
168 | static int veb_clone_destroy(struct ifnet *); |
169 | |
170 | static int veb_ioctl(struct ifnet *, u_long, caddr_t); |
171 | static void veb_input(struct ifnet *, struct mbuf *); |
172 | static int veb_enqueue(struct ifnet *, struct mbuf *); |
173 | static int veb_output(struct ifnet *, struct mbuf *, struct sockaddr *, |
174 | struct rtentry *); |
175 | static void veb_start(struct ifqueue *); |
176 | |
177 | static int veb_up(struct veb_softc *); |
178 | static int veb_down(struct veb_softc *); |
179 | static int veb_iff(struct veb_softc *); |
180 | |
181 | static void veb_p_linkch(void *); |
182 | static void veb_p_detach(void *); |
183 | static int veb_p_ioctl(struct ifnet *, u_long, caddr_t); |
184 | static int veb_p_output(struct ifnet *, struct mbuf *, |
185 | struct sockaddr *, struct rtentry *); |
186 | |
187 | static void veb_p_dtor(struct veb_softc *, struct veb_port *, |
188 | const char *); |
189 | static int veb_add_port(struct veb_softc *, |
190 | const struct ifbreq *, unsigned int); |
191 | static int veb_del_port(struct veb_softc *, |
192 | const struct ifbreq *, unsigned int); |
193 | static int veb_port_list(struct veb_softc *, struct ifbifconf *); |
194 | static int veb_port_set_flags(struct veb_softc *, struct ifbreq *); |
195 | static int veb_port_get_flags(struct veb_softc *, struct ifbreq *); |
196 | static int veb_port_set_protected(struct veb_softc *, |
197 | const struct ifbreq *); |
198 | static int veb_add_addr(struct veb_softc *, const struct ifbareq *); |
199 | static int veb_del_addr(struct veb_softc *, const struct ifbareq *); |
200 | |
201 | static int veb_rule_add(struct veb_softc *, const struct ifbrlreq *); |
202 | static int veb_rule_list_flush(struct veb_softc *, |
203 | const struct ifbrlreq *); |
204 | static void veb_rule_list_free(struct veb_rule *); |
205 | static int veb_rule_list_get(struct veb_softc *, struct ifbrlconf *); |
206 | |
207 | static int veb_eb_port_cmp(void *, void *, void *); |
208 | static void *veb_eb_port_take(void *, void *); |
209 | static void veb_eb_port_rele(void *, void *); |
210 | static size_t veb_eb_port_ifname(void *, char *, size_t, void *); |
211 | static void veb_eb_port_sa(void *, struct sockaddr_storage *, void *); |
212 | |
213 | static void veb_eb_brport_take(void *); |
214 | static void veb_eb_brport_rele(void *); |
215 | |
216 | static const struct etherbridge_ops veb_etherbridge_ops = { |
217 | veb_eb_port_cmp, |
218 | veb_eb_port_take, |
219 | veb_eb_port_rele, |
220 | veb_eb_port_ifname, |
221 | veb_eb_port_sa, |
222 | }; |
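/* these callbacks hook veb ports into the shared etherbridge learning table (net/if_etherbridge.c): compare ports, take and release references, and report a member's interface name and address when the table is dumped. */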
223 | |
224 | static struct if_clone veb_cloner = |
225 |     IF_CLONE_INITIALIZER("veb", veb_clone_create, veb_clone_destroy); |
226 | |
227 | static struct pool veb_rule_pool; |
228 | |
229 | static int vport_clone_create(struct if_clone *, int); |
230 | static int vport_clone_destroy(struct ifnet *); |
231 | |
232 | struct vport_softc { |
233 | struct arpcom sc_ac; |
234 | unsigned int sc_dead; |
235 | }; |
236 | |
237 | static int vport_if_enqueue(struct ifnet *, struct mbuf *); |
238 | |
239 | static int vport_ioctl(struct ifnet *, u_long, caddr_t); |
240 | static int vport_enqueue(struct ifnet *, struct mbuf *); |
241 | static void vport_start(struct ifqueue *); |
242 | |
243 | static int vport_up(struct vport_softc *); |
244 | static int vport_down(struct vport_softc *); |
245 | static int vport_iff(struct vport_softc *); |
246 | |
247 | static struct if_clone vport_cloner = |
248 |     IF_CLONE_INITIALIZER("vport", vport_clone_create, vport_clone_destroy); |
249 | |
250 | void |
251 | vebattach(int count) |
252 | { |
253 | if_clone_attach(&veb_cloner); |
254 | if_clone_attach(&vport_cloner); |
255 | } |
256 | |
257 | static int |
258 | veb_clone_create(struct if_clone *ifc, int unit) |
259 | { |
260 | struct veb_softc *sc; |
261 | struct ifnet *ifp; |
262 | int error; |
263 | |
264 | 	if (veb_rule_pool.pr_size == 0) { |
265 | 		pool_init(&veb_rule_pool, sizeof(struct veb_rule), |
266 | 		    0, IPL_SOFTNET, 0, "vebrpl", NULL); |
267 | 	} |
268 | |
269 | 	sc = malloc(sizeof(*sc), M_DEVBUF, M_WAITOK|M_ZERO|M_CANFAIL); |
270 | 	if (sc == NULL) |
271 | 		return (ENOMEM); |
272 | |
273 | 	rw_init(&sc->sc_rule_lock, "vebrlk"); |
274 | 	SMR_TAILQ_INIT(&sc->sc_ports.l_list); |
275 | 	SMR_TAILQ_INIT(&sc->sc_spans.l_list); |
276 | |
277 | 	ifp = &sc->sc_if; |
278 | |
279 | 	snprintf(ifp->if_xname, sizeof(ifp->if_xname), "%s%d", |
280 | 	    ifc->ifc_name, unit); |
281 | |
282 | 	error = etherbridge_init(&sc->sc_eb, ifp->if_xname, |
283 | 	    &veb_etherbridge_ops, sc); |
284 | 	if (error != 0) { |
285 | 		free(sc, M_DEVBUF, sizeof(*sc)); |
286 | 		return (error); |
287 | 	} |
288 | |
289 | 	ifp->if_softc = sc; |
290 | 	ifp->if_type = IFT_BRIDGE; |
291 | 	ifp->if_hdrlen = ETHER_HDR_LEN; |
292 | 	ifp->if_hardmtu = ETHER_MAX_HARDMTU_LEN; |
293 | 	ifp->if_ioctl = veb_ioctl; |
294 | 	ifp->if_input = veb_input; |
295 | 	ifp->if_output = veb_output; |
296 | 	ifp->if_enqueue = veb_enqueue; |
297 | 	ifp->if_qstart = veb_start; |
298 | 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; |
299 | 	ifp->if_xflags = IFXF_CLONED | IFXF_MPSAFE; |
300 | |
301 | 	if_counters_alloc(ifp); |
302 | 	if_attach(ifp); |
303 | |
304 | 	if_alloc_sadl(ifp); |
305 | |
306 | #if NBPFILTER > 0 |
307 | 	bpfattach(&ifp->if_bpf, ifp, DLT_EN10MB, ETHER_HDR_LEN); |
308 | #endif |
309 | |
310 | return (0); |
311 | } |
312 | |
313 | static int |
314 | veb_clone_destroy(struct ifnet *ifp) |
315 | { |
316 | struct veb_softc *sc = ifp->if_softc; |
317 | struct veb_port *p, *np; |
318 | |
319 | 	NET_LOCK(); |
320 | 	sc->sc_dead = 1; |
321 | |
322 | 	if (ISSET(ifp->if_flags, IFF_RUNNING)) |
323 | 		veb_down(sc); |
324 | 	NET_UNLOCK(); |
325 | |
326 | 	if_detach(ifp); |
327 | |
328 | 	NET_LOCK(); |
329 | 	SMR_TAILQ_FOREACH_SAFE_LOCKED(p, &sc->sc_ports.l_list, p_entry, np) |
330 | 		veb_p_dtor(sc, p, "destroy"); |
331 | 	SMR_TAILQ_FOREACH_SAFE_LOCKED(p, &sc->sc_spans.l_list, p_entry, np) |
332 | 		veb_p_dtor(sc, p, "destroy"); |
333 | 	NET_UNLOCK(); |
334 | |
335 | 	etherbridge_destroy(&sc->sc_eb); |
336 | |
337 | 	free(sc, M_DEVBUF, sizeof(*sc)); |
338 | |
339 | return (0); |
340 | } |
341 | |
342 | static struct mbuf * |
343 | veb_span_input(struct ifnet *ifp0, struct mbuf *m, uint64_t dst, void *brport) |
344 | { |
345 | m_freem(m); |
346 | 	return (NULL); |
347 | } |
348 | |
349 | static void |
350 | veb_span(struct veb_softc *sc, struct mbuf *m0) |
351 | { |
352 | struct veb_port *p; |
353 | struct ifnet *ifp0; |
354 | struct mbuf *m; |
355 | |
356 | smr_read_enter(); |
357 | 	SMR_TAILQ_FOREACH(p, &sc->sc_spans.l_list, p_entry) { |
358 | 		ifp0 = p->p_ifp0; |
359 | 		if (!ISSET(ifp0->if_flags, IFF_RUNNING)) |
360 | 			continue; |
361 | |
362 | 		m = m_dup_pkt(m0, max_linkhdr + ETHER_ALIGN, M_NOWAIT); |
363 | 		if (m == NULL) { |
364 | 			/* XXX count error */ |
365 | 			continue; |
366 | 		} |
367 | |
368 | 		if_enqueue(ifp0, m); /* XXX count error */ |
369 | } |
370 | smr_read_leave(); |
371 | } |
372 | |
373 | static int |
374 | veb_ip_filter(const struct mbuf *m) |
375 | { |
376 | const struct ether_header *eh; |
377 | |
378 | 	eh = mtod(m, struct ether_header *); |
379 | 	switch (ntohs(eh->ether_type)) { |
380 | 	case ETHERTYPE_IP: |
381 | 	case ETHERTYPE_ARP: |
382 | 	case ETHERTYPE_REVARP: |
383 | 	case ETHERTYPE_IPV6: |
384 | return (0); |
385 | default: |
386 | break; |
387 | } |
388 | |
389 | return (1); |
390 | } |
391 | |
392 | static int |
393 | veb_vlan_filter(const struct mbuf *m) |
394 | { |
395 | const struct ether_header *eh; |
396 | |
397 | 	eh = mtod(m, struct ether_header *); |
398 | 	switch (ntohs(eh->ether_type)) { |
399 | 	case ETHERTYPE_VLAN: |
400 | 	case ETHERTYPE_QINQ: |
401 | return (1); |
402 | default: |
403 | break; |
404 | } |
405 | |
406 | return (0); |
407 | } |
408 | |
409 | static int |
410 | veb_rule_arp_match(const struct veb_rule *vr, struct mbuf *m) |
411 | { |
412 | struct ether_header *eh; |
413 | struct ether_arp ea; |
414 | |
415 | 	eh = mtod(m, struct ether_header *); |
416 | |
417 | 	if (eh->ether_type != htons(ETHERTYPE_ARP)) |
418 | 		return (0); |
419 | 	if (m->m_pkthdr.len < sizeof(*eh) + sizeof(ea)) |
420 | 		return (0); |
421 | |
422 | 	m_copydata(m, sizeof(*eh), sizeof(ea), (caddr_t)&ea); |
423 | |
424 | 	if (ea.arp_hrd != htons(ARPHRD_ETHER) || |
425 | 	    ea.arp_pro != htons(ETHERTYPE_IP) || |
426 | 	    ea.arp_hln != ETHER_ADDR_LEN || |
427 | 	    ea.arp_pln != sizeof(struct in_addr)) |
428 | 		return (0); |
429 | |
430 | 	if (ISSET(vr->vr_flags, VEB_R_F_ARP)) { |
431 | 		if (ea.arp_op != htons(ARPOP_REQUEST) && |
432 | 		    ea.arp_op != htons(ARPOP_REPLY)) |
433 | 			return (0); |
434 | 	} |
435 | 	if (ISSET(vr->vr_flags, VEB_R_F_RARP)) { |
436 | 		if (ea.arp_op != htons(ARPOP_REVREQUEST) && |
437 | 		    ea.arp_op != htons(ARPOP_REVREPLY)) |
438 | 			return (0); |
439 | 	} |
440 | |
441 | 	if (vr->vr_arp_op != htons(0) && vr->vr_arp_op != ea.arp_op) |
442 | 		return (0); |
443 | |
444 | 	if (ISSET(vr->vr_flags, VEB_R_F_SHA) && |
445 | 	    !ETHER_IS_EQ(&vr->vr_arp_sha, ea.arp_sha)) |
446 | 		return (0); |
447 | 	if (ISSET(vr->vr_flags, VEB_R_F_THA) && |
448 | 	    !ETHER_IS_EQ(&vr->vr_arp_tha, ea.arp_tha)) |
449 | 		return (0); |
450 | 	if (ISSET(vr->vr_flags, VEB_R_F_SPA) && |
451 | 	    memcmp(&vr->vr_arp_spa, ea.arp_spa, sizeof(vr->vr_arp_spa)) != 0) |
452 | 		return (0); |
453 | 	if (ISSET(vr->vr_flags, VEB_R_F_TPA) && |
454 | 	    memcmp(&vr->vr_arp_tpa, ea.arp_tpa, sizeof(vr->vr_arp_tpa)) != 0) |
455 | 		return (0); |
456 | |
457 | return (1); |
458 | } |
459 | |
460 | static int |
461 | veb_rule_list_test(struct veb_rule *vr, int dir, struct mbuf *m, |
462 | uint64_t src, uint64_t dst) |
463 | { |
464 | 	SMR_ASSERT_CRITICAL(); |
465 | |
466 | 	do { |
467 | 		if (ISSET(vr->vr_flags, VEB_R_F_ARP|VEB_R_F_RARP) && |
468 | 		    !veb_rule_arp_match(vr, m)) |
469 | 			continue; |
470 | |
471 | 		if (ISSET(vr->vr_flags, VEB_R_F_SRC) && |
472 | 		    vr->vr_src != src) |
473 | 			continue; |
474 | 		if (ISSET(vr->vr_flags, VEB_R_F_DST) && |
475 | 		    vr->vr_dst != dst) |
476 | 			continue; |
477 | |
478 | 		if (vr->vr_action == VEB_R_BLOCK) |
479 | 			return (VEB_R_BLOCK); |
480 | #if NPF > 0 |
481 | 		pf_tag_packet(m, vr->vr_pftag, -1); |
482 | #endif |
483 | 		if (vr->vr_action == VEB_R_PASS) |
484 | 			return (VEB_R_PASS); |
485 | 	} while ((vr = SMR_TAILQ_NEXT(vr, vr_lentry[dir])) != NULL); |
486 | |
487 | 	return (VEB_R_PASS); |
488 | } |
489 | |
490 | static inline int |
491 | veb_rule_filter(struct veb_port *p, int dir, struct mbuf *m, |
492 |     uint64_t src, uint64_t dst) |
493 | { |
494 | 	struct veb_rule *vr; |
495 | 	int filter = VEB_R_PASS; |
496 | |
497 | 	smr_read_enter(); |
498 | 	vr = SMR_TAILQ_FIRST(&p->p_vr_list[dir]); |
499 | 	if (vr != NULL) |
500 | 		filter = veb_rule_list_test(vr, dir, m, src, dst); |
501 | 	smr_read_leave(); |
502 | |
503 | 	return (filter == VEB_R_BLOCK); |
504 | } |
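/* a port's rule list is walked in order: the first matching PASS or BLOCK rule decides the verdict, match-only rules just apply their pf tag and evaluation continues, and an empty list (or no match) defaults to pass. */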
505 | |
506 | #if NPF > 0 |
507 | struct veb_pf_ip_family { |
508 | sa_family_t af; |
509 | struct mbuf *(*ip_check)(struct ifnet *, struct mbuf *); |
510 | void (*ip_input)(struct ifnet *, struct mbuf *); |
511 | }; |
512 | |
513 | static const struct veb_pf_ip_family veb_pf_ipv4 = { |
514 | 	.af		= AF_INET, |
515 | 	.ip_check	= ipv4_check, |
516 | 	.ip_input	= ipv4_input, |
517 | }; |
518 | |
519 | #ifdef INET6 |
520 | static const struct veb_pf_ip_family veb_pf_ipv6 = { |
521 | 	.af		= AF_INET6, |
522 | 	.ip_check	= ipv6_check, |
523 | 	.ip_input	= ipv6_input, |
524 | }; |
525 | #endif |
526 | |
527 | static struct mbuf * |
528 | veb_pf(struct ifnet *ifp0, int dir, struct mbuf *m) |
529 | { |
530 | struct ether_header *eh, copy; |
531 | const struct veb_pf_ip_family *fam; |
532 | |
533 | /* |
534 | * pf runs on vport interfaces when they enter or leave the |
535 | * l3 stack, so don't confuse things (even more) by running |
536 | * pf again here. note that because of this exception the |
537 | * pf direction on vport interfaces is reversed compared to |
538 | * other veb ports. |
539 | */ |
540 | if (ifp0->if_enqueue == vport_enqueue) |
541 | return (m); |
542 | |
543 | 	eh = mtod(m, struct ether_header *); |
544 | 	switch (ntohs(eh->ether_type)) { |
545 | 	case ETHERTYPE_IP: |
546 | 		fam = &veb_pf_ipv4; |
547 | 		break; |
548 | #ifdef INET6 |
549 | 	case ETHERTYPE_IPV6: |
550 | 		fam = &veb_pf_ipv6; |
551 | 		break; |
552 | #endif |
553 | 	default: |
554 | 		return (m); |
555 | } |
556 | |
557 | copy = *eh; |
558 | m_adj(m, sizeof(*eh)); |
559 | |
560 | 	if (dir == PF_IN) { |
561 | 		m = (*fam->ip_check)(ifp0, m); |
562 | 		if (m == NULL) |
563 | 			return (NULL); |
564 | 	} |
565 | |
566 | 	if (pf_test(fam->af, dir, ifp0, &m) != PF_PASS) { |
567 | 		m_freem(m); |
568 | 		return (NULL); |
569 | 	} |
570 | 	if (m == NULL) |
571 | 		return (NULL); |
572 | |
573 | 	if (dir == PF_IN && ISSET(m->m_pkthdr.pf.flags, PF_TAG_DIVERTED)) { |
574 | 		pf_mbuf_unlink_state_key(m); |
575 | 		pf_mbuf_unlink_inpcb(m); |
576 | 		(*fam->ip_input)(ifp0, m); |
577 | 		return (NULL); |
578 | 	} |
579 | |
580 | 	m = m_prepend(m, sizeof(*eh), M_DONTWAIT); |
581 | 	if (m == NULL) |
582 | 		return (NULL); |
583 | |
584 | /* checksum? */ |
585 | |
586 | 	eh = mtod(m, struct ether_header *); |
587 | *eh = copy; |
588 | |
589 | return (m); |
590 | } |
591 | #endif /* NPF > 0 */ |
592 | |
593 | #if 0 && defined(IPSEC) |
594 | static struct mbuf * |
595 | veb_ipsec_proto_in(struct ifnet *ifp0, struct mbuf *m, int iphlen, |
596 | /* const */ union sockaddr_union *dst, int poff) |
597 | { |
598 | struct tdb *tdb; |
599 | uint16_t cpi; |
600 | uint32_t spi; |
601 | uint8_t proto; |
602 | |
603 | /* ipsec_common_input checks for 8 bytes of input, so we do too */ |
604 | if (m->m_pkthdrM_dat.MH.MH_pkthdr.len < iphlen + 2 * sizeof(u_int32_t)) |
605 | return (m); /* decline */ |
606 | |
607 | proto = *(mtod(m, uint8_t *)((uint8_t *)((m)->m_hdr.mh_data)) + poff); |
608 | /* i'm not a huge fan of how these headers get picked at */ |
609 | switch (proto) { |
610 | case IPPROTO_ESP50: |
611 | m_copydata(m, iphlen, sizeof(spi), &spi); |
612 | break; |
613 | case IPPROTO_AH51: |
614 | m_copydata(m, iphlen + sizeof(uint32_t), sizeof(spi), &spi); |
615 | break; |
616 | case IPPROTO_IPCOMP108: |
617 | m_copydata(m, iphlen + sizeof(uint16_t), sizeof(cpi), &cpi); |
618 | 		spi = htonl(ntohs(cpi)); |
619 | break; |
620 | default: |
621 | return (m); /* decline */ |
622 | } |
623 | |
624 | tdb = gettdb(m->m_pkthdrM_dat.MH.MH_pkthdr.ph_rtableid, spi, dst, proto); |
625 | if (tdb != NULL((void *)0) && !ISSET(tdb->tdb_flags, TDBF_INVALID)((tdb->tdb_flags) & (TDBF_INVALID)) && |
626 | tdb->tdb_xform != NULL((void *)0)) { |
627 | if (tdb->tdb_first_use == 0) { |
628 | tdb->tdb_first_use = gettime(); |
629 | if (ISSET(tdb->tdb_flags, TDBF_FIRSTUSE)((tdb->tdb_flags) & (TDBF_FIRSTUSE))) { |
630 | timeout_add_sec(&tdb->tdb_first_tmo, |
631 | tdb->tdb_exp_first_use); |
632 | } |
633 | if (ISSET(tdb->tdb_flags, TDBF_SOFT_FIRSTUSE)((tdb->tdb_flags) & (TDBF_SOFT_FIRSTUSE))) { |
634 | timeout_add_sec(&tdb->tdb_sfirst_tmo, |
635 | tdb->tdb_soft_first_use); |
636 | } |
637 | } |
638 | |
639 | (*(tdb->tdb_xform->xf_input))(m, tdb, iphlen, poff); |
640 | return (NULL((void *)0)); |
641 | } |
642 | |
643 | return (m); |
644 | } |
645 | |
646 | static struct mbuf * |
647 | veb_ipsec_ipv4_in(struct ifnet *ifp0, struct mbuf *m) |
648 | { |
649 | union sockaddr_union su = { |
650 | .sin.sin_len = sizeof(su.sin), |
651 | .sin.sin_family = AF_INET2, |
652 | }; |
653 | struct ip *ip; |
654 | int iphlen; |
655 | |
656 | if (m->m_lenm_hdr.mh_len < sizeof(*ip)) { |
657 | m = m_pullup(m, sizeof(*ip)); |
658 | if (m == NULL((void *)0)) |
659 | return (NULL((void *)0)); |
660 | } |
661 | |
662 | ip = mtod(m, struct ip *)((struct ip *)((m)->m_hdr.mh_data)); |
663 | iphlen = ip->ip_hl << 2; |
664 | if (iphlen < sizeof(*ip)) { |
665 | /* this is a weird packet, decline */ |
666 | return (m); |
667 | } |
668 | |
669 | su.sin.sin_addr = ip->ip_dst; |
670 | |
671 | return (veb_ipsec_proto_in(ifp0, m, iphlen, &su, |
672 | offsetof(struct ip, ip_p)__builtin_offsetof(struct ip, ip_p))); |
673 | } |
674 | |
675 | #ifdef INET6 |
676 | static struct mbuf * |
677 | veb_ipsec_ipv6_in(struct ifnet *ifp0, struct mbuf *m) |
678 | { |
679 | union sockaddr_union su = { |
680 | .sin6.sin6_len = sizeof(su.sin6), |
681 | .sin6.sin6_family = AF_INET624, |
682 | }; |
683 | struct ip6_hdr *ip6; |
684 | |
685 | if (m->m_lenm_hdr.mh_len < sizeof(*ip6)) { |
686 | m = m_pullup(m, sizeof(*ip6)); |
687 | if (m == NULL((void *)0)) |
688 | return (NULL((void *)0)); |
689 | } |
690 | |
691 | ip6 = mtod(m, struct ip6_hdr *)((struct ip6_hdr *)((m)->m_hdr.mh_data)); |
692 | |
693 | su.sin6.sin6_addr = ip6->ip6_dst; |
694 | |
695 | /* XXX scope? */ |
696 | |
697 | return (veb_ipsec_proto_in(ifp0, m, sizeof(*ip6), &su, |
698 | offsetof(struct ip6_hdr, ip6_nxt)__builtin_offsetof(struct ip6_hdr, ip6_ctlun.ip6_un1.ip6_un1_nxt ))); |
699 | } |
700 | #endif /* INET6 */ |
701 | |
702 | static struct mbuf * |
703 | veb_ipsec_in(struct ifnet *ifp0, struct mbuf *m) |
704 | { |
705 | struct mbuf *(*ipsec_ip_in)(struct ifnet *, struct mbuf *); |
706 | struct ether_header *eh, copy; |
707 | |
708 | if (ifp0->if_enqueue == vport_enqueue) |
709 | return (m); |
710 | |
711 | eh = mtod(m, struct ether_header *)((struct ether_header *)((m)->m_hdr.mh_data)); |
712 | switch (ntohs(eh->ether_type)(__uint16_t)(__builtin_constant_p(eh->ether_type) ? (__uint16_t )(((__uint16_t)(eh->ether_type) & 0xffU) << 8 | ( (__uint16_t)(eh->ether_type) & 0xff00U) >> 8) : __swap16md (eh->ether_type))) { |
713 | case ETHERTYPE_IP0x0800: |
714 | ipsec_ip_in = veb_ipsec_ipv4_in; |
715 | break; |
716 | #ifdef INET6 |
717 | case ETHERTYPE_IPV60x86DD: |
718 | ipsec_ip_in = veb_ipsec_ipv6_in; |
719 | break; |
720 | #endif /* INET6 */ |
721 | default: |
722 | return (m); |
723 | } |
724 | |
725 | copy = *eh; |
726 | m_adj(m, sizeof(*eh)); |
727 | |
728 | m = (*ipsec_ip_in)(ifp0, m); |
729 | if (m == NULL((void *)0)) |
730 | return (NULL((void *)0)); |
731 | |
732 | m = m_prepend(m, sizeof(*eh), M_DONTWAIT0x0002); |
733 | if (m == NULL((void *)0)) |
734 | return (NULL((void *)0)); |
735 | |
736 | eh = mtod(m, struct ether_header *)((struct ether_header *)((m)->m_hdr.mh_data)); |
737 | *eh = copy; |
738 | |
739 | return (m); |
740 | } |
741 | |
742 | static struct mbuf * |
743 | veb_ipsec_proto_out(struct mbuf *m, sa_family_t af, int iphlen) |
744 | { |
745 | struct tdb *tdb; |
746 | int error; |
747 | #if NPF > 0 |
748 | struct ifnet *encifp; |
749 | #endif |
750 | |
751 | tdb = ipsp_spd_lookup(m, af, iphlen, &error, IPSP_DIRECTION_OUT, |
752 | NULL((void *)0), NULL((void *)0), NULL((void *)0)); |
753 | if (tdb == NULL((void *)0)) |
754 | return (m); |
755 | |
756 | #if NPF > 0 |
757 | encifp = enc_getif(tdb->tdb_rdomain, tdb->tdb_tap); |
758 | if (encifp != NULL((void *)0)) { |
759 | if (pf_test(af, PF_OUT, encifp, &m) != PF_PASS) { |
760 | m_freem(m); |
761 | return (NULL((void *)0)); |
762 | } |
763 | if (m == NULL((void *)0)) |
764 | return (NULL((void *)0)); |
765 | } |
766 | #endif /* NPF > 0 */ |
767 | |
768 | /* XXX mtu checks */ |
769 | |
770 | (void)ipsp_process_packet(m, tdb, af, 0); |
771 | return (NULL((void *)0)); |
772 | } |
773 | |
774 | static struct mbuf * |
775 | veb_ipsec_ipv4_out(struct mbuf *m) |
776 | { |
777 | struct ip *ip; |
778 | int iphlen; |
779 | |
780 | if (m->m_lenm_hdr.mh_len < sizeof(*ip)) { |
781 | m = m_pullup(m, sizeof(*ip)); |
782 | if (m == NULL((void *)0)) |
783 | return (NULL((void *)0)); |
784 | } |
785 | |
786 | ip = mtod(m, struct ip *)((struct ip *)((m)->m_hdr.mh_data)); |
787 | iphlen = ip->ip_hl << 2; |
788 | if (iphlen < sizeof(*ip)) { |
789 | /* this is a weird packet, decline */ |
790 | return (m); |
791 | } |
792 | |
793 | return (veb_ipsec_proto_out(m, AF_INET2, iphlen)); |
794 | } |
795 | |
796 | #ifdef INET6 |
797 | static struct mbuf * |
798 | veb_ipsec_ipv6_out(struct mbuf *m) |
799 | { |
800 | return (veb_ipsec_proto_out(m, AF_INET624, sizeof(struct ip6_hdr))); |
801 | } |
802 | #endif /* INET6 */ |
803 | |
804 | static struct mbuf * |
805 | veb_ipsec_out(struct ifnet *ifp0, struct mbuf *m) |
806 | { |
807 | struct mbuf *(*ipsec_ip_out)(struct mbuf *); |
808 | struct ether_header *eh, copy; |
809 | |
810 | if (ifp0->if_enqueue == vport_enqueue) |
811 | return (m); |
812 | |
813 | eh = mtod(m, struct ether_header *)((struct ether_header *)((m)->m_hdr.mh_data)); |
814 | switch (ntohs(eh->ether_type)(__uint16_t)(__builtin_constant_p(eh->ether_type) ? (__uint16_t )(((__uint16_t)(eh->ether_type) & 0xffU) << 8 | ( (__uint16_t)(eh->ether_type) & 0xff00U) >> 8) : __swap16md (eh->ether_type))) { |
815 | case ETHERTYPE_IP0x0800: |
816 | ipsec_ip_out = veb_ipsec_ipv4_out; |
817 | break; |
818 | #ifdef INET6 |
819 | case ETHERTYPE_IPV60x86DD: |
820 | ipsec_ip_out = veb_ipsec_ipv6_out; |
821 | break; |
822 | #endif /* INET6 */ |
823 | default: |
824 | return (m); |
825 | } |
826 | |
827 | copy = *eh; |
828 | m_adj(m, sizeof(*eh)); |
829 | |
830 | m = (*ipsec_ip_out)(m); |
831 | if (m == NULL((void *)0)) |
832 | return (NULL((void *)0)); |
833 | |
834 | m = m_prepend(m, sizeof(*eh), M_DONTWAIT0x0002); |
835 | if (m == NULL((void *)0)) |
836 | return (NULL((void *)0)); |
837 | |
838 | eh = mtod(m, struct ether_header *)((struct ether_header *)((m)->m_hdr.mh_data)); |
839 | *eh = copy; |
840 | |
841 | return (m); |
842 | } |
843 | #endif /* IPSEC */ |
844 | |
845 | static void |
846 | veb_broadcast(struct veb_softc *sc, struct veb_port *rp, struct mbuf *m0, |
847 | uint64_t src, uint64_t dst) |
848 | { |
849 | struct ifnet *ifp = &sc->sc_if; |
850 | struct veb_port *tp; |
851 | struct ifnet *ifp0; |
852 | struct mbuf *m; |
853 | |
854 | #if NPF > 0 |
855 | 	/* |
856 | 	 * we couldn't find a specific port to send this packet to, |
857 | 	 * but pf should still have a chance to apply policy to it. |
858 | 	 * let pf look at it, but use the veb interface as a proxy. |
859 | 	 */ |
860 | 	if (ISSET(ifp->if_flags, IFF_LINK1) && |
861 | 	    (m = veb_pf(ifp, PF_OUT, m0)) == NULL) |
862 | 		return; |
863 | #endif |
864 | |
865 | #if 0 && defined(IPSEC) |
866 | 	/* same goes for ipsec */ |
867 | 	if (ISSET(ifp->if_flags, IFF_LINK2) && |
868 | 	    (m = veb_ipsec_out(ifp, m0)) == NULL) |
869 | 		return; |
870 | #endif |
871 | |
872 | 	counters_pkt(ifp->if_counters, ifc_opackets, ifc_obytes, |
873 | 	    m0->m_pkthdr.len); |
874 | |
875 | smr_read_enter(); |
876 | 	SMR_TAILQ_FOREACH(tp, &sc->sc_ports.l_list, p_entry) { |
877 | 		if (rp == tp || (rp->p_protected & tp->p_protected)) { |
878 | 			/* |
879 | 			 * don't let Ethernet packets hairpin or |
880 | 			 * move between ports in the same protected |
881 | 			 * domain(s). |
882 | 			 */ |
883 | 			continue; |
884 | 		} |
885 | |
886 | 		ifp0 = tp->p_ifp0; |
887 | 		if (!ISSET(ifp0->if_flags, IFF_RUNNING)) { |
888 | 			/* don't waste time */ |
889 | 			continue; |
890 | 		} |
891 | |
892 | 		if (!ISSET(tp->p_bif_flags, IFBIF_DISCOVER) && |
893 | 		    !ISSET(m0->m_flags, M_BCAST | M_MCAST)) { |
894 | 			/* don't flood unknown unicast */ |
895 | 			continue; |
896 | 		} |
897 | |
898 | 		if (veb_rule_filter(tp, VEB_RULE_LIST_OUT, m0, src, dst)) |
899 | 			continue; |
900 | |
901 | 		m = m_dup_pkt(m0, max_linkhdr + ETHER_ALIGN, M_NOWAIT); |
902 | 		if (m == NULL) { |
903 | /* XXX count error? */ |
904 | continue; |
905 | } |
906 | |
907 | (*tp->p_enqueue)(ifp0, m); /* XXX count error */ |
908 | } |
909 | smr_read_leave(); |
910 | |
911 | m_freem(m0); |
912 | } |
913 | |
914 | static struct mbuf * |
915 | veb_transmit(struct veb_softc *sc, struct veb_port *rp, struct veb_port *tp, |
916 | struct mbuf *m, uint64_t src, uint64_t dst) |
917 | { |
918 | struct ifnet *ifp = &sc->sc_if; |
919 | struct ifnet *ifp0; |
920 | |
921 | 	if (tp == NULL) |
922 | return (m); |
923 | |
924 | if (rp == tp || (rp->p_protected & tp->p_protected)) { |
925 | /* |
926 | * don't let Ethernet packets hairpin or move between |
927 | * ports in the same protected domain(s). |
928 | */ |
929 | goto drop; |
930 | } |
931 | |
932 | 	if (veb_rule_filter(tp, VEB_RULE_LIST_OUT, m, src, dst)) |
933 | 		goto drop; |
934 | |
935 | 	ifp0 = tp->p_ifp0; |
936 | |
937 | #if 0 && defined(IPSEC) |
938 | 	if (ISSET(ifp->if_flags, IFF_LINK2) && |
939 | 	    (m = veb_ipsec_out(ifp0, m0)) == NULL) |
940 | 		return; |
941 | #endif |
942 | |
943 | #if NPF > 0 |
944 | 	if (ISSET(ifp->if_flags, IFF_LINK1) && |
945 | 	    (m = veb_pf(ifp0, PF_OUT, m)) == NULL) |
946 | 		return (NULL); |
947 | #endif |
948 | |
949 | 	counters_pkt(ifp->if_counters, ifc_opackets, ifc_obytes, |
950 | 	    m->m_pkthdr.len); |
951 | |
952 | 	(*tp->p_enqueue)(ifp0, m); /* XXX count error */ |
953 | |
954 | 	return (NULL); |
955 | drop: |
956 | 	m_freem(m); |
957 | 	return (NULL); |
958 | } |
959 | |
960 | static struct mbuf * |
961 | veb_vport_input(struct ifnet *ifp0, struct mbuf *m, uint64_t dst, void *brport) |
962 | { |
963 | return (m); |
964 | } |
965 | |
966 | static struct mbuf * |
967 | veb_port_input(struct ifnet *ifp0, struct mbuf *m, uint64_t dst, void *brport) |
968 | { |
969 | struct veb_port *p = brport; |
970 | struct veb_softc *sc = p->p_veb; |
971 | struct ifnet *ifp = &sc->sc_if; |
972 | struct ether_header *eh; |
973 | uint64_t src; |
974 | #if NBPFILTER1 > 0 |
975 | caddr_t if_bpf; |
976 | #endif |
977 | |
978 | 	if (!ISSET(ifp->if_flags, IFF_RUNNING)) |
979 | 		return (m); |
980 | |
981 | 	eh = mtod(m, struct ether_header *); |
982 | 	src = ether_addr_to_e64((struct ether_addr *)eh->ether_shost); |
983 | |
984 | 	/* Is this a MAC Bridge component Reserved address? */ |
985 | 	if (ETH64_IS_8021_RSVD(dst)) { |
986 | 		if (!ISSET(ifp->if_flags, IFF_LINK0)) { |
987 | /* |
988 | * letting vlans through implies this is |
989 | * an s-vlan component. |
990 | */ |
991 | goto drop; |
992 | } |
993 | |
994 | /* look at the last nibble of the 802.1 reserved address */ |
995 | switch (dst & 0xf) { |
996 | case 0x0: /* Nearest Customer Bridge Group Address */ |
997 | case 0xb: /* EDE-SS PEP (IEEE Std 802.1AEcg) */ |
998 | case 0xc: /* reserved */ |
999 | case 0xd: /* Provider Bridge MVRP Address */ |
1000 | case 0xf: /* reserved */ |
1001 | break; |
1002 | default: |
1003 | goto drop; |
1004 | } |
1005 | } |
1006 | |
1007 | #if NVLAN > 0 |
1008 | 	/* |
1009 | 	 * If the underlying interface removed the VLAN header itself, |
1010 | 	 * add it back. |
1011 | 	 */ |
1012 | 	if (ISSET(m->m_flags, M_VLANTAG)) { |
1013 | 		m = vlan_inject(m, ETHERTYPE_VLAN, m->m_pkthdr.ether_vtag); |
1014 | 		if (m == NULL) { |
1015 | 			counters_inc(ifp->if_counters, ifc_ierrors); |
1016 | 			goto drop; |
1017 | 		} |
1018 | 	} |
1019 | #endif |
1020 | |
1021 | 	counters_pkt(ifp->if_counters, ifc_ipackets, ifc_ibytes, |
1022 | 	    m->m_pkthdr.len); |
1023 | |
1024 | 	/* force packets into the one routing domain for pf */ |
1025 | 	m->m_pkthdr.ph_rtableid = ifp->if_rdomain; |
1026 | |
1027 | #if NBPFILTER > 0 |
1028 | 	if_bpf = READ_ONCE(ifp->if_bpf); |
1029 | 	if (if_bpf != NULL) { |
1030 | if (bpf_mtap_ether(if_bpf, m, 0) != 0) |
1031 | goto drop; |
1032 | } |
1033 | #endif |
1034 | |
1035 | veb_span(sc, m); |
1036 | |
1037 | 	if (ISSET(p->p_bif_flags, IFBIF_BLOCKNONIP) && |
1038 | 	    veb_ip_filter(m)) |
1039 | 		goto drop; |
1040 | |
1041 | 	if (!ISSET(ifp->if_flags, IFF_LINK0) && |
1042 | 	    veb_vlan_filter(m)) |
1043 | 		goto drop; |
1044 | |
1045 | 	if (veb_rule_filter(p, VEB_RULE_LIST_IN, m, src, dst)) |
1046 | 		goto drop; |
1047 | |
1048 | #if NPF > 0 |
1049 | 	if (ISSET(ifp->if_flags, IFF_LINK1) && |
1050 | 	    (m = veb_pf(ifp0, PF_IN, m)) == NULL) |
1051 | 		return (NULL); |
1052 | #endif |
1053 | |
1054 | #if 0 && defined(IPSEC) |
1055 | 	if (ISSET(ifp->if_flags, IFF_LINK2) && |
1056 | 	    (m = veb_ipsec_in(ifp0, m)) == NULL) |
1057 | 		return (NULL); |
1058 | #endif |
1059 | |
1060 | 	eh = mtod(m, struct ether_header *); |
Value stored to 'eh' is never read |
1061 | |
1062 | 	if (ISSET(p->p_bif_flags, IFBIF_LEARNING)) |
1063 | 		etherbridge_map(&sc->sc_eb, p, src); |
1064 | |
1065 | 	CLR(m->m_flags, M_BCAST|M_MCAST); |
1066 | |
1067 | 	if (!ETH64_IS_MULTICAST(dst)) { |
1068 | 		struct veb_port *tp = NULL; |
1069 | |
1070 | 		smr_read_enter(); |
1071 | 		tp = etherbridge_resolve(&sc->sc_eb, dst); |
1072 | 		if (tp != NULL) |
1073 | 			veb_eb_port_take(NULL, tp); |
1074 | 		smr_read_leave(); |
1075 | 		if (tp != NULL) { |
1076 | 			m = veb_transmit(sc, p, tp, m, src, dst); |
1077 | 			veb_eb_port_rele(NULL, tp); |
1078 | 		} |
1079 | |
1080 | 		if (m == NULL) |
1081 | 			return (NULL); |
1082 | |
1083 | 		/* unknown unicast address */ |
1084 | 	} else { |
1085 | 		SET(m->m_flags, ETH64_IS_BROADCAST(dst) ? M_BCAST : M_MCAST); |
1086 | 	} |
1087 | |
1088 | 	veb_broadcast(sc, p, m, src, dst); |
1089 | 	return (NULL); |
1090 | |
1091 | drop: |
1092 | 	m_freem(m); |
1093 | 	return (NULL); |
1094 | } |
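/* veb_port_input() is installed as the ether_brport input handler for normal (non-vport) member ports in veb_add_port(); vport members use veb_vport_input() and span ports use veb_span_input(). */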
1095 | |
1096 | static void |
1097 | veb_input(struct ifnet *ifp, struct mbuf *m) |
1098 | { |
1099 | m_freem(m); |
1100 | } |
1101 | |
1102 | static int |
1103 | veb_output(struct ifnet *ifp, struct mbuf *m, struct sockaddr *dst, |
1104 | struct rtentry *rt) |
1105 | { |
1106 | m_freem(m); |
1107 | 	return (ENODEV); |
1108 | } |
1109 | |
1110 | static int |
1111 | veb_enqueue(struct ifnet *ifp, struct mbuf *m) |
1112 | { |
1113 | m_freem(m); |
1114 | 	return (ENODEV); |
1115 | } |
1116 | |
1117 | static void |
1118 | veb_start(struct ifqueue *ifq) |
1119 | { |
1120 | ifq_purge(ifq); |
1121 | } |
1122 | |
1123 | static int |
1124 | veb_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) |
1125 | { |
1126 | struct veb_softc *sc = ifp->if_softc; |
1127 | struct ifbrparam *bparam = (struct ifbrparam *)data; |
1128 | int error = 0; |
1129 | |
1130 | if (sc->sc_dead) |
1131 | 		return (ENXIO); |
1132 | |
1133 | 	switch (cmd) { |
1134 | 	case SIOCSIFFLAGS: |
1135 | 		if (ISSET(ifp->if_flags, IFF_UP)) { |
1136 | 			if (!ISSET(ifp->if_flags, IFF_RUNNING)) |
1137 | 				error = veb_up(sc); |
1138 | 		} else { |
1139 | 			if (ISSET(ifp->if_flags, IFF_RUNNING)) |
1140 | 				error = veb_down(sc); |
1141 | 		} |
1142 | 		break; |
1143 | |
1144 | 	case SIOCBRDGADD: |
1145 | 		error = suser(curproc); |
1146 | 		if (error != 0) |
1147 | 			break; |
1148 | |
1149 | 		error = veb_add_port(sc, (struct ifbreq *)data, 0); |
1150 | 		break; |
1151 | 	case SIOCBRDGADDS: |
1152 | 		error = suser(curproc); |
1153 | 		if (error != 0) |
1154 | 			break; |
1155 | |
1156 | 		error = veb_add_port(sc, (struct ifbreq *)data, 1); |
1157 | 		break; |
1158 | 	case SIOCBRDGDEL: |
1159 | 		error = suser(curproc); |
1160 | 		if (error != 0) |
1161 | 			break; |
1162 | |
1163 | 		error = veb_del_port(sc, (struct ifbreq *)data, 0); |
1164 | 		break; |
1165 | 	case SIOCBRDGDELS: |
1166 | 		error = suser(curproc); |
1167 | 		if (error != 0) |
1168 | 			break; |
1169 | |
1170 | 		error = veb_del_port(sc, (struct ifbreq *)data, 1); |
1171 | 		break; |
1172 | |
1173 | 	case SIOCBRDGSCACHE: |
1174 | 		error = suser(curproc); |
1175 | 		if (error != 0) |
1176 | 			break; |
1177 | |
1178 | 		error = etherbridge_set_max(&sc->sc_eb, bparam); |
1179 | 		break; |
1180 | 	case SIOCBRDGGCACHE: |
1181 | 		error = etherbridge_get_max(&sc->sc_eb, bparam); |
1182 | 		break; |
1183 | |
1184 | 	case SIOCBRDGSTO: |
1185 | 		error = suser(curproc); |
1186 | 		if (error != 0) |
1187 | 			break; |
1188 | |
1189 | 		error = etherbridge_set_tmo(&sc->sc_eb, bparam); |
1190 | 		break; |
1191 | 	case SIOCBRDGGTO: |
1192 | 		error = etherbridge_get_tmo(&sc->sc_eb, bparam); |
1193 | 		break; |
1194 | |
1195 | 	case SIOCBRDGRTS: |
1196 | 		error = etherbridge_rtfind(&sc->sc_eb, (struct ifbaconf *)data); |
1197 | 		break; |
1198 | 	case SIOCBRDGIFS: |
1199 | 		error = veb_port_list(sc, (struct ifbifconf *)data); |
1200 | 		break; |
1201 | 	case SIOCBRDGFLUSH: |
1202 | 		etherbridge_flush(&sc->sc_eb, |
1203 | 		    ((struct ifbreq *)data)->ifbr_ifsflags); |
1204 | 		break; |
1205 | 	case SIOCBRDGSADDR: |
1206 | 		error = veb_add_addr(sc, (struct ifbareq *)data); |
1207 | 		break; |
1208 | 	case SIOCBRDGDADDR: |
1209 | 		error = veb_del_addr(sc, (struct ifbareq *)data); |
1210 | 		break; |
1211 | |
1212 | 	case SIOCBRDGSIFPROT: |
1213 | 		error = veb_port_set_protected(sc, (struct ifbreq *)data); |
1214 | 		break; |
1215 | |
1216 | 	case SIOCBRDGSIFFLGS: |
1217 | 		error = veb_port_set_flags(sc, (struct ifbreq *)data); |
1218 | 		break; |
1219 | 	case SIOCBRDGGIFFLGS: |
1220 | 		error = veb_port_get_flags(sc, (struct ifbreq *)data); |
1221 | 		break; |
1222 | |
1223 | 	case SIOCBRDGARL: |
1224 | 		error = veb_rule_add(sc, (struct ifbrlreq *)data); |
1225 | 		break; |
1226 | 	case SIOCBRDGFRL: |
1227 | 		error = veb_rule_list_flush(sc, (struct ifbrlreq *)data); |
1228 | 		break; |
1229 | 	case SIOCBRDGGRL: |
1230 | 		error = veb_rule_list_get(sc, (struct ifbrlconf *)data); |
1231 | 		break; |
1232 | |
1233 | 	default: |
1234 | 		error = ENOTTY; |
1235 | 		break; |
1236 | 	} |
1237 | |
1238 | 	if (error == ENETRESET) |
1239 | error = veb_iff(sc); |
1240 | |
1241 | return (error); |
1242 | } |
1243 | |
1244 | static int |
1245 | veb_add_port(struct veb_softc *sc, const struct ifbreq *req, unsigned int span) |
1246 | { |
1247 | struct ifnet *ifp = &sc->sc_if; |
1248 | struct ifnet *ifp0; |
1249 | struct veb_ports *port_list; |
1250 | struct veb_port *p; |
1251 | int isvport; |
1252 | int error; |
1253 | |
1254 | 	NET_ASSERT_LOCKED(); |
1255 | |
1256 | 	ifp0 = if_unit(req->ifbr_ifsname); |
1257 | 	if (ifp0 == NULL) |
1258 | 		return (EINVAL); |
1259 | |
1260 | 	if (ifp0->if_type != IFT_ETHER) { |
1261 | 		error = EPROTONOSUPPORT; |
1262 | goto put; |
1263 | } |
1264 | |
1265 | if (ifp0 == ifp) { |
1266 | 		error = EPROTONOSUPPORT; |
1267 | goto put; |
1268 | } |
1269 | |
1270 | isvport = (ifp0->if_enqueue == vport_enqueue); |
1271 | |
1272 | error = ether_brport_isset(ifp0); |
1273 | if (error != 0) |
1274 | goto put; |
1275 | |
1276 | /* let's try */ |
1277 | |
1278 | 	p = malloc(sizeof(*p), M_DEVBUF, M_WAITOK|M_ZERO|M_CANFAIL); |
1279 | 	if (p == NULL) { |
1280 | 		error = ENOMEM; |
1281 | 		goto put; |
1282 | 	} |
1283 | |
1284 | 	p->p_ifp0 = ifp0; |
1285 | 	p->p_veb = sc; |
1286 | |
1287 | 	refcnt_init(&p->p_refs); |
1288 | 	TAILQ_INIT(&p->p_vrl); |
1289 | 	SMR_TAILQ_INIT(&p->p_vr_list[0]); |
1290 | 	SMR_TAILQ_INIT(&p->p_vr_list[1]); |
1291 | |
1292 | p->p_enqueue = isvport ? vport_if_enqueue : if_enqueue; |
1293 | p->p_ioctl = ifp0->if_ioctl; |
1294 | p->p_output = ifp0->if_output; |
1295 | |
1296 | if (span) { |
1297 | port_list = &sc->sc_spans; |
1298 | |
1299 | if (isvport) { |
1300 | 			error = EPROTONOSUPPORT; |
1301 | goto free; |
1302 | } |
1303 | |
1304 | p->p_brport.eb_input = veb_span_input; |
1305 | 		p->p_bif_flags = IFBIF_SPAN; |
1306 | } else { |
1307 | port_list = &sc->sc_ports; |
1308 | |
1309 | error = ifpromisc(ifp0, 1); |
1310 | if (error != 0) |
1311 | goto free; |
1312 | |
1313 | 		p->p_bif_flags = IFBIF_LEARNING | IFBIF_DISCOVER; |
1314 | p->p_brport.eb_input = isvport ? |
1315 | veb_vport_input : veb_port_input; |
1316 | } |
1317 | |
1318 | p->p_brport.eb_port_take = veb_eb_brport_take; |
1319 | p->p_brport.eb_port_rele = veb_eb_brport_rele; |
1320 | |
1321 | /* this might have changed if we slept for malloc or ifpromisc */ |
1322 | error = ether_brport_isset(ifp0); |
1323 | if (error != 0) |
1324 | goto unpromisc; |
1325 | |
1326 | task_set(&p->p_ltask, veb_p_linkch, p); |
1327 | if_linkstatehook_add(ifp0, &p->p_ltask); |
1328 | |
1329 | task_set(&p->p_dtask, veb_p_detach, p); |
1330 | if_detachhook_add(ifp0, &p->p_dtask); |
1331 | |
1332 | p->p_brport.eb_port = p; |
1333 | |
1334 | /* commit */ |
1335 | 	SMR_TAILQ_INSERT_TAIL_LOCKED(&port_list->l_list, p, p_entry); |
1336 | port_list->l_count++; |
1337 | |
1338 | ether_brport_set(ifp0, &p->p_brport); |
1339 | if (!isvport) { /* vport is special */ |
1340 | ifp0->if_ioctl = veb_p_ioctl; |
1341 | ifp0->if_output = veb_p_output; |
1342 | } |
1343 | |
1344 | veb_p_linkch(p); |
1345 | |
1346 | return (0); |
1347 | |
1348 | unpromisc: |
1349 | if (!span) |
1350 | ifpromisc(ifp0, 0); |
1351 | free: |
1352 | 	free(p, M_DEVBUF, sizeof(*p)); |
1353 | put: |
1354 | if_put(ifp0); |
1355 | return (error); |
1356 | } |
1357 | |
1358 | static struct veb_port * |
1359 | veb_trunkport(struct veb_softc *sc, const char *name, unsigned int span) |
1360 | { |
1361 | struct veb_ports *port_list; |
1362 | struct veb_port *p; |
1363 | |
1364 | port_list = span ? &sc->sc_spans : &sc->sc_ports; |
1365 | |
1366 | 	SMR_TAILQ_FOREACH_LOCKED(p, &port_list->l_list, p_entry) { |
1367 | if (strcmp(p->p_ifp0->if_xname, name) == 0) |
1368 | return (p); |
1369 | } |
1370 | |
1371 | 	return (NULL); |
1372 | } |
1373 | |
1374 | static int |
1375 | veb_del_port(struct veb_softc *sc, const struct ifbreq *req, unsigned int span) |
1376 | { |
1377 | struct veb_port *p; |
1378 | |
1379 | 	NET_ASSERT_LOCKED(); |
1380 | 	p = veb_trunkport(sc, req->ifbr_ifsname, span); |
1381 | 	if (p == NULL) |
1382 | 		return (EINVAL); |
1383 | |
1384 | veb_p_dtor(sc, p, "del"); |
1385 | |
1386 | return (0); |
1387 | } |
1388 | |
1389 | static struct veb_port * |
1390 | veb_port_get(struct veb_softc *sc, const char *name) |
1391 | { |
1392 | struct veb_port *p; |
1393 | |
1394 | 	NET_ASSERT_LOCKED(); |
1395 | |
1396 | 	SMR_TAILQ_FOREACH_LOCKED(p, &sc->sc_ports.l_list, p_entry) { |
1397 | struct ifnet *ifp0 = p->p_ifp0; |
1398 | if (strncmp(ifp0->if_xname, name, |
1399 | sizeof(ifp0->if_xname)) == 0) { |
1400 | refcnt_take(&p->p_refs); |
1401 | break; |
1402 | } |
1403 | } |
1404 | |
1405 | return (p); |
1406 | } |
1407 | |
1408 | static void |
1409 | veb_port_put(struct veb_softc *sc, struct veb_port *p) |
1410 | { |
1411 | refcnt_rele_wake(&p->p_refs); |
1412 | } |
1413 | |
1414 | static int |
1415 | veb_port_set_protected(struct veb_softc *sc, const struct ifbreq *ifbr) |
1416 | { |
1417 | struct veb_port *p; |
1418 | |
1419 | p = veb_port_get(sc, ifbr->ifbr_ifsname); |
1420 | 	if (p == NULL) |
1421 | 		return (ESRCH); |
1422 | |
1423 | p->p_protected = ifbr->ifbr_protected; |
1424 | veb_port_put(sc, p); |
1425 | |
1426 | return (0); |
1427 | } |
1428 | |
1429 | static int |
1430 | veb_rule_add(struct veb_softc *sc, const struct ifbrlreq *ifbr) |
1431 | { |
1432 | const struct ifbrarpf *brla = &ifbr->ifbr_arpf; |
1433 | struct veb_rule vr, *vrp; |
1434 | struct veb_port *p; |
1435 | int error; |
1436 | |
1437 | memset(&vr, 0, sizeof(vr))__builtin_memset((&vr), (0), (sizeof(vr))); |
1438 | |
1439 | switch (ifbr->ifbr_action) { |
1440 | case BRL_ACTION_BLOCK0x01: |
1441 | vr.vr_action = VEB_R_BLOCK2; |
1442 | break; |
1443 | case BRL_ACTION_PASS0x02: |
1444 | vr.vr_action = VEB_R_PASS1; |
1445 | break; |
1446 | /* XXX VEB_R_MATCH */ |
1447 | default: |
1448 | return (EINVAL22); |
1449 | } |
1450 | |
1451 | if (!ISSET(ifbr->ifbr_flags, BRL_FLAG_IN|BRL_FLAG_OUT)((ifbr->ifbr_flags) & (0x08|0x04))) |
1452 | return (EINVAL22); |
1453 | if (ISSET(ifbr->ifbr_flags, BRL_FLAG_IN)((ifbr->ifbr_flags) & (0x08))) |
1454 | SET(vr.vr_flags, VEB_R_F_IN)((vr.vr_flags) |= ((1U << 0))); |
1455 | if (ISSET(ifbr->ifbr_flags, BRL_FLAG_OUT)((ifbr->ifbr_flags) & (0x04))) |
1456 | SET(vr.vr_flags, VEB_R_F_OUT)((vr.vr_flags) |= ((1U << 1))); |
1457 | |
1458 | if (ISSET(ifbr->ifbr_flags, BRL_FLAG_SRCVALID)((ifbr->ifbr_flags) & (0x02))) { |
1459 | SET(vr.vr_flags, VEB_R_F_SRC)((vr.vr_flags) |= ((1U << 2))); |
1460 | vr.vr_src = ether_addr_to_e64(&ifbr->ifbr_src); |
1461 | } |
1462 | if (ISSET(ifbr->ifbr_flags, BRL_FLAG_DSTVALID)((ifbr->ifbr_flags) & (0x01))) { |
1463 | SET(vr.vr_flags, VEB_R_F_DST)((vr.vr_flags) |= ((1U << 3))); |
1464 | vr.vr_dst = ether_addr_to_e64(&ifbr->ifbr_dst); |
1465 | } |
1466 | |
1467 | /* ARP rule */ |
1468 | if (ISSET(brla->brla_flags, BRLA_ARP|BRLA_RARP)((brla->brla_flags) & (0x01|0x02))) { |
1469 | if (ISSET(brla->brla_flags, BRLA_ARP)((brla->brla_flags) & (0x01))) |
1470 | SET(vr.vr_flags, VEB_R_F_ARP)((vr.vr_flags) |= ((1U << 4))); |
1471 | if (ISSET(brla->brla_flags, BRLA_RARP)((brla->brla_flags) & (0x02))) |
1472 | SET(vr.vr_flags, VEB_R_F_RARP)((vr.vr_flags) |= ((1U << 5))); |
1473 | |
1474 | if (ISSET(brla->brla_flags, BRLA_SHA)((brla->brla_flags) & (0x10))) { |
1475 | SET(vr.vr_flags, VEB_R_F_SHA)((vr.vr_flags) |= ((1U << 6))); |
1476 | vr.vr_arp_sha = brla->brla_sha; |
1477 | } |
1478 | if (ISSET(brla->brla_flags, BRLA_THA)((brla->brla_flags) & (0x40))) { |
1479 | SET(vr.vr_flags, VEB_R_F_THA)((vr.vr_flags) |= ((1U << 8))); |
1480 | vr.vr_arp_tha = brla->brla_tha; |
1481 | } |
1482 | if (ISSET(brla->brla_flags, BRLA_SPA)((brla->brla_flags) & (0x20))) { |
1483 | SET(vr.vr_flags, VEB_R_F_SPA)((vr.vr_flags) |= ((1U << 7))); |
1484 | vr.vr_arp_spa = brla->brla_spa; |
1485 | } |
1486 | if (ISSET(brla->brla_flags, BRLA_TPA)((brla->brla_flags) & (0x80))) { |
1487 | SET(vr.vr_flags, VEB_R_F_TPA)((vr.vr_flags) |= ((1U << 9))); |
1488 | vr.vr_arp_tpa = brla->brla_tpa; |
1489 | } |
1490 | vr.vr_arp_op = htons(brla->brla_op)(__uint16_t)(__builtin_constant_p(brla->brla_op) ? (__uint16_t )(((__uint16_t)(brla->brla_op) & 0xffU) << 8 | ( (__uint16_t)(brla->brla_op) & 0xff00U) >> 8) : __swap16md (brla->brla_op)); |
1491 | } |
1492 | |
1493 | if (ifbr->ifbr_tagname[0] != '\0') { |
1494 | #if NPF1 > 0 |
1495 | vr.vr_pftag = pf_tagname2tag((char *)ifbr->ifbr_tagname, 1); |
1496 | if (vr.vr_pftag == 0) |
1497 | return (ENOMEM12); |
1498 | #else |
1499 | return (EINVAL22); |
1500 | #endif |
1501 | } |
1502 | |
1503 | p = veb_port_get(sc, ifbr->ifbr_ifsname); |
1504 | if (p == NULL((void *)0)) { |
1505 | error = ESRCH3; |
1506 | goto error; |
1507 | } |
1508 | |
1509 | vrp = pool_get(&veb_rule_pool, PR_WAITOK0x0001|PR_LIMITFAIL0x0004|PR_ZERO0x0008); |
1510 | if (vrp == NULL((void *)0)) { |
1511 | error = ENOMEM12; |
1512 | goto port_put; |
1513 | } |
1514 | |
1515 | *vrp = vr; |
1516 | |
1517 | /* there's one big lock on a veb for all ports */ |
1518 | error = rw_enter(&sc->sc_rule_lock, RW_WRITE0x0001UL|RW_INTR0x0010UL); |
1519 | if (error != 0) |
1520 | goto rule_put; |
1521 | |
1522 | TAILQ_INSERT_TAIL(&p->p_vrl, vrp, vr_entry)do { (vrp)->vr_entry.tqe_next = ((void *)0); (vrp)->vr_entry .tqe_prev = (&p->p_vrl)->tqh_last; *(&p->p_vrl )->tqh_last = (vrp); (&p->p_vrl)->tqh_last = & (vrp)->vr_entry.tqe_next; } while (0); |
1523 | p->p_nvrl++; |
1524 | if (ISSET(vr.vr_flags, VEB_R_F_OUT)((vr.vr_flags) & ((1U << 1)))) { |
1525 | SMR_TAILQ_INSERT_TAIL_LOCKED(&p->p_vr_list[0],do { (vrp)->vr_lentry[0].smr_tqe_next = ((void *)0); (vrp) ->vr_lentry[0].smr_tqe_prev = (&p->p_vr_list[0])-> smr_tqh_last; do { __asm volatile("" ::: "memory"); } while ( 0); *(&p->p_vr_list[0])->smr_tqh_last = (vrp); (& p->p_vr_list[0])->smr_tqh_last = &(vrp)->vr_lentry [0].smr_tqe_next; } while (0) |
1526 | vrp, vr_lentry[0])do { (vrp)->vr_lentry[0].smr_tqe_next = ((void *)0); (vrp) ->vr_lentry[0].smr_tqe_prev = (&p->p_vr_list[0])-> smr_tqh_last; do { __asm volatile("" ::: "memory"); } while ( 0); *(&p->p_vr_list[0])->smr_tqh_last = (vrp); (& p->p_vr_list[0])->smr_tqh_last = &(vrp)->vr_lentry [0].smr_tqe_next; } while (0); |
1527 | } |
1528 | if (ISSET(vr.vr_flags, VEB_R_F_IN)((vr.vr_flags) & ((1U << 0)))) { |
1529 | SMR_TAILQ_INSERT_TAIL_LOCKED(&p->p_vr_list[1],do { (vrp)->vr_lentry[1].smr_tqe_next = ((void *)0); (vrp) ->vr_lentry[1].smr_tqe_prev = (&p->p_vr_list[1])-> smr_tqh_last; do { __asm volatile("" ::: "memory"); } while ( 0); *(&p->p_vr_list[1])->smr_tqh_last = (vrp); (& p->p_vr_list[1])->smr_tqh_last = &(vrp)->vr_lentry [1].smr_tqe_next; } while (0) |
1530 | vrp, vr_lentry[1])do { (vrp)->vr_lentry[1].smr_tqe_next = ((void *)0); (vrp) ->vr_lentry[1].smr_tqe_prev = (&p->p_vr_list[1])-> smr_tqh_last; do { __asm volatile("" ::: "memory"); } while ( 0); *(&p->p_vr_list[1])->smr_tqh_last = (vrp); (& p->p_vr_list[1])->smr_tqh_last = &(vrp)->vr_lentry [1].smr_tqe_next; } while (0); |
1531 | } |
1532 | |
1533 | rw_exit(&sc->sc_rule_lock); |
1534 | veb_port_put(sc, p); |
1535 | |
1536 | return (0); |
1537 | |
1538 | rule_put: |
1539 | pool_put(&veb_rule_pool, vrp); |
1540 | port_put: |
1541 | veb_port_put(sc, p); |
1542 | error: |
1543 | #if NPF1 > 0 |
1544 | pf_tag_unref(vr.vr_pftag); |
1545 | #endif |
1546 | return (error); |
1547 | } |
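/*
 * Illustrative userland sketch (not part of if_veb.c): one way a rule
 * like "block incoming frames on a port from a given source address"
 * could reach veb_rule_add() above.  It assumes the SIOCBRDGARL ioctl
 * and the struct ifbrlreq layout from net/if_bridge.h; error handling
 * is kept minimal.
 */
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/ioctl.h>

#include <net/if.h>
#include <net/if_bridge.h>
#include <netinet/in.h>
#include <netinet/if_ether.h>

#include <string.h>
#include <unistd.h>
#include <err.h>

int
veb_block_src(const char *veb, const char *port, const char *lladdr)
{
	struct ifbrlreq ifbr;
	struct ether_addr *ea;
	int s;

	if ((s = socket(AF_INET, SOCK_DGRAM, 0)) == -1)
		err(1, "socket");
	if ((ea = ether_aton(lladdr)) == NULL)
		errx(1, "invalid link address: %s", lladdr);

	memset(&ifbr, 0, sizeof(ifbr));
	strlcpy(ifbr.ifbr_name, veb, sizeof(ifbr.ifbr_name));		/* e.g. veb0 */
	strlcpy(ifbr.ifbr_ifsname, port, sizeof(ifbr.ifbr_ifsname));	/* member port */
	ifbr.ifbr_action = BRL_ACTION_BLOCK;
	ifbr.ifbr_flags = BRL_FLAG_IN | BRL_FLAG_SRCVALID;
	ifbr.ifbr_src = *ea;

	if (ioctl(s, SIOCBRDGARL, &ifbr) == -1)
		err(1, "SIOCBRDGARL");

	close(s);
	return (0);
}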
1548 |
1549 | static void
1550 | veb_rule_list_free(struct veb_rule *nvr)
1551 | {
1552 | 	struct veb_rule *vr;
1553 |
1554 | 	while ((vr = nvr) != NULL) {
1555 | 		nvr = TAILQ_NEXT(vr, vr_entry);
1556 | 		pool_put(&veb_rule_pool, vr);
1557 | 	}
1558 | }
1559 |
1560 | static int
1561 | veb_rule_list_flush(struct veb_softc *sc, const struct ifbrlreq *ifbr)
1562 | {
1563 | 	struct veb_port *p;
1564 | 	struct veb_rule *vr;
1565 | 	int error;
1566 |
1567 | 	p = veb_port_get(sc, ifbr->ifbr_ifsname);
1568 | 	if (p == NULL)
1569 | 		return (ESRCH);
1570 |
1571 | 	error = rw_enter(&sc->sc_rule_lock, RW_WRITE|RW_INTR);
1572 | 	if (error != 0) {
1573 | 		veb_port_put(sc, p);
1574 | 		return (error);
1575 | 	}
1576 |
1577 | 	/* take all the rules away */
1578 | 	vr = TAILQ_FIRST(&p->p_vrl);
1579 |
1580 | 	/* reset the lists and counts of rules */
1581 | 	TAILQ_INIT(&p->p_vrl);
1582 | 	p->p_nvrl = 0;
1583 | 	SMR_TAILQ_INIT(&p->p_vr_list[0]);
1584 | 	SMR_TAILQ_INIT(&p->p_vr_list[1]);
1585 |
1586 | 	rw_exit(&sc->sc_rule_lock);
1587 | 	veb_port_put(sc, p);
1588 |
1589 | 	smr_barrier();
1590 | 	veb_rule_list_free(vr);
1591 |
1592 | 	return (0);
1593 | }
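/*
 * The flush detaches the whole rule list while holding sc_rule_lock,
 * but the rules are only freed after smr_barrier() has run: the
 * p_vr_list heads are walked under SMR read sections in the forwarding
 * path, so the rule memory has to stay valid until every reader that
 * may still see the old list has left its read section.
 */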
1594 |
1595 | static void
1596 | veb_rule2ifbr(struct ifbrlreq *ifbr, const struct veb_rule *vr)
1597 | {
1598 | 	switch (vr->vr_action) {
1599 | 	case VEB_R_PASS:
1600 | 		ifbr->ifbr_action = BRL_ACTION_PASS;
1601 | 		break;
1602 | 	case VEB_R_BLOCK:
1603 | 		ifbr->ifbr_action = BRL_ACTION_BLOCK;
1604 | 		break;
1605 | 	}
1606 |
1607 | 	if (ISSET(vr->vr_flags, VEB_R_F_IN))
1608 | 		SET(ifbr->ifbr_flags, BRL_FLAG_IN);
1609 | 	if (ISSET(vr->vr_flags, VEB_R_F_OUT))
1610 | 		SET(ifbr->ifbr_flags, BRL_FLAG_OUT);
1611 |
1612 | 	if (ISSET(vr->vr_flags, VEB_R_F_SRC)) {
1613 | 		SET(ifbr->ifbr_flags, BRL_FLAG_SRCVALID);
1614 | 		ether_e64_to_addr(&ifbr->ifbr_src, vr->vr_src);
1615 | 	}
1616 | 	if (ISSET(vr->vr_flags, VEB_R_F_DST)) {
1617 | 		SET(ifbr->ifbr_flags, BRL_FLAG_DSTVALID);
1618 | 		ether_e64_to_addr(&ifbr->ifbr_dst, vr->vr_dst);
1619 | 	}
1620 |
1621 | 	/* ARP rule */
1622 | 	if (ISSET(vr->vr_flags, VEB_R_F_ARP|VEB_R_F_RARP)) {
1623 | 		struct ifbrarpf *brla = &ifbr->ifbr_arpf;
1624 |
1625 | 		if (ISSET(vr->vr_flags, VEB_R_F_ARP))
1626 | 			SET(brla->brla_flags, BRLA_ARP);
1627 | 		if (ISSET(vr->vr_flags, VEB_R_F_RARP))
1628 | 			SET(brla->brla_flags, BRLA_RARP);
1629 |
1630 | 		if (ISSET(vr->vr_flags, VEB_R_F_SHA)) {
1631 | 			SET(brla->brla_flags, BRLA_SHA);
1632 | 			brla->brla_sha = vr->vr_arp_sha;
1633 | 		}
1634 | 		if (ISSET(vr->vr_flags, VEB_R_F_THA)) {
1635 | 			SET(brla->brla_flags, BRLA_THA);
1636 | 			brla->brla_tha = vr->vr_arp_tha;
1637 | 		}
1638 |
1639 | 		if (ISSET(vr->vr_flags, VEB_R_F_SPA)) {
1640 | 			SET(brla->brla_flags, BRLA_SPA);
1641 | 			brla->brla_spa = vr->vr_arp_spa;
1642 | 		}
1643 | 		if (ISSET(vr->vr_flags, VEB_R_F_TPA)) {
1644 | 			SET(brla->brla_flags, BRLA_TPA);
1645 | 			brla->brla_tpa = vr->vr_arp_tpa;
1646 | 		}
1647 |
1648 | 		brla->brla_op = ntohs(vr->vr_arp_op);
1649 | 	}
1650 |
1651 | #if NPF > 0
1652 | 	if (vr->vr_pftag != 0)
1653 | 		pf_tag2tagname(vr->vr_pftag, ifbr->ifbr_tagname);
1654 | #endif
1655 | }
1656 |
1657 | static int
1658 | veb_rule_list_get(struct veb_softc *sc, struct ifbrlconf *ifbrl)
1659 | {
1660 | 	struct veb_port *p;
1661 | 	struct veb_rule *vr;
1662 | 	struct ifbrlreq *ifbr, *ifbrs;
1663 | 	int error = 0;
1664 | 	size_t len;
1665 |
1666 | 	p = veb_port_get(sc, ifbrl->ifbrl_ifsname);
1667 | 	if (p == NULL)
1668 | 		return (ESRCH);
1669 |
1670 | 	len = p->p_nvrl; /* estimate */
1671 | 	if (ifbrl->ifbrl_len == 0 || len == 0) {
1672 | 		ifbrl->ifbrl_len = len * sizeof(*ifbrs);
1673 | 		goto port_put;
1674 | 	}
1675 |
1676 | 	error = rw_enter(&sc->sc_rule_lock, RW_READ|RW_INTR);
1677 | 	if (error != 0)
1678 | 		goto port_put;
1679 |
1680 | 	ifbrs = mallocarray(p->p_nvrl, sizeof(*ifbrs), M_TEMP,
1681 | 	    M_WAITOK|M_CANFAIL|M_ZERO);
1682 | 	if (ifbrs == NULL) {
1683 | 		rw_exit(&sc->sc_rule_lock);
1684 | 		goto port_put;
1685 | 	}
1686 | 	len = p->p_nvrl * sizeof(*ifbrs);
1687 |
1688 | 	ifbr = ifbrs;
1689 | 	TAILQ_FOREACH(vr, &p->p_vrl, vr_entry) {
1690 | 		strlcpy(ifbr->ifbr_name, sc->sc_if.if_xname,
1691 | 		    sizeof(ifbr->ifbr_name));
1692 | 		strlcpy(ifbr->ifbr_ifsname, p->p_ifp0->if_xname,
1693 | 		    sizeof(ifbr->ifbr_ifsname));
1694 | 		veb_rule2ifbr(ifbr, vr);
1695 |
1696 | 		ifbr++;
1697 | 	}
1698 |
1699 | 	rw_exit(&sc->sc_rule_lock);
1700 |
1701 | 	error = copyout(ifbrs, ifbrl->ifbrl_buf, min(len, ifbrl->ifbrl_len));
1702 | 	if (error == 0)
1703 | 		ifbrl->ifbrl_len = len;
1704 | 	free(ifbrs, M_TEMP, len);
1705 |
1706 | port_put:
1707 | 	veb_port_put(sc, p);
1708 | 	return (error);
1709 | }
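/*
 * Like the other bridge list ioctls, this follows a two-pass
 * convention: a request with ifbrl_len == 0 only reports how much
 * buffer space the p_nvrl rules need, and a second request with a
 * large enough buffer gets the rules copied out.  On success
 * ifbrl_len is updated to the amount of data actually produced.
 */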
1710 |
1711 | static int
1712 | veb_port_list(struct veb_softc *sc, struct ifbifconf *bifc)
1713 | {
1714 | 	struct ifnet *ifp = &sc->sc_if;
1715 | 	struct veb_port *p;
1716 | 	struct ifnet *ifp0;
1717 | 	struct ifbreq breq;
1718 | 	int n = 0, error = 0;
1719 |
1720 | 	NET_ASSERT_LOCKED();
1721 |
1722 | 	if (bifc->ifbic_len == 0) {
1723 | 		n = sc->sc_ports.l_count + sc->sc_spans.l_count;
1724 | 		goto done;
1725 | 	}
1726 |
1727 | 	SMR_TAILQ_FOREACH_LOCKED(p, &sc->sc_ports.l_list, p_entry) {
1728 | 		if (bifc->ifbic_len < sizeof(breq))
1729 | 			break;
1730 |
1731 | 		memset(&breq, 0, sizeof(breq));
1732 |
1733 | 		ifp0 = p->p_ifp0;
1734 |
1735 | 		strlcpy(breq.ifbr_name, ifp->if_xname, IFNAMSIZ);
1736 | 		strlcpy(breq.ifbr_ifsname, ifp0->if_xname, IFNAMSIZ);
1737 |
1738 | 		breq.ifbr_ifsflags = p->p_bif_flags;
1739 | 		breq.ifbr_portno = ifp0->if_index;
1740 | 		breq.ifbr_protected = p->p_protected;
1741 | 		if ((error = copyout(&breq, bifc->ifbic_req + n,
1742 | 		    sizeof(breq))) != 0)
1743 | 			goto done;
1744 |
1745 | 		bifc->ifbic_len -= sizeof(breq);
1746 | 		n++;
1747 | 	}
1748 |
1749 | 	SMR_TAILQ_FOREACH_LOCKED(p, &sc->sc_spans.l_list, p_entry) {
1750 | 		if (bifc->ifbic_len < sizeof(breq))
1751 | 			break;
1752 |
1753 | 		memset(&breq, 0, sizeof(breq));
1754 |
1755 | 		strlcpy(breq.ifbr_name, ifp->if_xname, IFNAMSIZ);
1756 | 		strlcpy(breq.ifbr_ifsname, p->p_ifp0->if_xname, IFNAMSIZ);
1757 |
1758 | 		breq.ifbr_ifsflags = p->p_bif_flags;
1759 | 		if ((error = copyout(&breq, bifc->ifbic_req + n,
1760 | 		    sizeof(breq))) != 0)
1761 | 			goto done;
1762 |
1763 | 		bifc->ifbic_len -= sizeof(breq);
1764 | 		n++;
1765 | 	}
1766 |
1767 | done:
1768 | 	bifc->ifbic_len = n * sizeof(breq);
1769 | 	return (error);
1770 | }
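/*
 * Illustrative userland sketch (not part of if_veb.c): the port list
 * above is read with the same two-pass convention.  A first call with
 * ifbic_len == 0 only reports how much buffer space is needed; the
 * second call copies out the ifbreq records.  The SIOCBRDGIFS ioctl
 * and the ifbic_name/ifbic_buf members of struct ifbifconf are assumed
 * from net/if_bridge.h.
 */
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/ioctl.h>

#include <net/if.h>
#include <net/if_bridge.h>
#include <netinet/in.h>

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <err.h>

void
veb_list_ports(const char *veb)
{
	struct ifbifconf bifc;
	struct ifbreq *breq;
	size_t i, n;
	int s;

	if ((s = socket(AF_INET, SOCK_DGRAM, 0)) == -1)
		err(1, "socket");

	memset(&bifc, 0, sizeof(bifc));
	strlcpy(bifc.ifbic_name, veb, sizeof(bifc.ifbic_name));

	/* first pass: ask how much buffer space the port list needs */
	if (ioctl(s, SIOCBRDGIFS, &bifc) == -1)
		err(1, "SIOCBRDGIFS");
	if (bifc.ifbic_len == 0) {
		close(s);
		return;
	}

	if ((breq = malloc(bifc.ifbic_len)) == NULL)
		err(1, "malloc");
	bifc.ifbic_buf = (caddr_t)breq;

	/* second pass: fetch the ifbreq records themselves */
	if (ioctl(s, SIOCBRDGIFS, &bifc) == -1)
		err(1, "SIOCBRDGIFS");

	n = bifc.ifbic_len / sizeof(*breq);
	for (i = 0; i < n; i++)
		printf("%s: port %s\n", veb, breq[i].ifbr_ifsname);

	free(breq);
	close(s);
}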
1771 |
1772 | static int
1773 | veb_port_set_flags(struct veb_softc *sc, struct ifbreq *ifbr)
1774 | {
1775 | 	struct veb_port *p;
1776 |
1777 | 	if (ISSET(ifbr->ifbr_ifsflags, ~VEB_IFBIF_FLAGS))
1778 | 		return (EINVAL);
1779 |
1780 | 	p = veb_port_get(sc, ifbr->ifbr_ifsname);
1781 | 	if (p == NULL)
1782 | 		return (ESRCH);
1783 |
1784 | 	p->p_bif_flags = ifbr->ifbr_ifsflags;
1785 |
1786 | 	veb_port_put(sc, p);
1787 | 	return (0);
1788 | }
1789 |
1790 | static int
1791 | veb_port_get_flags(struct veb_softc *sc, struct ifbreq *ifbr)
1792 | {
1793 | 	struct veb_port *p;
1794 |
1795 | 	p = veb_port_get(sc, ifbr->ifbr_ifsname);
1796 | 	if (p == NULL)
1797 | 		return (ESRCH);
1798 |
1799 | 	ifbr->ifbr_ifsflags = p->p_bif_flags;
1800 | 	ifbr->ifbr_portno = p->p_ifp0->if_index;
1801 | 	ifbr->ifbr_protected = p->p_protected;
1802 |
1803 | 	veb_port_put(sc, p);
1804 | 	return (0);
1805 | }
1806 |
1807 | static int
1808 | veb_add_addr(struct veb_softc *sc, const struct ifbareq *ifba)
1809 | {
1810 | 	struct veb_port *p;
1811 | 	int error = 0;
1812 | 	unsigned int type;
1813 |
1814 | 	if (ISSET(ifba->ifba_flags, ~IFBAF_TYPEMASK))
1815 | 		return (EINVAL);
1816 | 	switch (ifba->ifba_flags & IFBAF_TYPEMASK) {
1817 | 	case IFBAF_DYNAMIC:
1818 | 		type = EBE_DYNAMIC;
1819 | 		break;
1820 | 	case IFBAF_STATIC:
1821 | 		type = EBE_STATIC;
1822 | 		break;
1823 | 	default:
1824 | 		return (EINVAL);
1825 | 	}
1826 |
1827 | 	if (ifba->ifba_dstsa.ss_family != AF_UNSPEC)
1828 | 		return (EAFNOSUPPORT);
1829 |
1830 | 	p = veb_port_get(sc, ifba->ifba_ifsname);
1831 | 	if (p == NULL)
1832 | 		return (ESRCH);
1833 |
1834 | 	error = etherbridge_add_addr(&sc->sc_eb, p, &ifba->ifba_dst, type);
1835 |
1836 | 	veb_port_put(sc, p);
1837 |
1838 | 	return (error);
1839 | }
1840 |
1841 | static int
1842 | veb_del_addr(struct veb_softc *sc, const struct ifbareq *ifba)
1843 | {
1844 | 	return (etherbridge_del_addr(&sc->sc_eb, &ifba->ifba_dst));
1845 | }
1846 |
1847 | static int
1848 | veb_p_ioctl(struct ifnet *ifp0, u_long cmd, caddr_t data)
1849 | {
1850 | 	const struct ether_brport *eb = ether_brport_get_locked(ifp0);
1851 | 	struct veb_port *p;
1852 | 	int error = 0;
1853 |
1854 | 	KASSERTMSG(eb != NULL,
1855 | 	    "%s: %s called without an ether_brport set",
1856 | 	    ifp0->if_xname, __func__);
1857 | 	KASSERTMSG((eb->eb_input == veb_port_input) ||
1858 | 	    (eb->eb_input == veb_span_input),
1859 | 	    "%s called %s, but eb_input (%p) seems wrong",
1860 | 	    ifp0->if_xname, __func__, eb->eb_input);
1861 |
1862 | 	p = eb->eb_port;
1863 |
1864 | 	switch (cmd) {
1865 | 	case SIOCSIFADDR:
1866 | 		error = EBUSY;
1867 | 		break;
1868 |
1869 | 	default:
1870 | 		error = (*p->p_ioctl)(ifp0, cmd, data);
1871 | 		break;
1872 | 	}
1873 |
1874 | 	return (error);
1875 | }
1876 |
1877 | static int
1878 | veb_p_output(struct ifnet *ifp0, struct mbuf *m, struct sockaddr *dst,
1879 |     struct rtentry *rt)
1880 | {
1881 | 	int (*p_output)(struct ifnet *, struct mbuf *, struct sockaddr *,
1882 | 	    struct rtentry *) = NULL;
1883 | 	const struct ether_brport *eb;
1884 |
1885 | 	/* restrict transmission to bpf only */
1886 | 	if ((m_tag_find(m, PACKET_TAG_DLT, NULL) == NULL)) {
1887 | 		m_freem(m);
1888 | 		return (EBUSY);
1889 | 	}
1890 |
1891 | 	smr_read_enter();
1892 | 	eb = ether_brport_get(ifp0);
1893 | 	if (eb != NULL && eb->eb_input == veb_port_input) {
1894 | 		struct veb_port *p = eb->eb_port;
1895 | 		p_output = p->p_output; /* code doesn't go away */
1896 | 	}
1897 | 	smr_read_leave();
1898 |
1899 | 	if (p_output == NULL) {
1900 | 		m_freem(m);
1901 | 		return (ENXIO);
1902 | 	}
1903 |
1904 | 	return ((*p_output)(ifp0, m, dst, rt));
1905 | }
1906 |
1907 | static void
1908 | veb_p_dtor(struct veb_softc *sc, struct veb_port *p, const char *op)
1909 | {
1910 | 	struct ifnet *ifp = &sc->sc_if;
1911 | 	struct ifnet *ifp0 = p->p_ifp0;
1912 | 	struct veb_ports *port_list;
1913 |
1914 | 	DPRINTF(sc, "%s %s: destroying port\n",
1915 | 	    ifp->if_xname, ifp0->if_xname);
1916 |
1917 | 	ifp0->if_ioctl = p->p_ioctl;
1918 | 	ifp0->if_output = p->p_output;
1919 |
1920 | 	ether_brport_clr(ifp0);
1921 |
1922 | 	if_detachhook_del(ifp0, &p->p_dtask);
1923 | 	if_linkstatehook_del(ifp0, &p->p_ltask);
1924 |
1925 | 	if (ISSET(p->p_bif_flags, IFBIF_SPAN)) {
1926 | 		port_list = &sc->sc_spans;
1927 | 	} else {
1928 | 		if (ifpromisc(ifp0, 0) != 0) {
1929 | 			log(LOG_WARNING, "%s %s: unable to disable promisc\n",
1930 | 			    ifp->if_xname, ifp0->if_xname);
1931 | 		}
1932 |
1933 | 		etherbridge_detach_port(&sc->sc_eb, p);
1934 |
1935 | 		port_list = &sc->sc_ports;
1936 | 	}
1937 | 	SMR_TAILQ_REMOVE_LOCKED(&port_list->l_list, p, p_entry);
1938 | 	port_list->l_count--;
1939 |
1940 | 	refcnt_finalize(&p->p_refs, "vebpdtor");
1941 | 	smr_barrier();
1942 |
1943 | 	veb_rule_list_free(TAILQ_FIRST(&p->p_vrl));
1944 |
1945 | 	if_put(ifp0);
1946 | 	free(p, M_DEVBUF, sizeof(*p));
1947 | }
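/*
 * Teardown order matters in veb_p_dtor(): the interface's ioctl and
 * output hooks are restored and the ether_brport is cleared first, so
 * no new traffic or ioctls can reach the port; the port is then
 * unlinked from its SMR list, refcnt_finalize() waits for outstanding
 * references, and smr_barrier() waits for readers still inside an SMR
 * section before the rules and the port itself are freed.
 */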
1948 |
1949 | static void
1950 | veb_p_detach(void *arg)
1951 | {
1952 | 	struct veb_port *p = arg;
1953 | 	struct veb_softc *sc = p->p_veb;
1954 |
1955 | 	veb_p_dtor(sc, p, "detach");
1956 |
1957 | 	NET_ASSERT_LOCKED();
1958 | }
1959 |
1960 | static int
1961 | veb_p_active(struct veb_port *p)
1962 | {
1963 | 	struct ifnet *ifp0 = p->p_ifp0;
1964 |
1965 | 	return (ISSET(ifp0->if_flags, IFF_RUNNING) &&
1966 | 	    LINK_STATE_IS_UP(ifp0->if_link_state));
1967 | }
1968 |
1969 | static void
1970 | veb_p_linkch(void *arg)
1971 | {
1972 | 	struct veb_port *p = arg;
1973 | 	u_char link_state = LINK_STATE_FULL_DUPLEX;
1974 |
1975 | 	NET_ASSERT_LOCKED();
1976 |
1977 | 	if (!veb_p_active(p))
1978 | 		link_state = LINK_STATE_DOWN;
1979 |
1980 | 	p->p_link_state = link_state;
1981 | }
1982 |
1983 | static int
1984 | veb_up(struct veb_softc *sc)
1985 | {
1986 | 	struct ifnet *ifp = &sc->sc_if;
1987 | 	int error;
1988 |
1989 | 	error = etherbridge_up(&sc->sc_eb);
1990 | 	if (error != 0)
1991 | 		return (error);
1992 |
1993 | 	NET_ASSERT_LOCKED();
1994 | 	SET(ifp->if_flags, IFF_RUNNING);
1995 |
1996 | 	return (0);
1997 | }
1998 |
1999 | static int
2000 | veb_iff(struct veb_softc *sc)
2001 | {
2002 | 	return (0);
2003 | }
2004 |
2005 | static int
2006 | veb_down(struct veb_softc *sc)
2007 | {
2008 | 	struct ifnet *ifp = &sc->sc_if;
2009 | 	int error;
2010 |
2011 | 	error = etherbridge_down(&sc->sc_eb);
2012 | 	if (error != 0)
2013 | 		return (0);
2014 |
2015 | 	NET_ASSERT_LOCKED();
2016 | 	CLR(ifp->if_flags, IFF_RUNNING);
2017 |
2018 | 	return (0);
2019 | }
2020 |
2021 | static int
2022 | veb_eb_port_cmp(void *arg, void *a, void *b)
2023 | {
2024 | 	struct veb_port *pa = a, *pb = b;
2025 | 	return (pa == pb);
2026 | }
2027 |
2028 | static void *
2029 | veb_eb_port_take(void *arg, void *port)
2030 | {
2031 | 	struct veb_port *p = port;
2032 |
2033 | 	refcnt_take(&p->p_refs);
2034 |
2035 | 	return (p);
2036 | }
2037 |
2038 | static void
2039 | veb_eb_port_rele(void *arg, void *port)
2040 | {
2041 | 	struct veb_port *p = port;
2042 |
2043 | 	refcnt_rele_wake(&p->p_refs);
2044 | }
2045 |
2046 | static void
2047 | veb_eb_brport_take(void *port)
2048 | {
2049 | 	veb_eb_port_take(NULL, port);
2050 | }
2051 |
2052 | static void
2053 | veb_eb_brport_rele(void *port)
2054 | {
2055 | 	veb_eb_port_rele(NULL, port);
2056 | }
2057 |
2058 | static size_t
2059 | veb_eb_port_ifname(void *arg, char *dst, size_t len, void *port)
2060 | {
2061 | 	struct veb_port *p = port;
2062 |
2063 | 	return (strlcpy(dst, p->p_ifp0->if_xname, len));
2064 | }
2065 |
2066 | static void
2067 | veb_eb_port_sa(void *arg, struct sockaddr_storage *ss, void *port)
2068 | {
2069 | 	ss->ss_family = AF_UNSPEC;
2070 | }
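/*
 * The veb_eb_* functions above are the callbacks handed to the shared
 * etherbridge code: comparing ports for equality, taking and releasing
 * port references for address-table entries, and describing a port
 * (its name and tunnel endpoint) when the table is dumped.  veb ports
 * have no tunnel endpoint, hence the AF_UNSPEC sockaddr.
 */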
2071 |
2072 | /*
2073 |  * virtual ethernet bridge port
2074 |  */
2075 |
2076 | static int
2077 | vport_clone_create(struct if_clone *ifc, int unit)
2078 | {
2079 | 	struct vport_softc *sc;
2080 | 	struct ifnet *ifp;
2081 |
2082 | 	sc = malloc(sizeof(*sc), M_DEVBUF, M_WAITOK|M_ZERO|M_CANFAIL);
2083 | 	if (sc == NULL)
2084 | 		return (ENOMEM);
2085 |
2086 | 	ifp = &sc->sc_ac.ac_if;
2087 |
2088 | 	snprintf(ifp->if_xname, sizeof(ifp->if_xname), "%s%d",
2089 | 	    ifc->ifc_name, unit);
2090 |
2091 | 	ifp->if_softc = sc;
2092 | 	ifp->if_type = IFT_ETHER;
2093 | 	ifp->if_hardmtu = ETHER_MAX_HARDMTU_LEN;
2094 | 	ifp->if_ioctl = vport_ioctl;
2095 | 	ifp->if_enqueue = vport_enqueue;
2096 | 	ifp->if_qstart = vport_start;
2097 | 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2098 | 	ifp->if_xflags = IFXF_CLONED | IFXF_MPSAFE;
2099 | 	ether_fakeaddr(ifp);
2100 |
2101 | 	if_counters_alloc(ifp);
2102 | 	if_attach(ifp);
2103 | 	ether_ifattach(ifp);
2104 |
2105 | 	return (0);
2106 | }
2107 |
2108 | static int
2109 | vport_clone_destroy(struct ifnet *ifp)
2110 | {
2111 | 	struct vport_softc *sc = ifp->if_softc;
2112 |
2113 | 	NET_LOCK();
2114 | 	sc->sc_dead = 1;
2115 |
2116 | 	if (ISSET(ifp->if_flags, IFF_RUNNING))
2117 | 		vport_down(sc);
2118 | 	NET_UNLOCK();
2119 |
2120 | 	ether_ifdetach(ifp);
2121 | 	if_detach(ifp);
2122 |
2123 | 	free(sc, M_DEVBUF, sizeof(*sc));
2124 |
2125 | 	return (0);
2126 | }
2127 |
2128 | static int
2129 | vport_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
2130 | {
2131 | 	struct vport_softc *sc = ifp->if_softc;
2132 | 	int error = 0;
2133 |
2134 | 	if (sc->sc_dead)
2135 | 		return (ENXIO);
2136 |
2137 | 	switch (cmd) {
2138 | 	case SIOCSIFFLAGS:
2139 | 		if (ISSET(ifp->if_flags, IFF_UP)) {
2140 | 			if (!ISSET(ifp->if_flags, IFF_RUNNING))
2141 | 				error = vport_up(sc);
2142 | 		} else {
2143 | 			if (ISSET(ifp->if_flags, IFF_RUNNING))
2144 | 				error = vport_down(sc);
2145 | 		}
2146 | 		break;
2147 |
2148 | 	case SIOCADDMULTI:
2149 | 	case SIOCDELMULTI:
2150 | 		break;
2151 |
2152 | 	default:
2153 | 		error = ether_ioctl(ifp, &sc->sc_ac, cmd, data);
2154 | 		break;
2155 | 	}
2156 |
2157 | 	if (error == ENETRESET)
2158 | 		error = vport_iff(sc);
2159 |
2160 | 	return (error);
2161 | }
2162 |
2163 | static int
2164 | vport_up(struct vport_softc *sc)
2165 | {
2166 | 	struct ifnet *ifp = &sc->sc_ac.ac_if;
2167 |
2168 | 	NET_ASSERT_LOCKED();
2169 | 	SET(ifp->if_flags, IFF_RUNNING);
2170 |
2171 | 	return (0);
2172 | }
2173 |
2174 | static int
2175 | vport_iff(struct vport_softc *sc)
2176 | {
2177 | 	return (0);
2178 | }
2179 |
2180 | static int
2181 | vport_down(struct vport_softc *sc)
2182 | {
2183 | 	struct ifnet *ifp = &sc->sc_ac.ac_if;
2184 |
2185 | 	NET_ASSERT_LOCKED();
2186 | 	CLR(ifp->if_flags, IFF_RUNNING);
2187 |
2188 | 	return (0);
2189 | }
2190 |
2191 | static int
2192 | vport_if_enqueue(struct ifnet *ifp, struct mbuf *m)
2193 | {
2194 | 	/*
2195 | 	 * switching an l2 packet toward a vport means pushing it
2196 | 	 * into the network stack. this function exists to make
2197 | 	 * if_vinput compat with veb calling if_enqueue.
2198 | 	 */
2199 |
2200 | 	if_vinput(ifp, m);
2201 |
2202 | 	return (0);
2203 | }
2204 |
2205 | static int
2206 | vport_enqueue(struct ifnet *ifp, struct mbuf *m)
2207 | {
2208 | 	struct arpcom *ac;
2209 | 	const struct ether_brport *eb;
2210 | 	int error = ENETDOWN;
2211 | #if NBPFILTER > 0
2212 | 	caddr_t if_bpf;
2213 | #endif
2214 |
2215 | 	/*
2216 | 	 * a packet sent from the l3 stack out a vport goes into
2217 | 	 * veb for switching out another port.
2218 | 	 */
2219 |
2220 | #if NPF > 0
2221 | 	/*
2222 | 	 * there's no relationship between pf states in the l3 stack
2223 | 	 * and the l2 bridge.
2224 | 	 */
2225 | 	pf_pkt_addr_changed(m);
2226 | #endif
2227 |
2228 | 	ac = (struct arpcom *)ifp;
2229 |
2230 | 	smr_read_enter();
2231 | 	eb = SMR_PTR_GET(&ac->ac_brport);
2232 | 	if (eb != NULL)
2233 | 		eb->eb_port_take(eb->eb_port);
2234 | 	smr_read_leave();
2235 | 	if (eb != NULL) {
2236 | 		struct mbuf *(*input)(struct ifnet *, struct mbuf *,
2237 | 		    uint64_t, void *) = eb->eb_input;
2238 | 		struct ether_header *eh;
2239 | 		uint64_t dst;
2240 |
2241 | 		counters_pkt(ifp->if_counters, ifc_opackets, ifc_obytes,
2242 | 		    m->m_pkthdr.len);
2243 |
2244 | #if NBPFILTER > 0
2245 | 		if_bpf = READ_ONCE(ifp->if_bpf);
2246 | 		if (if_bpf != NULL)
2247 | 			bpf_mtap_ether(if_bpf, m, BPF_DIRECTION_OUT);
2248 | #endif
2249 |
2250 | 		eh = mtod(m, struct ether_header *);
2251 | 		dst = ether_addr_to_e64((struct ether_addr *)eh->ether_dhost);
2252 |
2253 | 		if (input == veb_vport_input)
2254 | 			input = veb_port_input;
2255 | 		m = (*input)(ifp, m, dst, eb->eb_port);
2256 |
2257 | 		error = 0;
2258 |
2259 | 		eb->eb_port_rele(eb->eb_port);
2260 | 	}
2261 |
2262 | 	m_freem(m);
2263 |
2264 | 	return (error);
2265 | }
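/*
 * vport_enqueue() takes its own reference on the bridge port via
 * eb_port_take() while still inside the SMR read section, which keeps
 * the port alive after smr_read_leave() and across the call into the
 * bridge input function.  Locally generated packets are handed to
 * veb_port_input() even when the registered handler is the
 * vport-specific veb_vport_input(), so they take the normal bridge
 * forwarding path out of the veb.
 */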
2266 |
2267 | static void
2268 | vport_start(struct ifqueue *ifq)
2269 | {
2270 | 	ifq_purge(ifq);
2271 | }