File: net/if_trunk.c
Warning: line 807, column 8: Although the value stored to 'error' is used in the enclosing expression, the value is never actually read from 'error'
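The flagged expression sits in trunk_ioctl(), in the SIOCSTRUNKPORT case (source line 807 below): the return value of suser() is assigned to 'error' only so the surrounding if-test can check it, and on the non-zero path it is immediately overwritten with EPERM before anything reads it. A minimal sketch of the pattern and one illustrative way the store could be avoided (an editorial example, not a committed OpenBSD change):

    /* As written: the stored value is only tested, then replaced. */
    if ((error = suser(curproc)) != 0) {
        error = EPERM;
        break;
    }

    /* Equivalent without the dead store: test the result directly. */
    if (suser(curproc) != 0) {
        error = EPERM;
        break;
    }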
1 | /* $OpenBSD: if_trunk.c,v 1.154 2023/12/23 10:52:54 bluhm Exp $ */ |
2 | |
3 | /* |
4 | * Copyright (c) 2005, 2006, 2007 Reyk Floeter <reyk@openbsd.org> |
5 | * |
6 | * Permission to use, copy, modify, and distribute this software for any |
7 | * purpose with or without fee is hereby granted, provided that the above |
8 | * copyright notice and this permission notice appear in all copies. |
9 | * |
10 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES |
11 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF |
12 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR |
13 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES |
14 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN |
15 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF |
16 | * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. |
17 | */ |
18 | |
19 | #include <sys/param.h> |
20 | #include <sys/kernel.h> |
21 | #include <sys/malloc.h> |
22 | #include <sys/mbuf.h> |
23 | #include <sys/queue.h> |
24 | #include <sys/socket.h> |
25 | #include <sys/sockio.h> |
26 | #include <sys/systm.h> |
27 | #include <sys/task.h> |
28 | #include <sys/timeout.h> |
29 | |
30 | #include <crypto/siphash.h> |
31 | |
32 | #include <net/if.h> |
33 | #include <net/if_dl.h> |
34 | #include <net/if_media.h> |
35 | #include <net/if_types.h> |
36 | #include <net/route.h> |
37 | |
38 | #include <netinet/in.h> |
39 | #include <netinet/if_ether.h> |
40 | #include <netinet/ip.h> |
41 | |
42 | #ifdef INET6 |
43 | #include <netinet/ip6.h> |
44 | #endif |
45 | |
46 | #include <net/if_vlan_var.h> |
47 | #include <net/if_trunk.h> |
48 | #include <net/trunklacp.h> |
49 | |
50 | #include "bpfilter.h" |
51 | #if NBPFILTER > 0 |
52 | #include <net/bpf.h> |
53 | #endif |
54 | |
55 | SLIST_HEAD(__trhead, trunk_softc) trunk_list; /* list of trunks */ |
56 | |
57 | void trunkattach(int); |
58 | int trunk_clone_create(struct if_clone *, int); |
59 | int trunk_clone_destroy(struct ifnet *); |
60 | void trunk_lladdr(struct arpcom *, u_int8_t *); |
61 | int trunk_capabilities(struct trunk_softc *); |
62 | void trunk_port_lladdr(struct trunk_port *, u_int8_t *); |
63 | int trunk_port_create(struct trunk_softc *, struct ifnet *); |
64 | int trunk_port_destroy(struct trunk_port *); |
65 | void trunk_port_state(void *); |
66 | void trunk_port_ifdetach(void *); |
67 | int trunk_port_ioctl(struct ifnet *, u_long, caddr_t); |
68 | int trunk_port_output(struct ifnet *, struct mbuf *, struct sockaddr *, |
69 | struct rtentry *); |
70 | struct trunk_port *trunk_port_get(struct trunk_softc *, struct ifnet *); |
71 | int trunk_port_checkstacking(struct trunk_softc *); |
72 | void trunk_port2req(struct trunk_port *, struct trunk_reqport *); |
73 | int trunk_ioctl(struct ifnet *, u_long, caddr_t); |
74 | int trunk_ether_addmulti(struct trunk_softc *, struct ifreq *); |
75 | int trunk_ether_delmulti(struct trunk_softc *, struct ifreq *); |
76 | void trunk_ether_purgemulti(struct trunk_softc *); |
77 | int trunk_ether_cmdmulti(struct trunk_port *, u_long); |
78 | int trunk_ioctl_allports(struct trunk_softc *, u_long, caddr_t); |
79 | void trunk_input(struct ifnet *, struct mbuf *); |
80 | void trunk_start(struct ifnet *); |
81 | void trunk_init(struct ifnet *); |
82 | void trunk_stop(struct ifnet *); |
83 | int trunk_media_change(struct ifnet *); |
84 | void trunk_media_status(struct ifnet *, struct ifmediareq *); |
85 | struct trunk_port *trunk_link_active(struct trunk_softc *, |
86 | struct trunk_port *); |
87 | const void *trunk_gethdr(struct mbuf *, u_int, u_int, void *); |
88 | |
89 | struct if_clone trunk_cloner = |
90 | IF_CLONE_INITIALIZER("trunk", trunk_clone_create, trunk_clone_destroy); |
91 | |
92 | /* Simple round robin */ |
93 | int trunk_rr_attach(struct trunk_softc *); |
94 | int trunk_rr_detach(struct trunk_softc *); |
95 | void trunk_rr_port_destroy(struct trunk_port *); |
96 | int trunk_rr_start(struct trunk_softc *, struct mbuf *); |
97 | int trunk_rr_input(struct trunk_softc *, struct trunk_port *, |
98 | struct mbuf *); |
99 | |
100 | /* Active failover */ |
101 | int trunk_fail_attach(struct trunk_softc *); |
102 | int trunk_fail_detach(struct trunk_softc *); |
103 | int trunk_fail_port_create(struct trunk_port *); |
104 | void trunk_fail_port_destroy(struct trunk_port *); |
105 | int trunk_fail_start(struct trunk_softc *, struct mbuf *); |
106 | int trunk_fail_input(struct trunk_softc *, struct trunk_port *, |
107 | struct mbuf *); |
108 | void trunk_fail_linkstate(struct trunk_port *); |
109 | |
110 | /* Loadbalancing */ |
111 | int trunk_lb_attach(struct trunk_softc *); |
112 | int trunk_lb_detach(struct trunk_softc *); |
113 | int trunk_lb_port_create(struct trunk_port *); |
114 | void trunk_lb_port_destroy(struct trunk_port *); |
115 | int trunk_lb_start(struct trunk_softc *, struct mbuf *); |
116 | int trunk_lb_input(struct trunk_softc *, struct trunk_port *, |
117 | struct mbuf *); |
118 | int trunk_lb_porttable(struct trunk_softc *, struct trunk_port *); |
119 | |
120 | /* Broadcast mode */ |
121 | int trunk_bcast_attach(struct trunk_softc *); |
122 | int trunk_bcast_detach(struct trunk_softc *); |
123 | int trunk_bcast_start(struct trunk_softc *, struct mbuf *); |
124 | int trunk_bcast_input(struct trunk_softc *, struct trunk_port *, |
125 | struct mbuf *); |
126 | |
127 | /* 802.3ad LACP */ |
128 | int trunk_lacp_attach(struct trunk_softc *); |
129 | int trunk_lacp_detach(struct trunk_softc *); |
130 | int trunk_lacp_start(struct trunk_softc *, struct mbuf *); |
131 | int trunk_lacp_input(struct trunk_softc *, struct trunk_port *, |
132 | struct mbuf *); |
133 | |
134 | /* Trunk protocol table */ |
135 | static const struct { |
136 | enum trunk_proto ti_proto; |
137 | int (*ti_attach)(struct trunk_softc *); |
138 | } trunk_protos[] = { |
139 | { TRUNK_PROTO_ROUNDROBIN, trunk_rr_attach }, |
140 | { TRUNK_PROTO_FAILOVER, trunk_fail_attach }, |
141 | { TRUNK_PROTO_LOADBALANCE, trunk_lb_attach }, |
142 | { TRUNK_PROTO_BROADCAST, trunk_bcast_attach }, |
143 | { TRUNK_PROTO_LACP, trunk_lacp_attach }, |
144 | { TRUNK_PROTO_NONE, NULL((void *)0) } |
145 | }; |
146 | |
147 | void |
148 | trunkattach(int count) |
149 | { |
150 | SLIST_INIT(&trunk_list); |
151 | if_clone_attach(&trunk_cloner); |
152 | } |
153 | |
154 | int |
155 | trunk_clone_create(struct if_clone *ifc, int unit) |
156 | { |
157 | struct trunk_softc *tr; |
158 | struct ifnet *ifp; |
159 | int i, error = 0; |
160 | |
161 | tr = malloc(sizeof(*tr), M_DEVBUF2, M_WAITOK0x0001|M_ZERO0x0008); |
162 | tr->tr_proto = TRUNK_PROTO_NONE; |
163 | for (i = 0; trunk_protos[i].ti_proto != TRUNK_PROTO_NONE; i++) { |
164 | if (trunk_protos[i].ti_proto == TRUNK_PROTO_DEFAULT) { |
165 | tr->tr_proto = trunk_protos[i].ti_proto; |
166 | if ((error = trunk_protos[i].ti_attach(tr)) != 0) { |
167 | free(tr, M_DEVBUF2, sizeof *tr); |
168 | return (error); |
169 | } |
170 | break; |
171 | } |
172 | } |
173 | SLIST_INIT(&tr->tr_ports); |
174 | |
175 | /* Initialise pseudo media types */ |
176 | ifmedia_init(&tr->tr_media, 0, trunk_media_change, |
177 | trunk_media_status); |
178 | ifmedia_add(&tr->tr_media, IFM_ETHER0x0000000000000100ULL | IFM_AUTO0ULL, 0, NULL((void *)0)); |
179 | ifmedia_set(&tr->tr_media, IFM_ETHER0x0000000000000100ULL | IFM_AUTO0ULL); |
180 | |
181 | ifp = &tr->tr_ac.ac_if; |
182 | ifp->if_softc = tr; |
183 | ifp->if_start = trunk_start; |
184 | ifp->if_ioctl = trunk_ioctl; |
185 | ifp->if_flags = IFF_SIMPLEX0x800 | IFF_BROADCAST0x2 | IFF_MULTICAST0x8000; |
186 | ifp->if_capabilitiesif_data.ifi_capabilities = trunk_capabilities(tr); |
187 | ifp->if_xflags = IFXF_CLONED0x2; |
188 | |
189 | snprintf(ifp->if_xname, sizeof(ifp->if_xname), "%s%d", |
190 | ifc->ifc_name, unit); |
191 | |
192 | /* |
193 | * Attach as an ordinary ethernet device, children will be attached |
194 | * as special device IFT_IEEE8023ADLAG. |
195 | */ |
196 | if_counters_alloc(ifp); |
197 | if_attach(ifp); |
198 | ether_ifattach(ifp); |
199 | |
200 | /* Insert into the global list of trunks */ |
201 | SLIST_INSERT_HEAD(&trunk_list, tr, tr_entries); |
202 | |
203 | return (0); |
204 | } |
205 | |
206 | int |
207 | trunk_clone_destroy(struct ifnet *ifp) |
208 | { |
209 | struct trunk_softc *tr = (struct trunk_softc *)ifp->if_softc; |
210 | struct trunk_port *tp; |
211 | int error; |
212 | |
213 | /* Remove any multicast groups that we may have joined. */ |
214 | trunk_ether_purgemulti(tr); |
215 | |
216 | /* Shutdown and remove trunk ports, return on error */ |
217 | NET_LOCK(); |
218 | while ((tp = SLIST_FIRST(&tr->tr_ports)) != NULL) { |
219 | if ((error = trunk_port_destroy(tp)) != 0) { |
220 | NET_UNLOCK(); |
221 | return (error); |
222 | } |
223 | } |
224 | NET_UNLOCK(); |
225 | |
226 | ifmedia_delete_instance(&tr->tr_media, IFM_INST_ANY((uint64_t) -1)); |
227 | ether_ifdetach(ifp); |
228 | if_detach(ifp); |
229 | |
230 | SLIST_REMOVE(&trunk_list, tr, trunk_softc, tr_entries); |
231 | free(tr, M_DEVBUF2, sizeof *tr); |
232 | |
233 | return (0); |
234 | } |
235 | |
236 | void |
237 | trunk_lladdr(struct arpcom *ac, u_int8_t *lladdr) |
238 | { |
239 | struct ifnet *ifp = &ac->ac_if; |
240 | struct sockaddr_dl *sdl; |
241 | |
242 | sdl = ifp->if_sadl; |
243 | sdl->sdl_type = IFT_ETHER0x06; |
244 | sdl->sdl_alen = ETHER_ADDR_LEN6; |
245 | bcopy(lladdr, LLADDR(sdl)((caddr_t)((sdl)->sdl_data + (sdl)->sdl_nlen)), ETHER_ADDR_LEN6); |
246 | bcopy(lladdr, ac->ac_enaddr, ETHER_ADDR_LEN6); |
247 | } |
248 | |
249 | int |
250 | trunk_capabilities(struct trunk_softc *tr) |
251 | { |
252 | struct trunk_port *tp; |
253 | int cap = ~0, priv; |
254 | |
255 | /* Preserve private capabilities */ |
256 | priv = tr->tr_capabilitiestr_ac.ac_if.if_data.ifi_capabilities & IFCAP_TRUNK_MASK0xffff0000; |
257 | |
258 | /* Get capabilities from the trunk ports */ |
259 | SLIST_FOREACH(tp, &tr->tr_ports, tp_entries) |
260 | cap &= tp->tp_capabilitiestp_if->if_data.ifi_capabilities; |
261 | |
262 | if (tr->tr_ifflagstr_ac.ac_if.if_flags & IFF_DEBUG0x4) { |
263 | printf("%s: capabilities 0x%08x\n", |
264 | tr->tr_ifnametr_ac.ac_if.if_xname, cap == ~0 ? priv : (cap | priv)); |
265 | } |
266 | |
267 | return (cap == ~0 ? priv : (cap | priv)); |
268 | } |
269 | |
270 | void |
271 | trunk_port_lladdr(struct trunk_port *tp, u_int8_t *lladdr) |
272 | { |
273 | struct ifnet *ifp = tp->tp_if; |
274 | |
275 | /* Set the link layer address */ |
276 | trunk_lladdr((struct arpcom *)ifp, lladdr); |
277 | |
278 | /* Reset the port to update the lladdr */ |
279 | ifnewlladdr(ifp); |
280 | } |
281 | |
282 | int |
283 | trunk_port_create(struct trunk_softc *tr, struct ifnet *ifp) |
284 | { |
285 | struct trunk_softc *tr_ptr; |
286 | struct trunk_port *tp; |
287 | struct arpcom *ac0; |
288 | int error = 0; |
289 | |
290 | /* Limit the maximal number of trunk ports */ |
291 | if (tr->tr_count >= TRUNK_MAX_PORTS32) |
292 | return (ENOSPC28); |
293 | |
294 | /* Check if port has already been associated to a trunk */ |
295 | if (trunk_port_get(NULL((void *)0), ifp) != NULL((void *)0)) |
296 | return (EBUSY16); |
297 | |
298 | /* XXX Disallow non-ethernet interfaces (this should be any of 802) */ |
299 | if (ifp->if_typeif_data.ifi_type != IFT_ETHER0x06) |
300 | return (EPROTONOSUPPORT43); |
301 | |
302 | ac0 = (struct arpcom *)ifp; |
303 | if (ac0->ac_trunkport != NULL((void *)0)) |
304 | return (EBUSY16); |
305 | |
306 | /* Take MTU from the first member port */ |
307 | if (SLIST_EMPTY(&tr->tr_ports)) { |
308 | if (tr->tr_ifflagstr_ac.ac_if.if_flags & IFF_DEBUG0x4) |
309 | printf("%s: first port, setting trunk mtu %u\n", |
310 | tr->tr_ifnametr_ac.ac_if.if_xname, ifp->if_mtuif_data.ifi_mtu); |
311 | tr->tr_ac.ac_if.if_mtuif_data.ifi_mtu = ifp->if_mtuif_data.ifi_mtu; |
312 | tr->tr_ac.ac_if.if_hardmtu = ifp->if_hardmtu; |
313 | } else if (tr->tr_ac.ac_if.if_mtuif_data.ifi_mtu != ifp->if_mtuif_data.ifi_mtu) { |
314 | printf("%s: adding %s failed, MTU %u != %u\n", tr->tr_ifnametr_ac.ac_if.if_xname, |
315 | ifp->if_xname, ifp->if_mtuif_data.ifi_mtu, tr->tr_ac.ac_if.if_mtuif_data.ifi_mtu); |
316 | return (EINVAL22); |
317 | } |
318 | |
319 | if ((error = ifpromisc(ifp, 1)) != 0) |
320 | return (error); |
321 | |
322 | if ((tp = malloc(sizeof *tp, M_DEVBUF2, M_NOWAIT0x0002|M_ZERO0x0008)) == NULL((void *)0)) |
323 | return (ENOMEM12); |
324 | |
325 | /* Check if port is a stacked trunk */ |
326 | SLIST_FOREACH(tr_ptr, &trunk_list, tr_entries) { |
327 | if (ifp == &tr_ptr->tr_ac.ac_if) { |
328 | tp->tp_flags |= TRUNK_PORT_STACK0x00000002; |
329 | if (trunk_port_checkstacking(tr_ptr) >= |
330 | TRUNK_MAX_STACKING4) { |
331 | free(tp, M_DEVBUF2, sizeof *tp); |
332 | return (E2BIG7); |
333 | } |
334 | } |
335 | } |
336 | |
337 | /* Change the interface type */ |
338 | tp->tp_iftype = ifp->if_typeif_data.ifi_type; |
339 | ifp->if_typeif_data.ifi_type = IFT_IEEE8023ADLAG0xa1; |
340 | |
341 | tp->tp_ioctl = ifp->if_ioctl; |
342 | ifp->if_ioctl = trunk_port_ioctl; |
343 | |
344 | tp->tp_output = ifp->if_output; |
345 | ifp->if_output = trunk_port_output; |
346 | |
347 | tp->tp_if = ifp; |
348 | tp->tp_trunk = tr; |
349 | |
350 | /* Save port link layer address */ |
351 | bcopy(((struct arpcom *)ifp)->ac_enaddr, tp->tp_lladdr, ETHER_ADDR_LEN6); |
352 | |
353 | if (SLIST_EMPTY(&tr->tr_ports)) { |
354 | tr->tr_primary = tp; |
355 | tp->tp_flags |= TRUNK_PORT_MASTER0x00000001; |
356 | trunk_lladdr(&tr->tr_ac, tp->tp_lladdr); |
357 | } |
358 | |
359 | /* Insert into the list of ports */ |
360 | SLIST_INSERT_HEAD(&tr->tr_ports, tp, tp_entries); |
361 | tr->tr_count++; |
362 | |
363 | /* Update link layer address for this port */ |
364 | trunk_port_lladdr(tp, |
365 | ((struct arpcom *)(tr->tr_primary->tp_if))->ac_enaddr); |
366 | |
367 | /* Update trunk capabilities */ |
368 | tr->tr_capabilitiestr_ac.ac_if.if_data.ifi_capabilities = trunk_capabilities(tr); |
369 | |
370 | /* Add multicast addresses to this port */ |
371 | trunk_ether_cmdmulti(tp, SIOCADDMULTI); |
372 | |
373 | /* Register callback for physical link state changes */ |
374 | task_set(&tp->tp_ltask, trunk_port_state, tp); |
375 | if_linkstatehook_add(ifp, &tp->tp_ltask); |
376 | |
377 | /* Register callback if parent wants to unregister */ |
378 | task_set(&tp->tp_dtask, trunk_port_ifdetach, tp); |
379 | if_detachhook_add(ifp, &tp->tp_dtask); |
380 | |
381 | if (tr->tr_port_create != NULL((void *)0)) |
382 | error = (*tr->tr_port_create)(tp); |
383 | |
384 | /* Change input handler of the physical interface. */ |
385 | tp->tp_input = ifp->if_input; |
386 | NET_ASSERT_LOCKED(); |
387 | ac0->ac_trunkport = tp; |
388 | ifp->if_input = trunk_input; |
389 | |
390 | return (error); |
391 | } |
392 | |
393 | int |
394 | trunk_port_checkstacking(struct trunk_softc *tr) |
395 | { |
396 | struct trunk_softc *tr_ptr; |
397 | struct trunk_port *tp; |
398 | int m = 0; |
399 | |
400 | SLIST_FOREACH(tp, &tr->tr_ports, tp_entries) { |
401 | if (tp->tp_flags & TRUNK_PORT_STACK0x00000002) { |
402 | tr_ptr = (struct trunk_softc *)tp->tp_if->if_softc; |
403 | m = MAX(m, trunk_port_checkstacking(tr_ptr)); |
404 | } |
405 | } |
406 | |
407 | return (m + 1); |
408 | } |
409 | |
410 | int |
411 | trunk_port_destroy(struct trunk_port *tp) |
412 | { |
413 | struct trunk_softc *tr = (struct trunk_softc *)tp->tp_trunk; |
414 | struct trunk_port *tp_ptr; |
415 | struct ifnet *ifp = tp->tp_if; |
416 | struct arpcom *ac0 = (struct arpcom *)ifp; |
417 | |
418 | /* Restore previous input handler. */ |
419 | NET_ASSERT_LOCKED(); |
420 | ifp->if_input = tp->tp_input; |
421 | ac0->ac_trunkport = NULL((void *)0); |
422 | |
423 | /* Remove multicast addresses from this port */ |
424 | trunk_ether_cmdmulti(tp, SIOCDELMULTI); |
425 | |
426 | ifpromisc(ifp, 0); |
427 | |
428 | if (tr->tr_port_destroy != NULL((void *)0)) |
429 | (*tr->tr_port_destroy)(tp); |
430 | |
431 | /* Restore interface type. */ |
432 | ifp->if_typeif_data.ifi_type = tp->tp_iftype; |
433 | |
434 | ifp->if_ioctl = tp->tp_ioctl; |
435 | ifp->if_output = tp->tp_output; |
436 | |
437 | if_detachhook_del(ifp, &tp->tp_dtask); |
438 | if_linkstatehook_del(ifp, &tp->tp_ltask); |
439 | |
440 | /* Finally, remove the port from the trunk */ |
441 | SLIST_REMOVE(&tr->tr_ports, tp, trunk_port, tp_entries); |
442 | tr->tr_count--; |
443 | |
444 | /* Update the primary interface */ |
445 | if (tp == tr->tr_primary) { |
446 | u_int8_t lladdr[ETHER_ADDR_LEN6]; |
447 | |
448 | if ((tp_ptr = SLIST_FIRST(&tr->tr_ports)) == NULL) { |
449 | bzero(&lladdr, ETHER_ADDR_LEN); |
450 | } else { |
451 | bcopy(((struct arpcom *)tp_ptr->tp_if)->ac_enaddr, |
452 | lladdr, ETHER_ADDR_LEN6); |
453 | tp_ptr->tp_flags = TRUNK_PORT_MASTER0x00000001; |
454 | } |
455 | trunk_lladdr(&tr->tr_ac, lladdr); |
456 | tr->tr_primary = tp_ptr; |
457 | |
458 | /* Update link layer address for each port */ |
459 | SLIST_FOREACH(tp_ptr, &tr->tr_ports, tp_entries) |
460 | trunk_port_lladdr(tp_ptr, lladdr); |
461 | } |
462 | |
463 | /* Reset the port lladdr */ |
464 | trunk_port_lladdr(tp, tp->tp_lladdr); |
465 | |
466 | if_put(ifp); |
467 | free(tp, M_DEVBUF2, sizeof *tp); |
468 | |
469 | /* Update trunk capabilities */ |
470 | tr->tr_capabilitiestr_ac.ac_if.if_data.ifi_capabilities = trunk_capabilities(tr); |
471 | |
472 | return (0); |
473 | } |
474 | |
475 | int |
476 | trunk_port_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) |
477 | { |
478 | struct trunk_reqport *rp = (struct trunk_reqport *)data; |
479 | struct trunk_softc *tr; |
480 | struct trunk_port *tp = NULL((void *)0); |
481 | struct ifnet *ifp0 = NULL((void *)0); |
482 | int error = 0; |
483 | |
484 | /* Should be checked by the caller */ |
485 | if (ifp->if_typeif_data.ifi_type != IFT_IEEE8023ADLAG0xa1 || |
486 | (tp = trunk_port_get(NULL((void *)0), ifp)) == NULL((void *)0) || |
487 | (tr = (struct trunk_softc *)tp->tp_trunk) == NULL((void *)0)) { |
488 | error = EINVAL22; |
489 | goto fallback; |
490 | } |
491 | |
492 | switch (cmd) { |
493 | case SIOCGTRUNKPORT: |
494 | if (rp->rp_portname[0] == '\0' || |
495 | (ifp0 = if_unit(rp->rp_portname)) != ifp) { |
496 | if_put(ifp0); |
497 | error = EINVAL22; |
498 | break; |
499 | } |
500 | if_put(ifp0); |
501 | |
502 | /* Search in all trunks if the global flag is set */ |
503 | if ((tp = trunk_port_get(rp->rp_flags & TRUNK_PORT_GLOBAL0x80000000 ? |
504 | NULL((void *)0) : tr, ifp)) == NULL((void *)0)) { |
505 | error = ENOENT2; |
506 | break; |
507 | } |
508 | |
509 | trunk_port2req(tp, rp); |
510 | break; |
511 | case SIOCSIFMTU: |
512 | /* Do not allow the MTU to be changed once joined */ |
513 | error = EINVAL22; |
514 | break; |
515 | default: |
516 | error = ENOTTY25; |
517 | goto fallback; |
518 | } |
519 | |
520 | return (error); |
521 | |
522 | fallback: |
523 | if (tp != NULL((void *)0)) |
524 | error = (*tp->tp_ioctl)(ifp, cmd, data); |
525 | |
526 | return (error); |
527 | } |
528 | |
529 | int |
530 | trunk_port_output(struct ifnet *ifp, struct mbuf *m, struct sockaddr *dst, |
531 | struct rtentry *rt) |
532 | { |
533 | /* restrict transmission on trunk members to bpf only */ |
534 | if (ifp->if_typeif_data.ifi_type == IFT_IEEE8023ADLAG0xa1 && |
535 | (m_tag_find(m, PACKET_TAG_DLT0x0100, NULL((void *)0)) == NULL((void *)0))) { |
536 | m_freem(m); |
537 | return (EBUSY16); |
538 | } |
539 | |
540 | return (ether_output(ifp, m, dst, rt)); |
541 | } |
542 | |
543 | void |
544 | trunk_port_ifdetach(void *arg) |
545 | { |
546 | struct trunk_port *tp = (struct trunk_port *)arg; |
547 | |
548 | trunk_port_destroy(tp); |
549 | } |
550 | |
551 | struct trunk_port * |
552 | trunk_port_get(struct trunk_softc *tr, struct ifnet *ifp) |
553 | { |
554 | struct trunk_port *tp; |
555 | struct trunk_softc *tr_ptr; |
556 | |
557 | if (tr != NULL((void *)0)) { |
558 | /* Search port in specified trunk */ |
559 | SLIST_FOREACH(tp, &tr->tr_ports, tp_entries) { |
560 | if (tp->tp_if == ifp) |
561 | return (tp); |
562 | } |
563 | } else { |
564 | /* Search all trunks for the selected port */ |
565 | SLIST_FOREACH(tr_ptr, &trunk_list, tr_entries) { |
566 | SLIST_FOREACH(tp, &tr_ptr->tr_ports, tp_entries) { |
567 | if (tp->tp_if == ifp) |
568 | return (tp); |
569 | } |
570 | } |
571 | } |
572 | |
573 | return (NULL((void *)0)); |
574 | } |
575 | |
576 | void |
577 | trunk_port2req(struct trunk_port *tp, struct trunk_reqport *rp) |
578 | { |
579 | struct trunk_softc *tr = (struct trunk_softc *)tp->tp_trunk; |
580 | |
581 | strlcpy(rp->rp_ifname, tr->tr_ifnametr_ac.ac_if.if_xname, sizeof(rp->rp_ifname)); |
582 | strlcpy(rp->rp_portname, tp->tp_if->if_xname, sizeof(rp->rp_portname)); |
583 | rp->rp_prio = tp->tp_prio; |
584 | if (tr->tr_portreq != NULL((void *)0)) |
585 | (*tr->tr_portreq)(tp, (caddr_t)&rp->rp_psc); |
586 | |
587 | /* Add protocol specific flags */ |
588 | switch (tr->tr_proto) { |
589 | case TRUNK_PROTO_FAILOVER: |
590 | rp->rp_flags = tp->tp_flags; |
591 | if (tp == trunk_link_active(tr, tr->tr_primary)) |
592 | rp->rp_flags |= TRUNK_PORT_ACTIVE0x00000004; |
593 | break; |
594 | |
595 | case TRUNK_PROTO_ROUNDROBIN: |
596 | case TRUNK_PROTO_LOADBALANCE: |
597 | case TRUNK_PROTO_BROADCAST: |
598 | rp->rp_flags = tp->tp_flags; |
599 | if (TRUNK_PORTACTIVE(tp)) |
600 | rp->rp_flags |= TRUNK_PORT_ACTIVE0x00000004; |
601 | break; |
602 | |
603 | case TRUNK_PROTO_LACP: |
604 | /* LACP has a different definition of active */ |
605 | rp->rp_flags = lacp_port_status(tp); |
606 | break; |
607 | default: |
608 | break; |
609 | } |
610 | } |
611 | |
612 | int |
613 | trunk_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) |
614 | { |
615 | struct trunk_softc *tr = (struct trunk_softc *)ifp->if_softc; |
616 | struct trunk_reqall *ra = (struct trunk_reqall *)data; |
617 | struct trunk_reqport *rp = (struct trunk_reqport *)data, rpbuf; |
618 | struct trunk_opts *tro = (struct trunk_opts *)data; |
619 | struct ifreq *ifr = (struct ifreq *)data; |
620 | struct lacp_softc *lsc; |
621 | struct trunk_port *tp; |
622 | struct lacp_port *lp; |
623 | struct ifnet *tpif; |
624 | int i, error = 0; |
625 | |
626 | bzero(&rpbuf, sizeof(rpbuf)); |
627 | |
628 | switch (cmd) { |
629 | case SIOCGTRUNK: |
630 | ra->ra_proto = tr->tr_proto; |
631 | if (tr->tr_req != NULL((void *)0)) |
632 | (*tr->tr_req)(tr, (caddr_t)&ra->ra_psc); |
633 | ra->ra_ports = i = 0; |
634 | tp = SLIST_FIRST(&tr->tr_ports); |
635 | while (tp && ra->ra_size >= |
636 | i + sizeof(struct trunk_reqport)) { |
637 | trunk_port2req(tp, &rpbuf); |
638 | error = copyout(&rpbuf, (caddr_t)ra->ra_port + i, |
639 | sizeof(struct trunk_reqport)); |
640 | if (error) |
641 | break; |
642 | i += sizeof(struct trunk_reqport); |
643 | ra->ra_ports++; |
644 | tp = SLIST_NEXT(tp, tp_entries); |
645 | } |
646 | break; |
647 | case SIOCSTRUNK: |
648 | if ((error = suser(curproc)) != 0) { |
649 | error = EPERM1; |
650 | break; |
651 | } |
652 | if (ra->ra_proto >= TRUNK_PROTO_MAX) { |
653 | error = EPROTONOSUPPORT43; |
654 | break; |
655 | } |
656 | |
657 | /* |
658 | * Use of ifp->if_input and ac->ac_trunkport is |
659 | * protected by NET_LOCK, but that may not be true |
660 | * in the future. The below comment and code flow is |
661 | * maintained to help in that future. |
662 | * |
663 | * Serialize modifications to the trunk and trunk |
664 | * ports via the ifih SRP: detaching trunk_input |
665 | * from the trunk port will require all currently |
666 | * running trunk_input's on this port to finish |
667 | * granting us an exclusive access to it. |
668 | */ |
669 | NET_ASSERT_LOCKED(); |
670 | SLIST_FOREACH(tp, &tr->tr_ports, tp_entries) { |
671 | /* if_ih_remove(tp->tp_if, trunk_input, tp); */ |
672 | tp->tp_if->if_input = tp->tp_input; |
673 | } |
674 | if (tr->tr_proto != TRUNK_PROTO_NONE) |
675 | error = tr->tr_detach(tr); |
676 | if (error != 0) |
677 | break; |
678 | for (i = 0; i < nitems(trunk_protos); i++) { |
679 | if (trunk_protos[i].ti_proto == ra->ra_proto) { |
680 | if (tr->tr_ifflagstr_ac.ac_if.if_flags & IFF_DEBUG0x4) |
681 | printf("%s: using proto %u\n", |
682 | tr->tr_ifnametr_ac.ac_if.if_xname, |
683 | trunk_protos[i].ti_proto); |
684 | tr->tr_proto = trunk_protos[i].ti_proto; |
685 | if (tr->tr_proto != TRUNK_PROTO_NONE) |
686 | error = trunk_protos[i].ti_attach(tr); |
687 | SLIST_FOREACH(tp, &tr->tr_ports, tp_entries) { |
688 | /* if_ih_insert(tp->tp_if, |
689 | trunk_input, tp); */ |
690 | tp->tp_if->if_input = trunk_input; |
691 | } |
692 | /* Update trunk capabilities */ |
693 | tr->tr_capabilitiestr_ac.ac_if.if_data.ifi_capabilities = trunk_capabilities(tr); |
694 | goto out; |
695 | } |
696 | } |
697 | error = EPROTONOSUPPORT43; |
698 | break; |
699 | case SIOCGTRUNKOPTS: |
700 | /* Only LACP trunks have options atm */ |
701 | if (tro->to_proto != TRUNK_PROTO_LACP) { |
702 | error = EPROTONOSUPPORT43; |
703 | break; |
704 | } |
705 | lsc = LACP_SOFTC(tr)((struct lacp_softc *)(tr)->tr_psc); |
706 | tro->to_lacpoptsto_psc.rpsc_lacp.lacp_mode = lsc->lsc_modelsc_admin_defaults.lad_mode; |
707 | tro->to_lacpoptsto_psc.rpsc_lacp.lacp_timeout = lsc->lsc_timeoutlsc_admin_defaults.lad_timeout; |
708 | tro->to_lacpoptsto_psc.rpsc_lacp.lacp_prio = lsc->lsc_sys_priolsc_admin_defaults.lad_prio; |
709 | tro->to_lacpoptsto_psc.rpsc_lacp.lacp_portprio = lsc->lsc_port_priolsc_admin_defaults.lad_portprio; |
710 | tro->to_lacpoptsto_psc.rpsc_lacp.lacp_ifqprio = lsc->lsc_ifq_priolsc_admin_defaults.lad_ifqprio; |
711 | break; |
712 | case SIOCSTRUNKOPTS: |
713 | if ((error = suser(curproc)) != 0) { |
714 | error = EPERM1; |
715 | break; |
716 | } |
717 | /* Only LACP trunks have options atm */ |
718 | if (tro->to_proto != TRUNK_PROTO_LACP) { |
719 | error = EPROTONOSUPPORT43; |
720 | break; |
721 | } |
722 | lsc = LACP_SOFTC(tr)((struct lacp_softc *)(tr)->tr_psc); |
723 | switch(tro->to_opts) { |
724 | case TRUNK_OPT_LACP_MODE0x01: |
725 | /* |
726 | * Ensure mode changes occur immediately |
727 | * on all ports |
728 | */ |
729 | lsc->lsc_modelsc_admin_defaults.lad_mode = tro->to_lacpoptsto_psc.rpsc_lacp.lacp_mode; |
730 | if (lsc->lsc_modelsc_admin_defaults.lad_mode == 0) { |
731 | LIST_FOREACH(lp, &lsc->lsc_ports, |
732 | lp_next) |
733 | lp->lp_statelp_actor.lip_state &= |
734 | ~LACP_STATE_ACTIVITY(1<<0); |
735 | } else { |
736 | LIST_FOREACH(lp, &lsc->lsc_ports, |
737 | lp_next) |
738 | lp->lp_statelp_actor.lip_state |= |
739 | LACP_STATE_ACTIVITY(1<<0); |
740 | } |
741 | break; |
742 | case TRUNK_OPT_LACP_TIMEOUT0x02: |
743 | /* |
744 | * Ensure timeout changes occur immediately |
745 | * on all ports |
746 | */ |
747 | lsc->lsc_timeoutlsc_admin_defaults.lad_timeout = |
748 | tro->to_lacpoptsto_psc.rpsc_lacp.lacp_timeout; |
749 | if (lsc->lsc_timeoutlsc_admin_defaults.lad_timeout == 0) { |
750 | LIST_FOREACH(lp, &lsc->lsc_ports, |
751 | lp_next) |
752 | lp->lp_statelp_actor.lip_state &= |
753 | ~LACP_STATE_TIMEOUT(1<<1); |
754 | } else { |
755 | LIST_FOREACH(lp, &lsc->lsc_ports, |
756 | lp_next) |
757 | lp->lp_statelp_actor.lip_state |= |
758 | LACP_STATE_TIMEOUT(1<<1); |
759 | } |
760 | break; |
761 | case TRUNK_OPT_LACP_SYS_PRIO0x04: |
762 | if (tro->to_lacpoptsto_psc.rpsc_lacp.lacp_prio == 0) { |
763 | error = EINVAL22; |
764 | break; |
765 | } |
766 | lsc->lsc_sys_priolsc_admin_defaults.lad_prio = tro->to_lacpoptsto_psc.rpsc_lacp.lacp_prio; |
767 | break; |
768 | case TRUNK_OPT_LACP_PORT_PRIO0x08: |
769 | if (tro->to_lacpoptsto_psc.rpsc_lacp.lacp_portprio == 0) { |
770 | error = EINVAL22; |
771 | break; |
772 | } |
773 | lsc->lsc_port_priolsc_admin_defaults.lad_portprio = |
774 | tro->to_lacpoptsto_psc.rpsc_lacp.lacp_portprio; |
775 | break; |
776 | case TRUNK_OPT_LACP_IFQ_PRIO0x10: |
777 | if (tro->to_lacpoptsto_psc.rpsc_lacp.lacp_ifqprio > |
778 | IFQ_MAXPRIO8 - 1) { |
779 | error = EINVAL22; |
780 | break; |
781 | } |
782 | lsc->lsc_ifq_priolsc_admin_defaults.lad_ifqprio = |
783 | tro->to_lacpoptsto_psc.rpsc_lacp.lacp_ifqprio; |
784 | break; |
785 | } |
786 | break; |
787 | case SIOCGTRUNKPORT: |
788 | if (rp->rp_portname[0] == '\0' || |
789 | (tpif = if_unit(rp->rp_portname)) == NULL((void *)0)) { |
790 | error = EINVAL22; |
791 | break; |
792 | } |
793 | |
794 | /* Search in all trunks if the global flag is set */ |
795 | tp = trunk_port_get(rp->rp_flags & TRUNK_PORT_GLOBAL0x80000000 ? |
796 | NULL((void *)0) : tr, tpif); |
797 | if_put(tpif); |
798 | |
799 | if(tp == NULL((void *)0)) { |
800 | error = ENOENT2; |
801 | break; |
802 | } |
803 | |
804 | trunk_port2req(tp, rp); |
805 | break; |
806 | case SIOCSTRUNKPORT: |
807 | if ((error = suser(curproc)) != 0) { |
Although the value stored to 'error' is used in the enclosing expression, the value is never actually read from 'error' | |
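/* The non-zero value suser() stored in 'error' is only consumed by the |
 * test above; it is replaced by EPERM before anything else reads it, |
 * which is the dead store this report flags. */ |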
808 | error = EPERM1; |
809 | break; |
810 | } |
811 | if (rp->rp_portname[0] == '\0' || |
812 | (tpif = if_unit(rp->rp_portname)) == NULL((void *)0)) { |
813 | error = EINVAL22; |
814 | break; |
815 | } |
816 | error = trunk_port_create(tr, tpif); |
817 | if (error != 0) |
818 | if_put(tpif); |
819 | break; |
820 | case SIOCSTRUNKDELPORT: |
821 | if ((error = suser(curproc)) != 0) { |
822 | error = EPERM1; |
823 | break; |
824 | } |
825 | if (rp->rp_portname[0] == '\0' || |
826 | (tpif = if_unit(rp->rp_portname)) == NULL((void *)0)) { |
827 | error = EINVAL22; |
828 | break; |
829 | } |
830 | |
831 | /* Search in all trunks if the global flag is set */ |
832 | tp = trunk_port_get(rp->rp_flags & TRUNK_PORT_GLOBAL0x80000000 ? |
833 | NULL((void *)0) : tr, tpif); |
834 | if_put(tpif); |
835 | |
836 | if(tp == NULL((void *)0)) { |
837 | error = ENOENT2; |
838 | break; |
839 | } |
840 | |
841 | error = trunk_port_destroy(tp); |
842 | break; |
843 | case SIOCSIFADDR: |
844 | ifp->if_flags |= IFF_UP0x1; |
845 | /* FALLTHROUGH */ |
846 | case SIOCSIFFLAGS: |
847 | error = ENETRESET52; |
848 | break; |
849 | case SIOCADDMULTI: |
850 | error = trunk_ether_addmulti(tr, ifr); |
851 | break; |
852 | case SIOCDELMULTI: |
853 | error = trunk_ether_delmulti(tr, ifr); |
854 | break; |
855 | case SIOCSIFMEDIA: |
856 | case SIOCGIFMEDIA: |
857 | error = ifmedia_ioctl(ifp, ifr, &tr->tr_media, cmd); |
858 | break; |
859 | case SIOCSIFLLADDR: |
860 | /* Update the port lladdrs as well */ |
861 | SLIST_FOREACH(tp, &tr->tr_ports, tp_entries) |
862 | trunk_port_lladdr(tp, ifr->ifr_addrifr_ifru.ifru_addr.sa_data); |
863 | error = ENETRESET52; |
864 | break; |
865 | default: |
866 | error = ether_ioctl(ifp, &tr->tr_ac, cmd, data); |
867 | } |
868 | |
869 | if (error == ENETRESET52) { |
870 | if (ifp->if_flags & IFF_UP0x1) { |
871 | if ((ifp->if_flags & IFF_RUNNING0x40) == 0) |
872 | trunk_init(ifp); |
873 | } else { |
874 | if (ifp->if_flags & IFF_RUNNING0x40) |
875 | trunk_stop(ifp); |
876 | } |
877 | error = 0; |
878 | } |
879 | |
880 | out: |
881 | return (error); |
882 | } |
883 | |
884 | int |
885 | trunk_ether_addmulti(struct trunk_softc *tr, struct ifreq *ifr) |
886 | { |
887 | struct trunk_mc *mc; |
888 | u_int8_t addrlo[ETHER_ADDR_LEN6], addrhi[ETHER_ADDR_LEN6]; |
889 | int error; |
890 | |
891 | /* Ignore ENETRESET error code */ |
892 | if ((error = ether_addmulti(ifr, &tr->tr_ac)) != ENETRESET52) |
893 | return (error); |
894 | |
895 | if ((mc = malloc(sizeof(*mc), M_DEVBUF2, M_NOWAIT0x0002)) == NULL((void *)0)) { |
896 | error = ENOMEM12; |
897 | goto failed; |
898 | } |
899 | |
900 | ether_multiaddr(&ifr->ifr_addrifr_ifru.ifru_addr, addrlo, addrhi); |
901 | ETHER_LOOKUP_MULTI(addrlo, addrhi, &tr->tr_ac, mc->mc_enm); |
902 | bcopy(&ifr->ifr_addr, &mc->mc_addr, ifr->ifr_addr.sa_len); |
903 | SLIST_INSERT_HEAD(&tr->tr_mc_head, mc, mc_entries); |
904 | |
905 | if ((error = trunk_ioctl_allports(tr, SIOCADDMULTI, |
906 | (caddr_t)ifr)) != 0) { |
907 | trunk_ether_delmulti(tr, ifr); |
908 | return (error); |
909 | } |
910 | |
911 | return (error); |
912 | |
913 | failed: |
914 | ether_delmulti(ifr, &tr->tr_ac); |
915 | |
916 | return (error); |
917 | } |
918 | |
919 | int |
920 | trunk_ether_delmulti(struct trunk_softc *tr, struct ifreq *ifr) |
921 | { |
922 | struct ether_multi *enm; |
923 | struct trunk_mc *mc; |
924 | u_int8_t addrlo[ETHER_ADDR_LEN6], addrhi[ETHER_ADDR_LEN6]; |
925 | int error; |
926 | |
927 | if ((error = ether_multiaddr(&ifr->ifr_addrifr_ifru.ifru_addr, addrlo, addrhi)) != 0) |
928 | return (error); |
929 | ETHER_LOOKUP_MULTI(addrlo, addrhi, &tr->tr_ac, enm); |
930 | if (enm == NULL((void *)0)) |
931 | return (EINVAL22); |
932 | |
933 | SLIST_FOREACH(mc, &tr->tr_mc_head, mc_entries) |
934 | if (mc->mc_enmmc_u.mcu_enm == enm) |
935 | break; |
936 | |
937 | /* We won't delete entries we didn't add */ |
938 | if (mc == NULL((void *)0)) |
939 | return (EINVAL22); |
940 | |
941 | if ((error = ether_delmulti(ifr, &tr->tr_ac)) != ENETRESET52) |
942 | return (error); |
943 | |
944 | /* We no longer use this multicast address. Tell parent so. */ |
945 | error = trunk_ioctl_allports(tr, SIOCDELMULTI, (caddr_t)ifr); |
946 | if (error == 0) { |
947 | SLIST_REMOVE(&tr->tr_mc_head, mc, trunk_mc, mc_entries); |
948 | free(mc, M_DEVBUF2, sizeof(*mc)); |
949 | } else { |
950 | /* XXX At least one port failed to remove the address */ |
951 | if (tr->tr_ifflagstr_ac.ac_if.if_flags & IFF_DEBUG0x4) { |
952 | printf("%s: failed to remove multicast address " |
953 | "on all ports (%d)\n", tr->tr_ifnametr_ac.ac_if.if_xname, error); |
954 | } |
955 | (void)ether_addmulti(ifr, &tr->tr_ac); |
956 | } |
957 | |
958 | return (0); |
959 | } |
960 | |
961 | void |
962 | trunk_ether_purgemulti(struct trunk_softc *tr) |
963 | { |
964 | struct trunk_mc *mc; |
965 | struct trunk_ifreq ifs; |
966 | struct ifreq *ifr = &ifs.ifreq.ifreq; |
967 | |
968 | while ((mc = SLIST_FIRST(&tr->tr_mc_head)) != NULL) { |
969 | bcopy(&mc->mc_addr, &ifr->ifr_addrifr_ifru.ifru_addr, mc->mc_addr.ss_len); |
970 | |
971 | /* Try to remove multicast address on all ports */ |
972 | trunk_ioctl_allports(tr, SIOCDELMULTI, (caddr_t)ifr); |
973 | |
974 | SLIST_REMOVE(&tr->tr_mc_head, mc, trunk_mc, mc_entries); |
975 | free(mc, M_DEVBUF2, sizeof(*mc)); |
976 | } |
977 | } |
978 | |
979 | int |
980 | trunk_ether_cmdmulti(struct trunk_port *tp, u_long cmd) |
981 | { |
982 | struct trunk_softc *tr = (struct trunk_softc *)tp->tp_trunk; |
983 | struct trunk_mc *mc; |
984 | struct trunk_ifreq ifs; |
985 | struct ifreq *ifr = &ifs.ifreq.ifreq; |
986 | int ret, error = 0; |
987 | |
988 | bcopy(tp->tp_ifnametp_if->if_xname, ifr->ifr_name, IFNAMSIZ16); |
989 | SLIST_FOREACH(mc, &tr->tr_mc_head, mc_entries) { |
990 | bcopy(&mc->mc_addr, &ifr->ifr_addrifr_ifru.ifru_addr, mc->mc_addr.ss_len); |
991 | |
992 | if ((ret = tp->tp_ioctl(tp->tp_if, cmd, (caddr_t)ifr)) != 0) { |
993 | if (tr->tr_ifflagstr_ac.ac_if.if_flags & IFF_DEBUG0x4) { |
994 | printf("%s: ioctl %lu failed on %s: %d\n", |
995 | tr->tr_ifnametr_ac.ac_if.if_xname, cmd, tp->tp_ifnametp_if->if_xname, ret); |
996 | } |
997 | /* Store last known error and continue */ |
998 | error = ret; |
999 | } |
1000 | } |
1001 | |
1002 | return (error); |
1003 | } |
1004 | |
1005 | int |
1006 | trunk_ioctl_allports(struct trunk_softc *tr, u_long cmd, caddr_t data) |
1007 | { |
1008 | struct ifreq *ifr = (struct ifreq *)data; |
1009 | struct trunk_port *tp; |
1010 | int ret, error = 0; |
1011 | |
1012 | SLIST_FOREACH(tp, &tr->tr_ports, tp_entries) { |
1013 | bcopy(tp->tp_ifnametp_if->if_xname, ifr->ifr_name, IFNAMSIZ16); |
1014 | if ((ret = tp->tp_ioctl(tp->tp_if, cmd, data)) != 0) { |
1015 | if (tr->tr_ifflagstr_ac.ac_if.if_flags & IFF_DEBUG0x4) { |
1016 | printf("%s: ioctl %lu failed on %s: %d\n", |
1017 | tr->tr_ifnametr_ac.ac_if.if_xname, cmd, tp->tp_ifnametp_if->if_xname, ret); |
1018 | } |
1019 | /* Store last known error and continue */ |
1020 | error = ret; |
1021 | } |
1022 | } |
1023 | |
1024 | return (error); |
1025 | } |
1026 | |
1027 | void |
1028 | trunk_start(struct ifnet *ifp) |
1029 | { |
1030 | struct trunk_softc *tr = (struct trunk_softc *)ifp->if_softc; |
1031 | struct mbuf *m; |
1032 | int error; |
1033 | |
1034 | for (;;) { |
1035 | m = ifq_dequeue(&ifp->if_snd); |
1036 | if (m == NULL((void *)0)) |
1037 | break; |
1038 | |
1039 | #if NBPFILTER > 0 |
1040 | if (ifp->if_bpf) |
1041 | bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT(1 << 1)); |
1042 | #endif |
1043 | |
1044 | if (tr->tr_proto != TRUNK_PROTO_NONE && tr->tr_count) { |
1045 | error = (*tr->tr_start)(tr, m); |
1046 | if (error != 0) |
1047 | ifp->if_oerrorsif_data.ifi_oerrors++; |
1048 | } else { |
1049 | m_freem(m); |
1050 | if (tr->tr_proto != TRUNK_PROTO_NONE) |
1051 | ifp->if_oerrorsif_data.ifi_oerrors++; |
1052 | } |
1053 | } |
1054 | } |
1055 | |
1056 | u_int32_t |
1057 | trunk_hashmbuf(struct mbuf *m, SIPHASH_KEY *key) |
1058 | { |
1059 | u_int16_t etype, ether_vtag; |
1060 | u_int32_t p = 0; |
1061 | u_int16_t *vlan, vlanbuf[2]; |
1062 | int off; |
1063 | struct ether_header *eh; |
1064 | struct ip *ip, ipbuf; |
1065 | #ifdef INET6 |
1066 | u_int32_t flow; |
1067 | struct ip6_hdr *ip6, ip6buf; |
1068 | #endif |
1069 | SIPHASH_CTX ctx; |
1070 | |
1071 | if (m->m_pkthdrM_dat.MH.MH_pkthdr.csum_flags & M_FLOWID0x4000) |
1072 | return (m->m_pkthdrM_dat.MH.MH_pkthdr.ph_flowid); |
1073 | |
1074 | SipHash24_Init(&ctx, key)SipHash_Init((&ctx), (key)); |
1075 | off = sizeof(*eh); |
1076 | if (m->m_lenm_hdr.mh_len < off) |
1077 | goto done; |
1078 | eh = mtod(m, struct ether_header *); |
1079 | etype = ntohs(eh->ether_type); |
1080 | SipHash24_Update(&ctx, &eh->ether_shost, ETHER_ADDR_LEN)SipHash_Update((&ctx), 2, 4, (&eh->ether_shost), ( 6)); |
1081 | SipHash24_Update(&ctx, &eh->ether_dhost, ETHER_ADDR_LEN)SipHash_Update((&ctx), 2, 4, (&eh->ether_dhost), ( 6)); |
1082 | |
1083 | /* Special handling for encapsulating VLAN frames */ |
1084 | if (m->m_flagsm_hdr.mh_flags & M_VLANTAG0x0020) { |
1085 | ether_vtag = EVL_VLANOFTAG(m->m_pkthdr.ether_vtag)((m->M_dat.MH.MH_pkthdr.ether_vtag) & 0xFFF); |
1086 | SipHash24_Update(&ctx, ðer_vtag, sizeof(ether_vtag))SipHash_Update((&ctx), 2, 4, (ðer_vtag), (sizeof(ether_vtag ))); |
1087 | } else if (etype == ETHERTYPE_VLAN0x8100) { |
1088 | if ((vlan = (u_int16_t *) |
1089 | trunk_gethdr(m, off, EVL_ENCAPLEN4, &vlanbuf)) == NULL((void *)0)) |
1090 | return (p); |
1091 | ether_vtag = EVL_VLANOFTAG(*vlan)((*vlan) & 0xFFF); |
1092 | SipHash24_Update(&ctx, ðer_vtag, sizeof(ether_vtag))SipHash_Update((&ctx), 2, 4, (ðer_vtag), (sizeof(ether_vtag ))); |
1093 | etype = ntohs(vlan[1]); |
1094 | off += EVL_ENCAPLEN4; |
1095 | } |
1096 | |
1097 | switch (etype) { |
1098 | case ETHERTYPE_IP0x0800: |
1099 | if ((ip = (struct ip *) |
1100 | trunk_gethdr(m, off, sizeof(*ip), &ipbuf)) == NULL((void *)0)) |
1101 | return (p); |
1102 | SipHash24_Update(&ctx, &ip->ip_src, sizeof(struct in_addr))SipHash_Update((&ctx), 2, 4, (&ip->ip_src), (sizeof (struct in_addr))); |
1103 | SipHash24_Update(&ctx, &ip->ip_dst, sizeof(struct in_addr))SipHash_Update((&ctx), 2, 4, (&ip->ip_dst), (sizeof (struct in_addr))); |
1104 | break; |
1105 | #ifdef INET61 |
1106 | case ETHERTYPE_IPV60x86DD: |
1107 | if ((ip6 = (struct ip6_hdr *) |
1108 | trunk_gethdr(m, off, sizeof(*ip6), &ip6buf)) == NULL((void *)0)) |
1109 | return (p); |
1110 | SipHash24_Update(&ctx, &ip6->ip6_src, sizeof(struct in6_addr))SipHash_Update((&ctx), 2, 4, (&ip6->ip6_src), (sizeof (struct in6_addr))); |
1111 | SipHash24_Update(&ctx, &ip6->ip6_dst, sizeof(struct in6_addr))SipHash_Update((&ctx), 2, 4, (&ip6->ip6_dst), (sizeof (struct in6_addr))); |
1112 | flow = ip6->ip6_flowip6_ctlun.ip6_un1.ip6_un1_flow & IPV6_FLOWLABEL_MASK0xffff0f00; |
1113 | SipHash24_Update(&ctx, &flow, sizeof(flow))SipHash_Update((&ctx), 2, 4, (&flow), (sizeof(flow))); /* IPv6 flow label */ |
1114 | break; |
1115 | #endif |
1116 | } |
1117 | |
1118 | done: |
1119 | return SipHash24_End(&ctx)SipHash_End((&ctx), 2, 4); |
1120 | } |
1121 | |
1122 | void |
1123 | trunk_init(struct ifnet *ifp) |
1124 | { |
1125 | struct trunk_softc *tr = (struct trunk_softc *)ifp->if_softc; |
1126 | |
1127 | ifp->if_flags |= IFF_RUNNING0x40; |
1128 | |
1129 | if (tr->tr_init != NULL((void *)0)) |
1130 | (*tr->tr_init)(tr); |
1131 | } |
1132 | |
1133 | void |
1134 | trunk_stop(struct ifnet *ifp) |
1135 | { |
1136 | struct trunk_softc *tr = (struct trunk_softc *)ifp->if_softc; |
1137 | |
1138 | ifp->if_flags &= ~IFF_RUNNING0x40; |
1139 | |
1140 | if (tr->tr_stop != NULL((void *)0)) |
1141 | (*tr->tr_stop)(tr); |
1142 | } |
1143 | |
1144 | void |
1145 | trunk_input(struct ifnet *ifp, struct mbuf *m) |
1146 | { |
1147 | struct arpcom *ac0 = (struct arpcom *)ifp; |
1148 | struct trunk_port *tp; |
1149 | struct trunk_softc *tr; |
1150 | struct ifnet *trifp = NULL((void *)0); |
1151 | struct ether_header *eh; |
1152 | |
1153 | if (m->m_lenm_hdr.mh_len < sizeof(*eh)) |
1154 | goto bad; |
1155 | |
1156 | eh = mtod(m, struct ether_header *); |
1157 | if (ETHER_IS_MULTICAST(eh->ether_dhost)) |
1158 | ifp->if_imcastsif_data.ifi_imcasts++; |
1159 | |
1160 | /* Should be checked by the caller */ |
1161 | if (ifp->if_typeif_data.ifi_type != IFT_IEEE8023ADLAG0xa1) |
1162 | goto bad; |
1163 | |
1164 | tp = (struct trunk_port *)ac0->ac_trunkport; |
1165 | if ((tr = (struct trunk_softc *)tp->tp_trunk) == NULL((void *)0)) |
1166 | goto bad; |
1167 | |
1168 | trifp = &tr->tr_ac.ac_if; |
1169 | if (tr->tr_proto == TRUNK_PROTO_NONE) |
1170 | goto bad; |
1171 | |
1172 | if ((*tr->tr_input)(tr, tp, m)) { |
1173 | /* |
1174 | * We stop here if the packet has been consumed |
1175 | * by the protocol routine. |
1176 | */ |
1177 | return; |
1178 | } |
1179 | |
1180 | if ((trifp->if_flags & (IFF_UP0x1|IFF_RUNNING0x40)) != (IFF_UP0x1|IFF_RUNNING0x40)) |
1181 | goto bad; |
1182 | |
1183 | /* |
1184 | * Drop promiscuously received packets if we are not in |
1185 | * promiscuous mode. |
1186 | */ |
1187 | if (!ETHER_IS_MULTICAST(eh->ether_dhost) && |
1188 | (ifp->if_flags & IFF_PROMISC0x100) && |
1189 | (trifp->if_flags & IFF_PROMISC0x100) == 0) { |
1190 | if (bcmp(&tr->tr_ac.ac_enaddr, eh->ether_dhost, |
1191 | ETHER_ADDR_LEN6)) { |
1192 | m_freem(m); |
1193 | return; |
1194 | } |
1195 | } |
1196 | |
1197 | |
1198 | if_vinput(trifp, m); |
1199 | return; |
1200 | |
1201 | bad: |
1202 | if (trifp != NULL((void *)0)) |
1203 | trifp->if_ierrorsif_data.ifi_ierrors++; |
1204 | m_freem(m); |
1205 | } |
1206 | |
1207 | int |
1208 | trunk_media_change(struct ifnet *ifp) |
1209 | { |
1210 | struct trunk_softc *tr = (struct trunk_softc *)ifp->if_softc; |
1211 | |
1212 | if (tr->tr_ifflagstr_ac.ac_if.if_flags & IFF_DEBUG0x4) |
1213 | printf("%s\n", __func__); |
1214 | |
1215 | /* Ignore */ |
1216 | return (0); |
1217 | } |
1218 | |
1219 | void |
1220 | trunk_media_status(struct ifnet *ifp, struct ifmediareq *imr) |
1221 | { |
1222 | struct trunk_softc *tr = (struct trunk_softc *)ifp->if_softc; |
1223 | struct trunk_port *tp; |
1224 | |
1225 | imr->ifm_status = IFM_AVALID0x0000000000000001ULL; |
1226 | imr->ifm_active = IFM_ETHER0x0000000000000100ULL | IFM_AUTO0ULL; |
1227 | |
1228 | SLIST_FOREACH(tp, &tr->tr_ports, tp_entries) { |
1229 | if (TRUNK_PORTACTIVE(tp)) |
1230 | imr->ifm_status |= IFM_ACTIVE0x0000000000000002ULL; |
1231 | } |
1232 | } |
1233 | |
1234 | void |
1235 | trunk_port_state(void *arg) |
1236 | { |
1237 | struct trunk_port *tp = (struct trunk_port *)arg; |
1238 | struct trunk_softc *tr = NULL((void *)0); |
1239 | |
1240 | if (tp != NULL((void *)0)) |
1241 | tr = (struct trunk_softc *)tp->tp_trunk; |
1242 | if (tr == NULL((void *)0)) |
1243 | return; |
1244 | if (tr->tr_linkstate != NULL((void *)0)) |
1245 | (*tr->tr_linkstate)(tp); |
1246 | trunk_link_active(tr, tp); |
1247 | } |
1248 | |
1249 | struct trunk_port * |
1250 | trunk_link_active(struct trunk_softc *tr, struct trunk_port *tp) |
1251 | { |
1252 | struct trunk_port *tp_next, *rval = NULL((void *)0); |
1253 | int new_link = LINK_STATE_DOWN2; |
1254 | |
1255 | /* |
1256 | * Search a port which reports an active link state. |
1257 | */ |
1258 | |
1259 | if (tp == NULL((void *)0)) |
1260 | goto search; |
1261 | if (TRUNK_PORTACTIVE(tp)) { |
1262 | rval = tp; |
1263 | goto found; |
1264 | } |
1265 | if ((tp_next = SLIST_NEXT(tp, tp_entries)) != NULL && |
1266 | TRUNK_PORTACTIVE(tp_next)) { |
1267 | rval = tp_next; |
1268 | goto found; |
1269 | } |
1270 | |
1271 | search: |
1272 | SLIST_FOREACH(tp_next, &tr->tr_ports, tp_entries) { |
1273 | if (TRUNK_PORTACTIVE(tp_next)) { |
1274 | rval = tp_next; |
1275 | goto found; |
1276 | } |
1277 | } |
1278 | |
1279 | found: |
1280 | if (rval != NULL((void *)0)) { |
1281 | /* |
1282 | * The IEEE 802.1D standard assumes that a trunk with |
1283 | * multiple ports is always full duplex. This is valid |
1284 | * for load sharing trunks and if at least two links |
1285 | * are active. Unfortunately, checking the latter would |
1286 | * be too expensive at this point. |
1287 | */ |
1288 | if ((tr->tr_capabilitiestr_ac.ac_if.if_data.ifi_capabilities & IFCAP_TRUNK_FULLDUPLEX0x00010000) && |
1289 | (tr->tr_count > 1)) |
1290 | new_link = LINK_STATE_FULL_DUPLEX6; |
1291 | else |
1292 | new_link = rval->tp_link_statetp_if->if_data.ifi_link_state; |
1293 | } |
1294 | |
1295 | if (tr->tr_ac.ac_if.if_link_stateif_data.ifi_link_state != new_link) { |
1296 | tr->tr_ac.ac_if.if_link_stateif_data.ifi_link_state = new_link; |
1297 | if_link_state_change(&tr->tr_ac.ac_if); |
1298 | } |
1299 | |
1300 | return (rval); |
1301 | } |
1302 | |
1303 | const void * |
1304 | trunk_gethdr(struct mbuf *m, u_int off, u_int len, void *buf) |
1305 | { |
1306 | if (m->m_pkthdrM_dat.MH.MH_pkthdr.len < (off + len)) |
1307 | return (NULL((void *)0)); |
1308 | else if (m->m_lenm_hdr.mh_len < (off + len)) { |
1309 | m_copydata(m, off, len, buf); |
1310 | return (buf); |
1311 | } |
1312 | return (mtod(m, caddr_t) + off); |
1313 | } |
1314 | |
1315 | /* |
1316 | * Simple round robin trunking |
1317 | */ |
1318 | |
int
trunk_rr_attach(struct trunk_softc *tr)
{
	struct trunk_port *tp;

	tr->tr_detach = trunk_rr_detach;
	tr->tr_start = trunk_rr_start;
	tr->tr_input = trunk_rr_input;
	tr->tr_init = NULL;
	tr->tr_stop = NULL;
	tr->tr_linkstate = NULL;
	tr->tr_port_create = NULL;
	tr->tr_port_destroy = trunk_rr_port_destroy;
	tr->tr_capabilities = IFCAP_TRUNK_FULLDUPLEX;
	tr->tr_req = NULL;
	tr->tr_portreq = NULL;

	tp = SLIST_FIRST(&tr->tr_ports);
	tr->tr_psc = (caddr_t)tp;

	return (0);
}

int
trunk_rr_detach(struct trunk_softc *tr)
{
	tr->tr_psc = NULL;
	return (0);
}

void
trunk_rr_port_destroy(struct trunk_port *tp)
{
	struct trunk_softc *tr = (struct trunk_softc *)tp->tp_trunk;

	if (tp == (struct trunk_port *)tr->tr_psc)
		tr->tr_psc = NULL;
}

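/*
 * Transmit on the port cached in tr_psc (or the first active port if
 * the cache is empty) and advance tr_psc to the next active port, so
 * consecutive frames rotate over all links.
 */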
int
trunk_rr_start(struct trunk_softc *tr, struct mbuf *m)
{
	struct trunk_port *tp = (struct trunk_port *)tr->tr_psc, *tp_next;
	int error = 0;

	if (tp == NULL && (tp = trunk_link_active(tr, NULL)) == NULL) {
		m_freem(m);
		return (ENOENT);
	}

	if ((error = if_enqueue(tp->tp_if, m)) != 0)
		return (error);

	/* Get next active port */
	tp_next = trunk_link_active(tr, SLIST_NEXT(tp, tp_entries));
	tr->tr_psc = (caddr_t)tp_next;

	return (0);
}

int
trunk_rr_input(struct trunk_softc *tr, struct trunk_port *tp, struct mbuf *m)
{
	/* Just pass in the packet to our trunk device */
	return (0);
}

/*
 * Active failover
 */

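/*
 * All traffic is carried by a single active port, normally the
 * primary (master) port.  When its link goes down the next active
 * port on the list takes over; frames received on any other port are
 * discarded so that the host never sees duplicates.
 */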
int
trunk_fail_attach(struct trunk_softc *tr)
{
	tr->tr_detach = trunk_fail_detach;
	tr->tr_start = trunk_fail_start;
	tr->tr_input = trunk_fail_input;
	tr->tr_init = NULL;
	tr->tr_stop = NULL;
	tr->tr_port_create = trunk_fail_port_create;
	tr->tr_port_destroy = trunk_fail_port_destroy;
	tr->tr_linkstate = trunk_fail_linkstate;
	tr->tr_req = NULL;
	tr->tr_portreq = NULL;

	/* Get primary or the next active port */
	tr->tr_psc = (caddr_t)trunk_link_active(tr, tr->tr_primary);

	return (0);
}

int
trunk_fail_detach(struct trunk_softc *tr)
{
	tr->tr_psc = NULL;
	return (0);
}

int
trunk_fail_port_create(struct trunk_port *tp)
{
	struct trunk_softc *tr = (struct trunk_softc *)tp->tp_trunk;

	/* Get primary or the next active port */
	tr->tr_psc = (caddr_t)trunk_link_active(tr, tr->tr_primary);
	return (0);
}

void
trunk_fail_port_destroy(struct trunk_port *tp)
{
	struct trunk_softc *tr = (struct trunk_softc *)tp->tp_trunk;
	struct trunk_port *tp_next;

	if ((caddr_t)tp == tr->tr_psc) {
		/* Get the next active port */
		tp_next = trunk_link_active(tr, SLIST_NEXT(tp, tp_entries));
		if (tp_next == tp)
			tr->tr_psc = NULL;
		else
			tr->tr_psc = (caddr_t)tp_next;
	} else {
		/* Get primary or the next active port */
		tr->tr_psc = (caddr_t)trunk_link_active(tr, tr->tr_primary);
	}
}

int
trunk_fail_start(struct trunk_softc *tr, struct mbuf *m)
{
	struct trunk_port *tp = (struct trunk_port *)tr->tr_psc;

	/* Use the master port if active or the next available port */
	if (tp == NULL) {
		m_freem(m);
		return (ENOENT);
	}

	return (if_enqueue(tp->tp_if, m));
}

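/*
 * Only frames arriving on the currently active port are passed up;
 * anything received on a standby port is dropped to suppress
 * duplicates while more than one link is physically up.
 */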
int
trunk_fail_input(struct trunk_softc *tr, struct trunk_port *tp, struct mbuf *m)
{
	if ((caddr_t)tp == tr->tr_psc)
		return (0);
	m_freem(m);
	return (-1);
}

void
trunk_fail_linkstate(struct trunk_port *tp)
{
	struct trunk_softc *tr = (struct trunk_softc *)tp->tp_trunk;

	tr->tr_psc = (caddr_t)trunk_link_active(tr, tr->tr_primary);
}

/*
 * Loadbalancing
 */

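/*
 * Frames are distributed over the trunk ports based on a keyed hash
 * of the packet headers (trunk_hashmbuf() with the random lb_key), so
 * a given flow keeps mapping to the same port as long as the port set
 * is unchanged.  The lb_ports table below caches the current ports
 * and is rebuilt whenever a port is added or removed.
 */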
int
trunk_lb_attach(struct trunk_softc *tr)
{
	struct trunk_lb *lb;

	if ((lb = malloc(sizeof(*lb), M_DEVBUF, M_NOWAIT|M_ZERO)) == NULL)
		return (ENOMEM);

	tr->tr_detach = trunk_lb_detach;
	tr->tr_start = trunk_lb_start;
	tr->tr_input = trunk_lb_input;
	tr->tr_port_create = trunk_lb_port_create;
	tr->tr_port_destroy = trunk_lb_port_destroy;
	tr->tr_linkstate = NULL;
	tr->tr_capabilities = IFCAP_TRUNK_FULLDUPLEX;
	tr->tr_req = NULL;
	tr->tr_portreq = NULL;
	tr->tr_init = NULL;
	tr->tr_stop = NULL;

	arc4random_buf(&lb->lb_key, sizeof(lb->lb_key));
	tr->tr_psc = (caddr_t)lb;

	return (0);
}

int
trunk_lb_detach(struct trunk_softc *tr)
{
	struct trunk_lb *lb = (struct trunk_lb *)tr->tr_psc;

	free(lb, M_DEVBUF, sizeof *lb);
	return (0);
}

int
trunk_lb_porttable(struct trunk_softc *tr, struct trunk_port *tp)
{
	struct trunk_lb *lb = (struct trunk_lb *)tr->tr_psc;
	struct trunk_port *tp_next;
	int i = 0;

	bzero(&lb->lb_ports, sizeof(lb->lb_ports));
	SLIST_FOREACH(tp_next, &tr->tr_ports, tp_entries) {
		if (tp_next == tp)
			continue;
		if (i >= TRUNK_MAX_PORTS)
			return (EINVAL);
		if (tr->tr_ifflags & IFF_DEBUG)
			printf("%s: port %s at index %d\n",
			    tr->tr_ifname, tp_next->tp_ifname, i);
		lb->lb_ports[i++] = tp_next;
	}

	return (0);
}

int
trunk_lb_port_create(struct trunk_port *tp)
{
	struct trunk_softc *tr = (struct trunk_softc *)tp->tp_trunk;
	return (trunk_lb_porttable(tr, NULL));
}

void
trunk_lb_port_destroy(struct trunk_port *tp)
{
	struct trunk_softc *tr = (struct trunk_softc *)tp->tp_trunk;
	trunk_lb_porttable(tr, tp);
}

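/*
 * Select the output port from the header hash modulo the number of
 * ports; fall back to the next active port if the selected one is
 * down or missing.
 */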
int
trunk_lb_start(struct trunk_softc *tr, struct mbuf *m)
{
	struct trunk_lb *lb = (struct trunk_lb *)tr->tr_psc;
	struct trunk_port *tp = NULL;
	u_int32_t p = 0;

	p = trunk_hashmbuf(m, &lb->lb_key);
	p %= tr->tr_count;
	tp = lb->lb_ports[p];

	/*
	 * Check the port's link state. This will return the next active
	 * port if the link is down or the port is NULL.
	 */
	if ((tp = trunk_link_active(tr, tp)) == NULL) {
		m_freem(m);
		return (ENOENT);
	}

	return (if_enqueue(tp->tp_if, m));
}

int
trunk_lb_input(struct trunk_softc *tr, struct trunk_port *tp, struct mbuf *m)
{
	/* Just pass in the packet to our trunk device */
	return (0);
}

/*
 * Broadcast mode
 */

int
trunk_bcast_attach(struct trunk_softc *tr)
{
	tr->tr_detach = trunk_bcast_detach;
	tr->tr_start = trunk_bcast_start;
	tr->tr_input = trunk_bcast_input;
	tr->tr_init = NULL;
	tr->tr_stop = NULL;
	tr->tr_port_create = NULL;
	tr->tr_port_destroy = NULL;
	tr->tr_linkstate = NULL;
	tr->tr_req = NULL;
	tr->tr_portreq = NULL;

	return (0);
}

int
trunk_bcast_detach(struct trunk_softc *tr)
{
	return (0);
}

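/*
 * Send the frame on every active port: m_copym() duplicates the mbuf
 * for all but the last active port, which transmits the original.
 * ENOBUFS is returned only if transmission failed on every active
 * port.
 */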
int
trunk_bcast_start(struct trunk_softc *tr, struct mbuf *m0)
{
	int active_ports = 0;
	int errors = 0;
	struct trunk_port *tp, *last = NULL;
	struct mbuf *m;

	SLIST_FOREACH(tp, &tr->tr_ports, tp_entries) {
		if (!TRUNK_PORTACTIVE(tp))
			continue;

		active_ports++;

		if (last != NULL) {
			m = m_copym(m0, 0, M_COPYALL, M_DONTWAIT);
			if (m == NULL) {
				errors++;
				break;
			}

			if (if_enqueue(last->tp_if, m) != 0)
				errors++;
		}
		last = tp;
	}
	if (last == NULL) {
		m_freem(m0);
		return (ENOENT);
	}

	if (if_enqueue(last->tp_if, m0) != 0)
		errors++;

	if (errors == active_ports)
		return (ENOBUFS);

	return (0);
}

int
trunk_bcast_input(struct trunk_softc *tr, struct trunk_port *tp, struct mbuf *m)
{
	return (0);
}

/*
 * 802.3ad LACP
 */

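/*
 * The protocol state machines live in the lacp_* routines (see
 * net/trunklacp.h); this glue only wires them into the trunk
 * framework and hands every existing port to LACP when the protocol
 * is attached.
 */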
int
trunk_lacp_attach(struct trunk_softc *tr)
{
	struct trunk_port *tp;
	int error;

	tr->tr_detach = trunk_lacp_detach;
	tr->tr_port_create = lacp_port_create;
	tr->tr_port_destroy = lacp_port_destroy;
	tr->tr_linkstate = lacp_linkstate;
	tr->tr_start = trunk_lacp_start;
	tr->tr_input = trunk_lacp_input;
	tr->tr_init = lacp_init;
	tr->tr_stop = lacp_stop;
	tr->tr_req = lacp_req;
	tr->tr_portreq = lacp_portreq;

	error = lacp_attach(tr);
	if (error)
		return (error);

	SLIST_FOREACH(tp, &tr->tr_ports, tp_entries)
		lacp_port_create(tp);

	return (error);
}

int
trunk_lacp_detach(struct trunk_softc *tr)
{
	struct trunk_port *tp;
	int error;

	SLIST_FOREACH(tp, &tr->tr_ports, tp_entries)
		lacp_port_destroy(tp);

	/* unlocking is safe here */
	error = lacp_detach(tr);

	return (error);
}

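/*
 * Ask LACP for a port that is currently distributing for this frame;
 * if the aggregator has no such port yet the frame is dropped and
 * EBUSY is returned.
 */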
int
trunk_lacp_start(struct trunk_softc *tr, struct mbuf *m)
{
	struct trunk_port *tp;

	tp = lacp_select_tx_port(tr, m);
	if (tp == NULL) {
		m_freem(m);
		return (EBUSY);
	}

	return (if_enqueue(tp->tp_if, m));
}

int
trunk_lacp_input(struct trunk_softc *tr, struct trunk_port *tp, struct mbuf *m)
{
	return (lacp_input(tp, m));
}