File: net/route.c
Warning: line 1598, column 18: Result of 'malloc' is converted to a pointer of type 'char', which is incompatible with sizeof operand type 'struct rt_mpls'
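The flagged line is past the end of this excerpt (the listing below stops at line 1458). This class of warning fires when malloc()'s return value is stored into a pointer whose pointee type does not match the sizeof operand. A minimal sketch of the pattern, assuming the usual kernel malloc(9) signature; the actual statement at line 1598 is not shown here and, by the warning text, likely assigns a sizeof(struct rt_mpls) allocation to a caddr_t (char *) member such as rt_llinfo:

    rt->rt_llinfo = malloc(sizeof(struct rt_mpls), M_TEMP, M_NOWAIT | M_ZERO);

Casting the result to (struct rt_mpls *) or storing it in a correctly typed pointer silences the checker without changing behavior.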
1 | /* $OpenBSD: route.c,v 1.426 2023/11/13 17:18:27 bluhm Exp $ */ |
2 | /* $NetBSD: route.c,v 1.14 1996/02/13 22:00:46 christos Exp $ */ |
3 | |
4 | /* |
5 | * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. |
6 | * All rights reserved. |
7 | * |
8 | * Redistribution and use in source and binary forms, with or without |
9 | * modification, are permitted provided that the following conditions |
10 | * are met: |
11 | * 1. Redistributions of source code must retain the above copyright |
12 | * notice, this list of conditions and the following disclaimer. |
13 | * 2. Redistributions in binary form must reproduce the above copyright |
14 | * notice, this list of conditions and the following disclaimer in the |
15 | * documentation and/or other materials provided with the distribution. |
16 | * 3. Neither the name of the project nor the names of its contributors |
17 | * may be used to endorse or promote products derived from this software |
18 | * without specific prior written permission. |
19 | * |
20 | * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND |
21 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
22 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
23 | * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE |
24 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
25 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
26 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
27 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
28 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
29 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
30 | * SUCH DAMAGE. |
31 | */ |
32 | |
33 | /* |
34 | * Copyright (c) 1980, 1986, 1991, 1993 |
35 | * The Regents of the University of California. All rights reserved. |
36 | * |
37 | * Redistribution and use in source and binary forms, with or without |
38 | * modification, are permitted provided that the following conditions |
39 | * are met: |
40 | * 1. Redistributions of source code must retain the above copyright |
41 | * notice, this list of conditions and the following disclaimer. |
42 | * 2. Redistributions in binary form must reproduce the above copyright |
43 | * notice, this list of conditions and the following disclaimer in the |
44 | * documentation and/or other materials provided with the distribution. |
45 | * 3. Neither the name of the University nor the names of its contributors |
46 | * may be used to endorse or promote products derived from this software |
47 | * without specific prior written permission. |
48 | * |
49 | * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND |
50 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
51 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
52 | * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE |
53 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
54 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
55 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
56 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
57 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
58 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
59 | * SUCH DAMAGE. |
60 | * |
61 | * @(#)route.c 8.2 (Berkeley) 11/15/93 |
62 | */ |
63 | |
64 | /* |
65 | * @(#)COPYRIGHT 1.1 (NRL) 17 January 1995 |
66 | * |
67 | * NRL grants permission for redistribution and use in source and binary |
68 | * forms, with or without modification, of the software and documentation |
69 | * created at NRL provided that the following conditions are met: |
70 | * |
71 | * 1. Redistributions of source code must retain the above copyright |
72 | * notice, this list of conditions and the following disclaimer. |
73 | * 2. Redistributions in binary form must reproduce the above copyright |
74 | * notice, this list of conditions and the following disclaimer in the |
75 | * documentation and/or other materials provided with the distribution. |
76 | * 3. All advertising materials mentioning features or use of this software |
77 | * must display the following acknowledgements: |
78 | * This product includes software developed by the University of |
79 | * California, Berkeley and its contributors. |
80 | * This product includes software developed at the Information |
81 | * Technology Division, US Naval Research Laboratory. |
82 | * 4. Neither the name of the NRL nor the names of its contributors |
83 | * may be used to endorse or promote products derived from this software |
84 | * without specific prior written permission. |
85 | * |
86 | * THE SOFTWARE PROVIDED BY NRL IS PROVIDED BY NRL AND CONTRIBUTORS ``AS |
87 | * IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED |
88 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A |
89 | * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NRL OR |
90 | * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, |
91 | * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, |
92 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR |
93 | * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF |
94 | * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING |
95 | * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS |
96 | * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
97 | * |
98 | * The views and conclusions contained in the software and documentation |
99 | * are those of the authors and should not be interpreted as representing |
100 | * official policies, either expressed or implied, of the US Naval |
101 | * Research Laboratory (NRL). |
102 | */ |
103 | |
104 | #include <sys/param.h> |
105 | #include <sys/systm.h> |
106 | #include <sys/mbuf.h> |
107 | #include <sys/socket.h> |
108 | #include <sys/socketvar.h> |
109 | #include <sys/timeout.h> |
110 | #include <sys/domain.h> |
111 | #include <sys/ioctl.h> |
112 | #include <sys/kernel.h> |
113 | #include <sys/queue.h> |
114 | #include <sys/pool.h> |
115 | #include <sys/atomic.h> |
116 | #include <sys/mutex.h> |
117 | |
118 | #include <net/if.h> |
119 | #include <net/if_var.h> |
120 | #include <net/if_dl.h> |
121 | #include <net/route.h> |
122 | |
123 | #include <netinet/in.h> |
124 | #include <netinet/ip_var.h> |
125 | #include <netinet/in_var.h> |
126 | |
127 | #ifdef INET6 |
128 | #include <netinet/ip6.h> |
129 | #include <netinet6/ip6_var.h> |
130 | #include <netinet6/in6_var.h> |
131 | #endif |
132 | |
133 | #ifdef MPLS |
134 | #include <netmpls/mpls.h> |
135 | #endif |
136 | |
137 | #ifdef BFD |
138 | #include <net/bfd.h> |
139 | #endif |
140 | |
141 | /* |
142 | * Locks used to protect struct members: |
143 | * I immutable after creation |
144 | * L rtlabel_mtx |
145 | * T rttimer_mtx |
146 | */ |
147 | |
148 | #define ROUNDUP(a) (a>0 ? (1 + (((a) - 1) | (sizeof(long) - 1))) : sizeof(long)) |
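/*
 * Worked example, assuming LP64 where sizeof(long) == 8: ROUNDUP(1)
 * through ROUNDUP(8) all yield 8, ROUNDUP(9) yields 16, and ROUNDUP(0)
 * falls back to sizeof(long), so even a zero-length sockaddr gets a
 * minimal allocation.  This granularity is what the free(9) size
 * arguments for rt_gateway below are computed with.
 */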
149 | |
150 | /* Give some jitter to hash, to avoid synchronization between routers. */ |
151 | static uint32_t rt_hashjitter; |
152 | |
153 | extern unsigned int rtmap_limit; |
154 | |
155 | struct cpumem * rtcounters; |
156 | int rttrash; /* routes not in table but not freed */ |
157 | |
158 | struct pool rtentry_pool; /* pool for rtentry structures */ |
159 | struct pool rttimer_pool; /* pool for rttimer structures */ |
160 | |
161 | int rt_setgwroute(struct rtentry *, const struct sockaddr *, u_int); |
162 | void rt_putgwroute(struct rtentry *, struct rtentry *); |
163 | int rtflushclone1(struct rtentry *, void *, u_int); |
164 | int rtflushclone(struct rtentry *, unsigned int); |
165 | int rt_ifa_purge_walker(struct rtentry *, void *, unsigned int); |
166 | struct rtentry *rt_match(const struct sockaddr *, uint32_t *, int, |
167 | unsigned int); |
168 | int rt_clone(struct rtentry **, const struct sockaddr *, unsigned int); |
169 | struct sockaddr *rt_plentosa(sa_family_t, int, struct sockaddr_in6 *); |
170 | static int rt_copysa(const struct sockaddr *, const struct sockaddr *, |
171 | struct sockaddr **); |
172 | |
173 | #define LABELID_MAX	50000 |
174 | |
175 | struct rt_label { |
176 | 	TAILQ_ENTRY(rt_label)	rtl_entry;	/* [L] */ |
177 | 	char			rtl_name[RTLABEL_LEN];	/* [I] */ |
178 | 	u_int16_t		rtl_id;		/* [I] */ |
179 | 	int			rtl_ref;	/* [L] */ |
180 | }; |
181 | |
182 | TAILQ_HEAD(rt_labels, rt_label)	rt_labels = |
183 |     TAILQ_HEAD_INITIALIZER(rt_labels);	/* [L] */ |
184 | struct mutex rtlabel_mtx = MUTEX_INITIALIZER(IPL_NET); |
185 | |
186 | void |
187 | route_init(void) |
188 | { |
189 | rtcounters = counters_alloc(rts_ncounters); |
190 | |
191 | 	pool_init(&rtentry_pool, sizeof(struct rtentry), 0, IPL_MPFLOOR, 0, |
192 | 	    "rtentry", NULL); |
193 | |
194 | while (rt_hashjitter == 0) |
195 | rt_hashjitter = arc4random(); |
196 | |
197 | #ifdef BFD |
198 | bfdinit(); |
199 | #endif |
200 | } |
201 | |
202 | /* |
203 | * Returns 1 if the (cached) ``rt'' entry is still valid, 0 otherwise. |
204 | */ |
205 | int |
206 | rtisvalid(struct rtentry *rt) |
207 | { |
208 | 	if (rt == NULL) |
209 | 		return (0); |
210 | |
211 | 	if (!ISSET(rt->rt_flags, RTF_UP)) |
212 | 		return (0); |
213 | |
214 | 	if (ISSET(rt->rt_flags, RTF_GATEWAY)) { |
215 | 		KASSERT(rt->rt_gwroute != NULL); |
216 | 		KASSERT(!ISSET(rt->rt_gwroute->rt_flags, RTF_GATEWAY)); |
217 | 		if (!ISSET(rt->rt_gwroute->rt_flags, RTF_UP)) |
218 | 			return (0); |
219 | 	} |
220 | |
221 | 	return (1); |
222 | } |
223 | |
224 | /* |
225 | * Do the actual lookup for rtalloc(9), do not use directly! |
226 | * |
227 | * Return the best matching entry for the destination ``dst''. |
228 | * |
229 | * "RT_RESOLVE" means that a corresponding L2 entry should |
230 | * be added to the routing table and resolved (via ARP or |
231 | * NDP), if it does not exist. |
232 | */ |
233 | struct rtentry * |
234 | rt_match(const struct sockaddr *dst, uint32_t *src, int flags, |
235 | unsigned int tableid) |
236 | { |
237 | 	struct rtentry *rt = NULL; |
238 | |
239 | 	rt = rtable_match(tableid, dst, src); |
240 | 	if (rt == NULL) { |
241 | 		rtstat_inc(rts_unreach); |
242 | 		return (NULL); |
243 | 	} |
244 | |
245 | 	if (ISSET(rt->rt_flags, RTF_CLONING) && ISSET(flags, RT_RESOLVE)) |
246 | 		rt_clone(&rt, dst, tableid); |
247 | |
248 | 	rt->rt_use++; |
249 | 	return (rt); |
250 | } |
251 | |
252 | int |
253 | rt_clone(struct rtentry **rtp, const struct sockaddr *dst, |
254 | unsigned int rtableid) |
255 | { |
256 | struct rt_addrinfo info; |
257 | struct rtentry *rt = *rtp; |
258 | int error = 0; |
259 | |
260 | 	memset(&info, 0, sizeof(info)); |
261 | 	info.rti_info[RTAX_DST] = dst; |
262 | |
263 | 	/* |
264 | 	 * The priority of cloned route should be different |
265 | 	 * to avoid conflict with /32 cloning routes. |
266 | 	 * |
267 | 	 * It should also be higher to let the ARP layer find |
268 | 	 * cloned routes instead of the cloning one. |
269 | 	 */ |
270 | 	KERNEL_LOCK(); |
271 | 	error = rtrequest(RTM_RESOLVE, &info, rt->rt_priority - 1, &rt, |
272 | 	    rtableid); |
273 | 	KERNEL_UNLOCK(); |
274 | 	if (error) { |
275 | 		rtm_miss(RTM_MISS, &info, 0, RTP_NONE, 0, error, rtableid); |
276 | 	} else { |
277 | 		/* Inform listeners of the new route */ |
278 | 		rtm_send(rt, RTM_ADD, 0, rtableid); |
279 | rtfree(*rtp); |
280 | *rtp = rt; |
281 | } |
282 | return (error); |
283 | } |
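/*
 * Example of the priority rule above: a cloning route installed at
 * RTP_DEFAULT (56) produces cloned host entries at priority 55, i.e.
 * numerically lower and therefore preferred, so lookups return the
 * resolved L2 entry instead of the cloning network route.
 */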
284 | |
285 | /* |
286 | * Originated from bridge_hash() in if_bridge.c |
287 | */ |
288 | #define mix(a, b, c) do { \ |
289 | a -= b; a -= c; a ^= (c >> 13); \ |
290 | b -= c; b -= a; b ^= (a << 8); \ |
291 | c -= a; c -= b; c ^= (b >> 13); \ |
292 | a -= b; a -= c; a ^= (c >> 12); \ |
293 | b -= c; b -= a; b ^= (a << 16); \ |
294 | c -= a; c -= b; c ^= (b >> 5); \ |
295 | a -= b; a -= c; a ^= (c >> 3); \ |
296 | b -= c; b -= a; b ^= (a << 10); \ |
297 | c -= a; c -= b; c ^= (b >> 15); \ |
298 | } while (0) |
299 | |
300 | int |
301 | rt_hash(struct rtentry *rt, const struct sockaddr *dst, uint32_t *src) |
302 | { |
303 | uint32_t a, b, c; |
304 | |
305 | 	if (src == NULL || !rtisvalid(rt) || !ISSET(rt->rt_flags, RTF_MPATH)) |
306 | 		return (-1); |
307 | |
308 | 	a = b = 0x9e3779b9; |
309 | 	c = rt_hashjitter; |
310 | |
311 | 	switch (dst->sa_family) { |
312 | 	case AF_INET: |
313 | 	    { |
314 | 		const struct sockaddr_in *sin; |
315 | |
316 | 		if (!ipmultipath) |
317 | 			return (-1); |
318 | |
319 | 		sin = satosin_const(dst); |
320 | 		a += sin->sin_addr.s_addr; |
321 | 		b += src[0]; |
322 | 		mix(a, b, c); |
323 | 		break; |
324 | 	    } |
325 | #ifdef INET6 |
326 | 	case AF_INET6: |
327 | 	    { |
328 | 		const struct sockaddr_in6 *sin6; |
329 | |
330 | 		if (!ip6_multipath) |
331 | 			return (-1); |
332 | |
333 | 		sin6 = satosin6_const(dst); |
334 | 		a += sin6->sin6_addr.s6_addr32[0]; |
335 | 		b += sin6->sin6_addr.s6_addr32[2]; |
336 | 		c += src[0]; |
337 | 		mix(a, b, c); |
338 | 		a += sin6->sin6_addr.s6_addr32[1]; |
339 | 		b += sin6->sin6_addr.s6_addr32[3]; |
340 | 		c += src[1]; |
341 | 		mix(a, b, c); |
342 | 		a += sin6->sin6_addr.s6_addr32[2]; |
343 | 		b += sin6->sin6_addr.s6_addr32[1]; |
344 | 		c += src[2]; |
345 | 		mix(a, b, c); |
346 | 		a += sin6->sin6_addr.s6_addr32[3]; |
347 | 		b += sin6->sin6_addr.s6_addr32[0]; |
348 | 		c += src[3]; |
349 | 		mix(a, b, c); |
350 | break; |
351 | } |
352 | #endif /* INET6 */ |
353 | } |
354 | |
355 | return (c & 0xffff); |
356 | } |
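/*
 * The 16-bit result above is what the rtable layer uses to spread
 * flows across a multipath chain.  A minimal sketch of that selection,
 * assuming npaths equal-priority routes (the real logic lives in
 * rtable_match(), and the rtable_mpath_next() iterator is assumed
 * here):
 *
 *	if ((hash = rt_hash(rt, dst, src)) != -1) {
 *		int threshold = (0xffff / npaths) + 1;
 *		while (hash >= threshold && rt != NULL) {
 *			hash -= threshold;
 *			rt = rtable_mpath_next(rt);
 *		}
 *	}
 */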
357 | |
358 | /* |
359 | * Allocate a route, potentially using multipath to select the peer. |
360 | */ |
361 | struct rtentry * |
362 | rtalloc_mpath(const struct sockaddr *dst, uint32_t *src, unsigned int rtableid) |
363 | { |
364 | 	return (rt_match(dst, src, RT_RESOLVE, rtableid)); |
365 | } |
366 | |
367 | /* |
368 | * Look in the routing table for the best matching entry for |
369 | * ``dst''. |
370 | * |
371 | * If a route with a gateway is found and its next hop is no |
372 | * longer valid, try to cache it. |
373 | */ |
374 | struct rtentry * |
375 | rtalloc(const struct sockaddr *dst, int flags, unsigned int rtableid) |
376 | { |
377 | 	return (rt_match(dst, NULL, flags, rtableid)); |
378 | } |
379 | |
380 | /* |
381 | * Cache the route entry corresponding to a reachable next hop in |
382 | * the gateway entry ``rt''. |
383 | */ |
384 | int |
385 | rt_setgwroute(struct rtentry *rt, const struct sockaddr *gate, u_int rtableid) |
386 | { |
387 | struct rtentry *prt, *nhrt; |
388 | unsigned int rdomain = rtable_l2(rtableid); |
389 | int error; |
390 | |
391 | 	NET_ASSERT_LOCKED(); |
392 | |
393 | 	/* If we cannot find a valid next hop bail. */ |
394 | 	nhrt = rt_match(gate, NULL, RT_RESOLVE, rdomain); |
395 | 	if (nhrt == NULL) |
396 | 		return (ENOENT); |
397 | |
398 | 	/* Next hop entry must be on the same interface. */ |
399 | 	if (nhrt->rt_ifidx != rt->rt_ifidx) { |
400 | 		struct sockaddr_in6 sa_mask; |
401 | |
402 | 		if (!ISSET(nhrt->rt_flags, RTF_LLINFO) || |
403 | 		    !ISSET(nhrt->rt_flags, RTF_CLONED)) { |
404 | 			rtfree(nhrt); |
405 | 			return (EHOSTUNREACH); |
406 | 		} |
407 | |
408 | 		/* |
409 | 		 * We found a L2 entry, so we might have multiple |
410 | 		 * RTF_CLONING routes for the same subnet.  Query |
411 | 		 * the first route of the multipath chain and iterate |
412 | 		 * until we find the correct one. |
413 | 		 */ |
414 | 		prt = rtable_lookup(rdomain, rt_key(nhrt->rt_parent), |
415 | 		    rt_plen2mask(nhrt->rt_parent, &sa_mask), NULL, RTP_ANY); |
416 | 		rtfree(nhrt); |
417 | |
418 | 		while (prt != NULL && prt->rt_ifidx != rt->rt_ifidx) |
419 | 			prt = rtable_iterate(prt); |
420 | |
421 | 		/* We found nothing or a non-cloning MPATH route. */ |
422 | 		if (prt == NULL || !ISSET(prt->rt_flags, RTF_CLONING)) { |
423 | 			rtfree(prt); |
424 | 			return (EHOSTUNREACH); |
425 | 		} |
426 | |
427 | 		error = rt_clone(&prt, gate, rdomain); |
428 | 		if (error) { |
429 | 			rtfree(prt); |
430 | 			return (error); |
431 | 		} |
432 | 		nhrt = prt; |
433 | 	} |
434 | |
435 | 	/* |
436 | 	 * Next hop must be reachable, this also prevents rtentry |
437 | 	 * loops for example when rt->rt_gwroute points to rt. |
438 | 	 */ |
439 | 	if (ISSET(nhrt->rt_flags, RTF_CLONING|RTF_GATEWAY)) { |
440 | 		rtfree(nhrt); |
441 | 		return (ENETUNREACH); |
442 | 	} |
443 | |
444 | 	/* |
445 | 	 * If the MTU of next hop is 0, this will reset the MTU of the |
446 | 	 * route to run PMTUD again from scratch. |
447 | 	 */ |
448 | 	if (!ISSET(rt->rt_locks, RTV_MTU) && (rt->rt_mtu > nhrt->rt_mtu)) |
449 | 		rt->rt_mtu = nhrt->rt_mtu; |
450 | |
451 | 	/* |
452 | 	 * To avoid reference counting problems when writing link-layer |
453 | 	 * addresses in an outgoing packet, we ensure that the lifetime |
454 | 	 * of a cached entry is greater than the longest lifetime of the |
455 | 	 * gateway entries that point to it. |
456 | 	 */ |
457 | 	nhrt->rt_flags |= RTF_CACHED; |
458 | 	nhrt->rt_cachecnt++; |
459 | |
460 | /* commit */ |
461 | rt_putgwroute(rt, nhrt); |
462 | |
463 | return (0); |
464 | } |
465 | |
466 | /* |
467 | * Invalidate the cached route entry of the gateway entry ``rt''. |
468 | */ |
469 | void |
470 | rt_putgwroute(struct rtentry *rt, struct rtentry *nhrt) |
471 | { |
472 | struct rtentry *onhrt; |
473 | |
474 | 	NET_ASSERT_LOCKED(); |
475 | |
476 | 	if (!ISSET(rt->rt_flags, RTF_GATEWAY)) |
477 | 		return; |
478 | |
479 | 	/* this is protected as per [X] in route.h */ |
480 | 	onhrt = rt->rt_gwroute; |
481 | 	rt->rt_gwroute = nhrt; |
482 | |
483 | 	if (onhrt != NULL) { |
484 | 		KASSERT(onhrt->rt_cachecnt > 0); |
485 | 		KASSERT(ISSET(onhrt->rt_flags, RTF_CACHED)); |
486 | |
487 | 		--onhrt->rt_cachecnt; |
488 | 		if (onhrt->rt_cachecnt == 0) |
489 | 			CLR(onhrt->rt_flags, RTF_CACHED); |
490 | |
491 | rtfree(onhrt); |
492 | } |
493 | } |
494 | |
495 | void |
496 | rtref(struct rtentry *rt) |
497 | { |
498 | refcnt_take(&rt->rt_refcnt); |
499 | } |
500 | |
501 | void |
502 | rtfree(struct rtentry *rt) |
503 | { |
504 | 	if (rt == NULL) |
505 | 		return; |
506 | |
507 | 	if (refcnt_rele(&rt->rt_refcnt) == 0) |
508 | 		return; |
509 | |
510 | 	KASSERT(!ISSET(rt->rt_flags, RTF_UP)); |
511 | 	KASSERT(!RT_ROOT(rt)); |
512 | 	atomic_dec_int(&rttrash); |
513 | |
514 | 	rt_timer_remove_all(rt); |
515 | 	ifafree(rt->rt_ifa); |
516 | 	rtlabel_unref(rt->rt_labelid); |
517 | #ifdef MPLS |
518 | 	rt_mpls_clear(rt); |
519 | #endif |
520 | 	if (rt->rt_gateway != NULL) { |
521 | 		free(rt->rt_gateway, M_RTABLE, |
522 | 		    ROUNDUP(rt->rt_gateway->sa_len)); |
523 | 	} |
524 | 	free(rt_key(rt), M_RTABLE, rt_key(rt)->sa_len); |
525 | |
526 | pool_put(&rtentry_pool, rt); |
527 | } |
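/*
 * Usage note: every successful rtalloc()/rtalloc_mpath()/rtable_lookup()
 * returns a referenced entry, so callers pair it with rtfree(), as the
 * rest of this file does:
 *
 *	rt = rtalloc(dst, RT_RESOLVE, rtableid);
 *	if (rtisvalid(rt))
 *		... use rt ...
 *	rtfree(rt);	(safe even when rt is NULL)
 */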
528 | |
529 | struct ifaddr * |
530 | ifaref(struct ifaddr *ifa) |
531 | { |
532 | refcnt_take(&ifa->ifa_refcnt); |
533 | return ifa; |
534 | } |
535 | |
536 | void |
537 | ifafree(struct ifaddr *ifa) |
538 | { |
539 | if (refcnt_rele(&ifa->ifa_refcnt) == 0) |
540 | return; |
541 | 	free(ifa, M_IFADDR, 0); |
542 | } |
543 | |
544 | /* |
545 | * Force a routing table entry to the specified |
546 | * destination to go through the given gateway. |
547 | * Normally called as a result of a routing redirect |
548 | * message from the network layer. |
549 | */ |
550 | void |
551 | rtredirect(struct sockaddr *dst, struct sockaddr *gateway, |
552 | struct sockaddr *src, struct rtentry **rtp, unsigned int rdomain) |
553 | { |
554 | struct rtentry *rt; |
555 | int error = 0; |
556 | enum rtstat_counters stat = rts_ncounters; |
557 | struct rt_addrinfo info; |
558 | struct ifaddr *ifa; |
559 | unsigned int ifidx = 0; |
560 | 	int flags = RTF_GATEWAY|RTF_HOST; |
561 | 	uint8_t prio = RTP_NONE; |
562 | |
563 | 	NET_ASSERT_LOCKED(); |
564 | |
565 | 	/* verify the gateway is directly reachable */ |
566 | 	rt = rtalloc(gateway, 0, rdomain); |
567 | 	if (!rtisvalid(rt) || ISSET(rt->rt_flags, RTF_GATEWAY)) { |
568 | 		rtfree(rt); |
569 | 		error = ENETUNREACH; |
570 | 		goto out; |
571 | 	} |
572 | 	ifidx = rt->rt_ifidx; |
573 | 	ifa = rt->rt_ifa; |
574 | 	rtfree(rt); |
575 | 	rt = NULL; |
576 | |
577 | 	rt = rtable_lookup(rdomain, dst, NULL, NULL, RTP_ANY); |
578 | 	/* |
579 | 	 * If the redirect isn't from our current router for this dst, |
580 | 	 * it's either old or wrong.  If it redirects us to ourselves, |
581 | 	 * we have a routing loop, perhaps as a result of an interface |
582 | 	 * going down recently. |
583 | 	 */ |
584 | #define equal(a1, a2) \ |
585 | 	((a1)->sa_len == (a2)->sa_len && \ |
586 | 	 bcmp((caddr_t)(a1), (caddr_t)(a2), (a1)->sa_len) == 0) |
587 | 	if (rt != NULL && (!equal(src, rt->rt_gateway) || rt->rt_ifa != ifa)) |
588 | 		error = EINVAL; |
589 | 	else if (ifa_ifwithaddr(gateway, rdomain) != NULL || |
590 | 	    (gateway->sa_family == AF_INET && |
591 | 	    in_broadcast(satosin(gateway)->sin_addr, rdomain))) |
592 | 		error = EHOSTUNREACH; |
593 | 	if (error) |
594 | 		goto done; |
595 | 	/* |
596 | 	 * Create a new entry if we just got back a wildcard entry |
597 | 	 * or the lookup failed.  This is necessary for hosts |
598 | 	 * which use routing redirects generated by smart gateways |
599 | 	 * to dynamically build the routing tables. |
600 | 	 */ |
601 | 	if (rt == NULL) |
602 | 		goto create; |
603 | 	/* |
604 | 	 * Don't listen to the redirect if it's |
605 | 	 * for a route to an interface. |
606 | 	 */ |
607 | 	if (ISSET(rt->rt_flags, RTF_GATEWAY)) { |
608 | 		if (!ISSET(rt->rt_flags, RTF_HOST)) { |
609 | 			/* |
610 | 			 * Changing from route to net => route to host. |
611 | 			 * Create new route, rather than smashing route to net. |
612 | 			 */ |
613 | create: |
614 | 			rtfree(rt); |
615 | 			flags |= RTF_DYNAMIC; |
616 | 			bzero(&info, sizeof(info)); |
617 | 			info.rti_info[RTAX_DST] = dst; |
618 | 			info.rti_info[RTAX_GATEWAY] = gateway; |
619 | 			info.rti_ifa = ifa; |
620 | 			info.rti_flags = flags; |
621 | 			rt = NULL; |
622 | 			error = rtrequest(RTM_ADD, &info, RTP_DEFAULT, &rt, |
623 | 			    rdomain); |
624 | 			if (error == 0) { |
625 | 				flags = rt->rt_flags; |
626 | 				prio = rt->rt_priority; |
627 | 			} |
628 | 			stat = rts_dynamic; |
629 | 		} else { |
630 | 			/* |
631 | 			 * Smash the current notion of the gateway to |
632 | 			 * this destination.  Should check about netmask!!! |
633 | 			 */ |
634 | 			rt->rt_flags |= RTF_MODIFIED; |
635 | 			flags |= RTF_MODIFIED; |
636 | 			prio = rt->rt_priority; |
637 | 			stat = rts_newgateway; |
638 | 			rt_setgate(rt, gateway, rdomain); |
639 | 		} |
640 | 	} else |
641 | 		error = EHOSTUNREACH; |
642 | done: |
643 | 	if (rt) { |
644 | 		if (rtp && !error) |
645 | 			*rtp = rt; |
646 | 		else |
647 | 			rtfree(rt); |
648 | 	} |
649 | out: |
650 | 	if (error) |
651 | 		rtstat_inc(rts_badredirect); |
652 | 	else if (stat != rts_ncounters) |
653 | 		rtstat_inc(stat); |
654 | 	bzero((caddr_t)&info, sizeof(info)); |
655 | 	info.rti_info[RTAX_DST] = dst; |
656 | 	info.rti_info[RTAX_GATEWAY] = gateway; |
657 | 	info.rti_info[RTAX_AUTHOR] = src; |
658 | 	rtm_miss(RTM_REDIRECT, &info, flags, prio, ifidx, error, rdomain); |
659 | } |
660 | |
661 | /* |
662 | * Delete a route and generate a message |
663 | */ |
664 | int |
665 | rtdeletemsg(struct rtentry *rt, struct ifnet *ifp, u_int tableid) |
666 | { |
667 | int error; |
668 | struct rt_addrinfo info; |
669 | struct sockaddr_rtlabel sa_rl; |
670 | struct sockaddr_in6 sa_mask; |
671 | |
672 | 	KASSERT(rt->rt_ifidx == ifp->if_index); |
673 | |
674 | 	/* |
675 | 	 * Request the new route so that the entry is not actually |
676 | 	 * deleted.  That will allow the information being reported to |
677 | 	 * be accurate (and consistent with route_output()). |
678 | 	 */ |
679 | 	memset(&info, 0, sizeof(info)); |
680 | 	info.rti_info[RTAX_DST] = rt_key(rt); |
681 | 	info.rti_info[RTAX_GATEWAY] = rt->rt_gateway; |
682 | 	if (!ISSET(rt->rt_flags, RTF_HOST)) |
683 | 		info.rti_info[RTAX_NETMASK] = rt_plen2mask(rt, &sa_mask); |
684 | 	info.rti_info[RTAX_LABEL] = rtlabel_id2sa(rt->rt_labelid, &sa_rl); |
685 | 	info.rti_flags = rt->rt_flags; |
686 | 	info.rti_info[RTAX_IFP] = sdltosa(ifp->if_sadl); |
687 | 	info.rti_info[RTAX_IFA] = rt->rt_ifa->ifa_addr; |
688 | 	error = rtrequest_delete(&info, rt->rt_priority, ifp, &rt, tableid); |
689 | 	rtm_miss(RTM_DELETE, &info, info.rti_flags, rt->rt_priority, |
690 | 	    rt->rt_ifidx, error, tableid); |
691 | 	if (error == 0) |
692 | 		rtfree(rt); |
693 | return (error); |
694 | } |
695 | |
696 | static inline int |
697 | rtequal(struct rtentry *a, struct rtentry *b) |
698 | { |
699 | if (a == b) |
700 | return 1; |
701 | |
702 | 	if (memcmp(rt_key(a), rt_key(b), rt_key(a)->sa_len) == 0 && |
703 | 	    rt_plen(a) == rt_plen(b)) |
704 | return 1; |
705 | else |
706 | return 0; |
707 | } |
708 | |
709 | int |
710 | rtflushclone1(struct rtentry *rt, void *arg, u_int id) |
711 | { |
712 | struct rtentry *cloningrt = arg; |
713 | struct ifnet *ifp; |
714 | |
715 | 	if (!ISSET(rt->rt_flags, RTF_CLONED)) |
716 | 		return 0; |
717 | |
718 | 	/* Cached routes must stay alive as long as their parent is alive. */ |
719 | 	if (ISSET(rt->rt_flags, RTF_CACHED) && (rt->rt_parent != cloningrt)) |
720 | 		return 0; |
721 | |
722 | 	if (!rtequal(rt->rt_parent, cloningrt)) |
723 | 		return 0; |
724 | 	/* |
725 | 	 * This happens when an interface with a RTF_CLONING route is |
726 | 	 * being detached.  In this case it's safe to bail because all |
727 | 	 * the routes are being purged by rt_ifa_purge(). |
728 | 	 */ |
729 | 	ifp = if_get(rt->rt_ifidx); |
730 | 	if (ifp == NULL) |
731 | 		return 0; |
732 | |
733 | 	if_put(ifp); |
734 | 	return EEXIST; |
735 | } |
736 | |
737 | int |
738 | rtflushclone(struct rtentry *parent, unsigned int rtableid) |
739 | { |
740 | 	struct rtentry *rt = NULL; |
741 | 	struct ifnet *ifp; |
742 | 	int error; |
743 | |
744 | #ifdef DIAGNOSTIC |
745 | 	if (!parent || (parent->rt_flags & RTF_CLONING) == 0) |
746 | 		panic("rtflushclone: called with a non-cloning route"); |
747 | #endif |
748 | |
749 | 	do { |
750 | 		error = rtable_walk(rtableid, rt_key(parent)->sa_family, &rt, |
751 | 		    rtflushclone1, parent); |
752 | 		if (rt != NULL && error == EEXIST) { |
753 | 			ifp = if_get(rt->rt_ifidx); |
754 | 			if (ifp == NULL) { |
755 | 				error = EAGAIN; |
756 | 			} else { |
757 | 				error = rtdeletemsg(rt, ifp, rtableid); |
758 | 				if (error == 0) |
759 | 					error = EAGAIN; |
760 | 				if_put(ifp); |
761 | 			} |
762 | 		} |
763 | 		rtfree(rt); |
764 | 		rt = NULL; |
765 | 	} while (error == EAGAIN); |
766 | |
767 | return error; |
768 | |
769 | } |
770 | |
771 | int |
772 | rtrequest_delete(struct rt_addrinfo *info, u_int8_t prio, struct ifnet *ifp, |
773 | struct rtentry **ret_nrt, u_int tableid) |
774 | { |
775 | struct rtentry *rt; |
776 | int error; |
777 | |
778 | 	NET_ASSERT_LOCKED(); |
779 | |
780 | 	if (!rtable_exists(tableid)) |
781 | 		return (EAFNOSUPPORT); |
782 | 	rt = rtable_lookup(tableid, info->rti_info[RTAX_DST], |
783 | 	    info->rti_info[RTAX_NETMASK], info->rti_info[RTAX_GATEWAY], prio); |
784 | 	if (rt == NULL) |
785 | 		return (ESRCH); |
786 | |
787 | 	/* Make sure that's the route the caller wants to delete. */ |
788 | 	if (ifp != NULL && ifp->if_index != rt->rt_ifidx) { |
789 | 		rtfree(rt); |
790 | 		return (ESRCH); |
791 | 	} |
792 | |
793 | #ifdef BFD |
794 | 	if (ISSET(rt->rt_flags, RTF_BFD)) |
795 | 		bfdclear(rt); |
796 | #endif |
797 | |
798 | 	error = rtable_delete(tableid, info->rti_info[RTAX_DST], |
799 | 	    info->rti_info[RTAX_NETMASK], rt); |
800 | 	if (error != 0) { |
801 | 		rtfree(rt); |
802 | 		return (ESRCH); |
803 | 	} |
804 | |
805 | 	/* Release next hop cache before flushing cloned entries. */ |
806 | 	rt_putgwroute(rt, NULL); |
807 | |
808 | 	/* Clean up any cloned children. */ |
809 | 	if (ISSET(rt->rt_flags, RTF_CLONING)) |
810 | 		rtflushclone(rt, tableid); |
811 | |
812 | 	rtfree(rt->rt_parent); |
813 | 	rt->rt_parent = NULL; |
814 | |
815 | 	rt->rt_flags &= ~RTF_UP; |
816 | |
817 | 	KASSERT(ifp->if_index == rt->rt_ifidx); |
818 | 	ifp->if_rtrequest(ifp, RTM_DELETE, rt); |
819 | |
820 | 	atomic_inc_int(&rttrash); |
821 | |
822 | 	if (ret_nrt != NULL) |
823 | 		*ret_nrt = rt; |
824 | 	else |
825 | 		rtfree(rt); |
826 | |
827 | return (0); |
828 | } |
829 | |
830 | int |
831 | rtrequest(int req, struct rt_addrinfo *info, u_int8_t prio, |
832 | struct rtentry **ret_nrt, u_int tableid) |
833 | { |
834 | struct ifnet *ifp; |
835 | struct rtentry *rt, *crt; |
836 | struct ifaddr *ifa; |
837 | struct sockaddr *ndst; |
838 | struct sockaddr_rtlabel *sa_rl, sa_rl2; |
839 | 	struct sockaddr_dl sa_dl = { sizeof(sa_dl), AF_LINK }; |
840 | 	int error; |
841 | |
842 | 	NET_ASSERT_LOCKED(); |
843 | |
844 | 	if (!rtable_exists(tableid)) |
845 | 		return (EAFNOSUPPORT); |
846 | 	if (info->rti_flags & RTF_HOST) |
847 | 		info->rti_info[RTAX_NETMASK] = NULL; |
848 | 	switch (req) { |
849 | 	case RTM_DELETE: |
850 | 		return (EINVAL); |
851 | |
852 | 	case RTM_RESOLVE: |
853 | 		if (ret_nrt == NULL || (rt = *ret_nrt) == NULL) |
854 | 			return (EINVAL); |
855 | 		if ((rt->rt_flags & RTF_CLONING) == 0) |
856 | 			return (EINVAL); |
857 | 		KASSERT(rt->rt_ifa->ifa_ifp != NULL); |
858 | 		info->rti_ifa = rt->rt_ifa; |
859 | 		info->rti_flags = rt->rt_flags | (RTF_CLONED|RTF_HOST); |
860 | 		info->rti_flags &= ~(RTF_CLONING|RTF_CONNECTED|RTF_STATIC); |
861 | 		info->rti_info[RTAX_GATEWAY] = sdltosa(&sa_dl); |
862 | 		info->rti_info[RTAX_LABEL] = |
863 | 		    rtlabel_id2sa(rt->rt_labelid, &sa_rl2); |
864 | 		/* FALLTHROUGH */ |
865 | |
866 | 	case RTM_ADD: |
867 | 		if (info->rti_ifa == NULL) |
868 | 			return (EINVAL); |
869 | 		ifa = info->rti_ifa; |
870 | 		ifp = ifa->ifa_ifp; |
871 | 		if (prio == 0) |
872 | 			prio = ifp->if_priority + RTP_STATIC; |
873 | |
874 | 		error = rt_copysa(info->rti_info[RTAX_DST], |
875 | 		    info->rti_info[RTAX_NETMASK], &ndst); |
876 | 		if (error) |
877 | 			return (error); |
878 | |
879 | 		rt = pool_get(&rtentry_pool, PR_NOWAIT | PR_ZERO); |
880 | 		if (rt == NULL) { |
881 | 			free(ndst, M_RTABLE, ndst->sa_len); |
882 | 			return (ENOBUFS); |
883 | 		} |
884 | |
885 | 		refcnt_init_trace(&rt->rt_refcnt, DT_REFCNT_IDX_RTENTRY); |
886 | 		rt->rt_flags = info->rti_flags | RTF_UP; |
887 | 		rt->rt_priority = prio;	/* init routing priority */ |
888 | 		LIST_INIT(&rt->rt_timer); |
889 | |
890 | 		/* Check the link state if the table supports it. */ |
891 | 		if (rtable_mpath_capable(tableid, ndst->sa_family) && |
892 | 		    !ISSET(rt->rt_flags, RTF_LOCAL) && |
893 | 		    (!LINK_STATE_IS_UP(ifp->if_link_state) || |
894 | 		    !ISSET(ifp->if_flags, IFF_UP))) { |
895 | 			rt->rt_flags &= ~RTF_UP; |
896 | 			rt->rt_priority |= RTP_DOWN; |
897 | 		} |
898 | |
899 | 		if (info->rti_info[RTAX_LABEL] != NULL) { |
900 | 			sa_rl = (struct sockaddr_rtlabel *) |
901 | 			    info->rti_info[RTAX_LABEL]; |
902 | 			rt->rt_labelid = rtlabel_name2id(sa_rl->sr_label); |
903 | 		} |
904 | |
905 | #ifdef MPLS |
906 | 		/* We have to allocate additional space for MPLS infos */ |
907 | 		if (info->rti_flags & RTF_MPLS && |
908 | 		    (info->rti_info[RTAX_SRC] != NULL || |
909 | 		    info->rti_info[RTAX_DST]->sa_family == AF_MPLS)) { |
910 | 			error = rt_mpls_set(rt, info->rti_info[RTAX_SRC], |
911 | 			    info->rti_mpls); |
912 | 			if (error) { |
913 | 				free(ndst, M_RTABLE, ndst->sa_len); |
914 | 				pool_put(&rtentry_pool, rt); |
915 | 				return (error); |
916 | 			} |
917 | 		} else |
918 | 			rt_mpls_clear(rt); |
919 | #endif |
920 | |
921 | 		rt->rt_ifa = ifaref(ifa); |
922 | 		rt->rt_ifidx = ifp->if_index; |
923 | 		/* |
924 | 		 * Copy metrics and a back pointer from the cloned |
925 | 		 * route's parent. |
926 | 		 */ |
927 | 		if (ISSET(rt->rt_flags, RTF_CLONED)) { |
928 | 			rtref(*ret_nrt); |
929 | 			rt->rt_parent = *ret_nrt; |
930 | 			rt->rt_rmx = (*ret_nrt)->rt_rmx; |
931 | 		} |
932 | |
933 | 		/* |
934 | 		 * We must set rt->rt_gateway before adding ``rt'' to |
935 | 		 * the routing table because the radix MPATH code uses |
936 | 		 * it to (re)order routes. |
937 | 		 */ |
938 | 		if ((error = rt_setgate(rt, info->rti_info[RTAX_GATEWAY], |
939 | 		    tableid))) { |
940 | 			ifafree(ifa); |
941 | 			rtfree(rt->rt_parent); |
942 | 			rt_putgwroute(rt, NULL); |
943 | 			if (rt->rt_gateway != NULL) { |
944 | 				free(rt->rt_gateway, M_RTABLE, |
945 | 				    ROUNDUP(rt->rt_gateway->sa_len)); |
946 | 			} |
947 | 			free(ndst, M_RTABLE, ndst->sa_len); |
948 | 			pool_put(&rtentry_pool, rt); |
949 | 			return (error); |
950 | 		} |
951 | |
952 | 		error = rtable_insert(tableid, ndst, |
953 | 		    info->rti_info[RTAX_NETMASK], info->rti_info[RTAX_GATEWAY], |
954 | 		    rt->rt_priority, rt); |
955 | 		if (error != 0 && |
956 | 		    (crt = rtable_match(tableid, ndst, NULL)) != NULL) { |
957 | 			/* overwrite cloned route */ |
958 | 			if (ISSET(crt->rt_flags, RTF_CLONED) && |
959 | 			    !ISSET(crt->rt_flags, RTF_CACHED)) { |
960 | 				struct ifnet *cifp; |
961 | |
962 | 				cifp = if_get(crt->rt_ifidx); |
963 | 				KASSERT(cifp != NULL); |
964 | 				rtdeletemsg(crt, cifp, tableid); |
965 | 				if_put(cifp); |
966 | |
967 | 				error = rtable_insert(tableid, ndst, |
968 | 				    info->rti_info[RTAX_NETMASK], |
969 | 				    info->rti_info[RTAX_GATEWAY], |
970 | 				    rt->rt_priority, rt); |
971 | 			} |
972 | 			rtfree(crt); |
973 | 		} |
974 | 		if (error != 0) { |
975 | 			ifafree(ifa); |
976 | 			rtfree(rt->rt_parent); |
977 | 			rt_putgwroute(rt, NULL); |
978 | 			if (rt->rt_gateway != NULL) { |
979 | 				free(rt->rt_gateway, M_RTABLE, |
980 | 				    ROUNDUP(rt->rt_gateway->sa_len)); |
981 | 			} |
982 | 			free(ndst, M_RTABLE, ndst->sa_len); |
983 | 			pool_put(&rtentry_pool, rt); |
984 | 			return (EEXIST); |
985 | 		} |
986 | 		ifp->if_rtrequest(ifp, req, rt); |
987 | |
988 | 		if_group_routechange(info->rti_info[RTAX_DST], |
989 | 		    info->rti_info[RTAX_NETMASK]); |
990 | |
991 | 		if (ret_nrt != NULL) |
992 | 			*ret_nrt = rt; |
993 | 		else |
994 | 			rtfree(rt); |
995 | break; |
996 | } |
997 | |
998 | return (0); |
999 | } |
1000 | |
1001 | int |
1002 | rt_setgate(struct rtentry *rt, const struct sockaddr *gate, u_int rtableid) |
1003 | { |
1004 | 	int glen = ROUNDUP(gate->sa_len); |
1005 | 	struct sockaddr *sa, *osa; |
1006 | 	int error = 0; |
1007 | |
1008 | 	KASSERT(gate != NULL); |
1009 | 	if (rt->rt_gateway == gate) { |
1010 | 		/* nop */ |
1011 | 		return (0); |
1012 | 	} |
1013 | |
1014 | 	sa = malloc(glen, M_RTABLE, M_NOWAIT | M_ZERO); |
1015 | 	if (sa == NULL) |
1016 | 		return (ENOBUFS); |
1017 | 	memcpy(sa, gate, gate->sa_len); |
1018 | |
1019 | 	KERNEL_LOCK(); /* see [X] in route.h */ |
1020 | 	osa = rt->rt_gateway; |
1021 | 	rt->rt_gateway = sa; |
1022 | |
1023 | 	if (ISSET(rt->rt_flags, RTF_GATEWAY)) |
1024 | 		error = rt_setgwroute(rt, gate, rtableid); |
1025 | 	KERNEL_UNLOCK(); |
1026 | |
1027 | 	if (osa != NULL) |
1028 | 		free(osa, M_RTABLE, ROUNDUP(osa->sa_len)); |
1029 | |
1030 | return (error); |
1031 | } |
1032 | |
1033 | /* |
1034 | * Return the route entry containing the next hop link-layer |
1035 | * address corresponding to ``rt''. |
1036 | */ |
1037 | struct rtentry * |
1038 | rt_getll(struct rtentry *rt) |
1039 | { |
1040 | 	if (ISSET(rt->rt_flags, RTF_GATEWAY)) { |
1041 | 		KASSERT(rt->rt_gwroute != NULL); |
1042 | 		return (rt->rt_gwroute); |
1043 | } |
1044 | |
1045 | return (rt); |
1046 | } |
1047 | |
1048 | void |
1049 | rt_maskedcopy(struct sockaddr *src, struct sockaddr *dst, |
1050 | struct sockaddr *netmask) |
1051 | { |
1052 | 	u_char *cp1 = (u_char *)src; |
1053 | 	u_char *cp2 = (u_char *)dst; |
1054 | 	u_char *cp3 = (u_char *)netmask; |
1055 | 	u_char *cplim = cp2 + *cp3; |
1056 | 	u_char *cplim2 = cp2 + *cp1; |
1057 | |
1058 | 	*cp2++ = *cp1++; *cp2++ = *cp1++; /* copies sa_len & sa_family */ |
1059 | 	cp3 += 2; |
1060 | 	if (cplim > cplim2) |
1061 | 		cplim = cplim2; |
1062 | 	while (cp2 < cplim) |
1063 | 		*cp2++ = *cp1++ & *cp3++; |
1064 | 	if (cp2 < cplim2) |
1065 | 		bzero(cp2, cplim2 - cp2); |
1066 | } |
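/*
 * Example: masking host 192.0.2.200 with a /24 netmask copies sa_len
 * and sa_family, ANDs each address byte covered by the mask bytes
 * (giving 192.0.2.0) and zero-fills whatever remains of ``dst''.
 */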
1067 | |
1068 | /* |
1069 | * allocate new sockaddr structure based on the user supplied src and mask |
1070 | * that is useable for the routing table. |
1071 | */ |
1072 | static int |
1073 | rt_copysa(const struct sockaddr *src, const struct sockaddr *mask, |
1074 | struct sockaddr **dst) |
1075 | { |
1076 | static const u_char maskarray[] = { |
1077 | 0x0, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfc, 0xfe }; |
1078 | struct sockaddr *ndst; |
1079 | const struct domain *dp; |
1080 | u_char *csrc, *cdst; |
1081 | int i, plen; |
1082 | |
1083 | 	for (i = 0; (dp = domains[i]) != NULL; i++) { |
1084 | 		if (dp->dom_rtoffset == 0) |
1085 | 			continue; |
1086 | 		if (src->sa_family == dp->dom_family) |
1087 | 			break; |
1088 | 	} |
1089 | 	if (dp == NULL) |
1090 | 		return (EAFNOSUPPORT); |
1091 | |
1092 | 	if (src->sa_len < dp->dom_sasize) |
1093 | 		return (EINVAL); |
1094 | |
1095 | 	plen = rtable_satoplen(src->sa_family, mask); |
1096 | 	if (plen == -1) |
1097 | 		return (EINVAL); |
1098 | |
1099 | 	ndst = malloc(dp->dom_sasize, M_RTABLE, M_NOWAIT|M_ZERO); |
1100 | 	if (ndst == NULL) |
1101 | 		return (ENOBUFS); |
1102 | |
1103 | 	ndst->sa_family = src->sa_family; |
1104 | 	ndst->sa_len = dp->dom_sasize; |
1105 | |
1106 | 	csrc = (u_char *)src + dp->dom_rtoffset; |
1107 | 	cdst = (u_char *)ndst + dp->dom_rtoffset; |
1108 | |
1109 | 	memcpy(cdst, csrc, plen / 8); |
1110 | 	if (plen % 8 != 0) |
1111 | 		cdst[plen / 8] = csrc[plen / 8] & maskarray[plen % 8]; |
1112 | |
1113 | *dst = ndst; |
1114 | return (0); |
1115 | } |
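/*
 * Example of the partial-byte masking above: for an IPv4 destination
 * with a /20 mask, plen is 20, so 20 / 8 == 2 whole address bytes are
 * copied and the third byte is ANDed with maskarray[20 % 8], i.e.
 * maskarray[4] == 0xf0; the remaining byte stays zero thanks to the
 * M_ZERO allocation.
 */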
1116 | |
1117 | int |
1118 | rt_ifa_add(struct ifaddr *ifa, int flags, struct sockaddr *dst, |
1119 | unsigned int rdomain) |
1120 | { |
1121 | struct ifnet *ifp = ifa->ifa_ifp; |
1122 | struct rtentry *rt; |
1123 | struct sockaddr_rtlabel sa_rl; |
1124 | struct rt_addrinfo info; |
1125 | 	uint8_t prio = ifp->if_priority + RTP_STATIC; |
1126 | 	int error; |
1127 | |
1128 | 	KASSERT(rdomain == rtable_l2(rdomain)); |
1129 | |
1130 | 	memset(&info, 0, sizeof(info)); |
1131 | 	info.rti_ifa = ifa; |
1132 | 	info.rti_flags = flags; |
1133 | 	info.rti_info[RTAX_DST] = dst; |
1134 | 	if (flags & RTF_LLINFO) |
1135 | 		info.rti_info[RTAX_GATEWAY] = sdltosa(ifp->if_sadl); |
1136 | 	else |
1137 | 		info.rti_info[RTAX_GATEWAY] = ifa->ifa_addr; |
1138 | 	info.rti_info[RTAX_LABEL] = rtlabel_id2sa(ifp->if_rtlabelid, &sa_rl); |
1139 | |
1140 | #ifdef MPLS |
1141 | 	if ((flags & RTF_MPLS) == RTF_MPLS) |
1142 | 		info.rti_mpls = MPLS_OP_POP; |
1143 | #endif /* MPLS */ |
1144 | |
1145 | 	if ((flags & RTF_HOST) == 0) |
1146 | 		info.rti_info[RTAX_NETMASK] = ifa->ifa_netmask; |
1147 | |
1148 | 	if (flags & (RTF_LOCAL|RTF_BROADCAST)) |
1149 | 		prio = RTP_LOCAL; |
1150 | |
1151 | 	if (flags & RTF_CONNECTED) |
1152 | 		prio = ifp->if_priority + RTP_CONNECTED; |
1153 | |
1154 | 	error = rtrequest(RTM_ADD, &info, prio, &rt, rdomain); |
1155 | 	if (error == 0) { |
1156 | 		/* |
1157 | 		 * A local route is created for every address configured |
1158 | 		 * on an interface, so use this information to notify |
1159 | 		 * userland that a new address has been added. |
1160 | 		 */ |
1161 | 		if (flags & RTF_LOCAL) |
1162 | 			rtm_addr(RTM_NEWADDR, ifa); |
1163 | 		rtm_send(rt, RTM_ADD, 0, rdomain); |
1164 | rtfree(rt); |
1165 | } |
1166 | return (error); |
1167 | } |
1168 | |
1169 | int |
1170 | rt_ifa_del(struct ifaddr *ifa, int flags, struct sockaddr *dst, |
1171 | unsigned int rdomain) |
1172 | { |
1173 | struct ifnet *ifp = ifa->ifa_ifp; |
1174 | struct rtentry *rt; |
1175 | 	struct mbuf *m = NULL; |
1176 | 	struct sockaddr *deldst; |
1177 | 	struct rt_addrinfo info; |
1178 | 	struct sockaddr_rtlabel sa_rl; |
1179 | 	uint8_t prio = ifp->if_priority + RTP_STATIC; |
1180 | 	int error; |
1181 | |
1182 | 	KASSERT(rdomain == rtable_l2(rdomain)); |
1183 | |
1184 | 	if ((flags & RTF_HOST) == 0 && ifa->ifa_netmask) { |
1185 | 		m = m_get(M_DONTWAIT, MT_SONAME); |
1186 | 		if (m == NULL) |
1187 | 			return (ENOBUFS); |
1188 | 		deldst = mtod(m, struct sockaddr *); |
1189 | 		rt_maskedcopy(dst, deldst, ifa->ifa_netmask); |
1190 | 		dst = deldst; |
1191 | 	} |
1192 | |
1193 | 	memset(&info, 0, sizeof(info)); |
1194 | 	info.rti_ifa = ifa; |
1195 | 	info.rti_flags = flags; |
1196 | 	info.rti_info[RTAX_DST] = dst; |
1197 | 	if ((flags & RTF_LLINFO) == 0) |
1198 | 		info.rti_info[RTAX_GATEWAY] = ifa->ifa_addr; |
1199 | 	info.rti_info[RTAX_LABEL] = rtlabel_id2sa(ifp->if_rtlabelid, &sa_rl); |
1200 | |
1201 | 	if ((flags & RTF_HOST) == 0) |
1202 | 		info.rti_info[RTAX_NETMASK] = ifa->ifa_netmask; |
1203 | |
1204 | 	if (flags & (RTF_LOCAL|RTF_BROADCAST)) |
1205 | 		prio = RTP_LOCAL; |
1206 | |
1207 | 	if (flags & RTF_CONNECTED) |
1208 | 		prio = ifp->if_priority + RTP_CONNECTED; |
1209 | |
1210 | 	rtable_clearsource(rdomain, ifa->ifa_addr); |
1211 | 	error = rtrequest_delete(&info, prio, ifp, &rt, rdomain); |
1212 | 	if (error == 0) { |
1213 | 		rtm_send(rt, RTM_DELETE, 0, rdomain); |
1214 | 		if (flags & RTF_LOCAL) |
1215 | 			rtm_addr(RTM_DELADDR, ifa); |
1216 | rtfree(rt); |
1217 | } |
1218 | m_free(m); |
1219 | |
1220 | return (error); |
1221 | } |
1222 | |
1223 | /* |
1224 | * Add ifa's address as a local rtentry. |
1225 | */ |
1226 | int |
1227 | rt_ifa_addlocal(struct ifaddr *ifa) |
1228 | { |
1229 | struct ifnet *ifp = ifa->ifa_ifp; |
1230 | struct rtentry *rt; |
1231 | 	u_int flags = RTF_HOST|RTF_LOCAL; |
1232 | 	int error = 0; |
1233 | |
1234 | 	/* |
1235 | 	 * If the configured address corresponds to the magical "any" |
1236 | 	 * address, do not add a local route entry because that might |
1237 | 	 * corrupt the routing tree which uses this value for the |
1238 | 	 * default routes. |
1239 | 	 */ |
1240 | 	switch (ifa->ifa_addr->sa_family) { |
1241 | 	case AF_INET: |
1242 | 		if (satosin(ifa->ifa_addr)->sin_addr.s_addr == INADDR_ANY) |
1243 | 			return (0); |
1244 | 		break; |
1245 | #ifdef INET6 |
1246 | 	case AF_INET6: |
1247 | 		if (IN6_ARE_ADDR_EQUAL(&satosin6(ifa->ifa_addr)->sin6_addr, |
1248 | 		    &in6addr_any)) |
1249 | 			return (0); |
1250 | 		break; |
1251 | #endif |
1252 | 	default: |
1253 | 		break; |
1254 | 	} |
1255 | |
1256 | 	if (!ISSET(ifp->if_flags, (IFF_LOOPBACK|IFF_POINTOPOINT))) |
1257 | 		flags |= RTF_LLINFO; |
1258 | |
1259 | 	/* If there is no local entry, allocate one. */ |
1260 | 	rt = rtalloc(ifa->ifa_addr, 0, ifp->if_rdomain); |
1261 | 	if (rt == NULL || ISSET(rt->rt_flags, flags) != flags) { |
1262 | 		error = rt_ifa_add(ifa, flags | RTF_MPATH, ifa->ifa_addr, |
1263 | 		    ifp->if_rdomain); |
1264 | } |
1265 | rtfree(rt); |
1266 | |
1267 | return (error); |
1268 | } |
1269 | |
1270 | /* |
1271 | * Remove local rtentry of ifa's address if it exists. |
1272 | */ |
1273 | int |
1274 | rt_ifa_dellocal(struct ifaddr *ifa) |
1275 | { |
1276 | struct ifnet *ifp = ifa->ifa_ifp; |
1277 | struct rtentry *rt; |
1278 | 	u_int flags = RTF_HOST|RTF_LOCAL; |
1279 | 	int error = 0; |
1280 | |
1281 | 	/* |
1282 | 	 * We do not add local routes for such addresses, so do not bother |
1283 | 	 * removing them. |
1284 | 	 */ |
1285 | 	switch (ifa->ifa_addr->sa_family) { |
1286 | 	case AF_INET: |
1287 | 		if (satosin(ifa->ifa_addr)->sin_addr.s_addr == INADDR_ANY) |
1288 | 			return (0); |
1289 | 		break; |
1290 | #ifdef INET6 |
1291 | 	case AF_INET6: |
1292 | 		if (IN6_ARE_ADDR_EQUAL(&satosin6(ifa->ifa_addr)->sin6_addr, |
1293 | 		    &in6addr_any)) |
1294 | 			return (0); |
1295 | 		break; |
1296 | #endif |
1297 | 	default: |
1298 | 		break; |
1299 | 	} |
1300 | |
1301 | 	if (!ISSET(ifp->if_flags, (IFF_LOOPBACK|IFF_POINTOPOINT))) |
1302 | 		flags |= RTF_LLINFO; |
1303 | |
1304 | 	/* |
1305 | 	 * Before deleting, check that a corresponding local host |
1306 | 	 * route surely exists.  With this check, we can avoid deleting |
1307 | 	 * an interface direct route whose destination is the same |
1308 | 	 * as the address being removed.  This can happen when removing |
1309 | 	 * a subnet-router anycast address on an interface attached |
1310 | 	 * to a shared medium. |
1311 | 	 */ |
1312 | 	rt = rtalloc(ifa->ifa_addr, 0, ifp->if_rdomain); |
1313 | 	if (rt != NULL && ISSET(rt->rt_flags, flags) == flags) { |
1314 | 		error = rt_ifa_del(ifa, flags, ifa->ifa_addr, |
1315 | 		    ifp->if_rdomain); |
1316 | } |
1317 | rtfree(rt); |
1318 | |
1319 | return (error); |
1320 | } |
1321 | |
1322 | /* |
1323 | * Remove all addresses attached to ``ifa''. |
1324 | */ |
1325 | void |
1326 | rt_ifa_purge(struct ifaddr *ifa) |
1327 | { |
1328 | struct ifnet *ifp = ifa->ifa_ifp; |
1329 | 	struct rtentry *rt = NULL; |
1330 | 	unsigned int rtableid; |
1331 | 	int error, af = ifa->ifa_addr->sa_family; |
1332 | |
1333 | 	KASSERT(ifp != NULL); |
1334 | |
1335 | 	for (rtableid = 0; rtableid < rtmap_limit; rtableid++) { |
1336 | 		/* skip rtables that are not in the rdomain of the ifp */ |
1337 | 		if (rtable_l2(rtableid) != ifp->if_rdomain) |
1338 | 			continue; |
1339 | |
1340 | 		do { |
1341 | 			error = rtable_walk(rtableid, af, &rt, |
1342 | 			    rt_ifa_purge_walker, ifa); |
1343 | 			if (rt != NULL && error == EEXIST) { |
1344 | 				error = rtdeletemsg(rt, ifp, rtableid); |
1345 | 				if (error == 0) |
1346 | 					error = EAGAIN; |
1347 | 			} |
1348 | 			rtfree(rt); |
1349 | 			rt = NULL; |
1350 | 		} while (error == EAGAIN); |
1351 | |
1352 | 		if (error == EAFNOSUPPORT) |
1353 | error = 0; |
1354 | |
1355 | if (error) |
1356 | break; |
1357 | } |
1358 | } |
1359 | |
1360 | int |
1361 | rt_ifa_purge_walker(struct rtentry *rt, void *vifa, unsigned int rtableid) |
1362 | { |
1363 | struct ifaddr *ifa = vifa; |
1364 | |
1365 | if (rt->rt_ifa == ifa) |
1366 | 		return EEXIST; |
1367 | |
1368 | return 0; |
1369 | } |
1370 | |
1371 | /* |
1372 | * Route timer routines. These routines allow functions to be called |
1373 | * for various routes at any time. This is useful in supporting |
1374 | * path MTU discovery and redirect route deletion. |
1375 | * |
1376 | * This is similar to some BSDI internal functions, but it provides |
1377 | * for multiple queues for efficiency's sake... |
1378 | */ |
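/*
 * A sketch of typical use (identifiers here are illustrative; the
 * PMTUD code in netinet/ip_icmp.c is a real consumer): a subsystem
 * sets up one queue with its expiry handler, then arms per-route
 * timers on it.
 *
 *	struct rttimer_queue pmtu_q;
 *
 *	rt_timer_queue_init(&pmtu_q, 600, pmtu_expired);
 *	...
 *	rt_timer_add(rt, &pmtu_q, rtableid);
 */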
1379 | |
1380 | struct mutex rttimer_mtx; |
1381 | |
1382 | struct rttimer { |
1383 | 	TAILQ_ENTRY(rttimer)	rtt_next;	/* [T] entry on timer queue */ |
1384 | 	LIST_ENTRY(rttimer)	rtt_link;	/* [T] timers per rtentry */ |
1385 | 	struct timeout		rtt_timeout;	/* [I] timeout for this entry */ |
1386 | 	struct rttimer_queue	*rtt_queue;	/* [I] back pointer to queue */ |
1387 | 	struct rtentry		*rtt_rt;	/* [T] back pointer to route */ |
1388 | 	time_t			rtt_expire;	/* [I] rt expire time */ |
1389 | 	u_int			rtt_tableid;	/* [I] rtable id of rtt_rt */ |
1390 | }; |
1391 | |
1392 | #define RTTIMER_CALLOUT(r) { \ |
1393 | 	if (r->rtt_queue->rtq_func != NULL) { \ |
1394 | 		(*r->rtt_queue->rtq_func)(r->rtt_rt, r->rtt_tableid); \ |
1395 | 	} else { \ |
1396 | 		struct ifnet *ifp; \ |
1397 | 		\ |
1398 | 		ifp = if_get(r->rtt_rt->rt_ifidx); \ |
1399 | 		if (ifp != NULL && \ |
1400 | 		    (r->rtt_rt->rt_flags & (RTF_DYNAMIC|RTF_HOST)) == \ |
1401 | 		    (RTF_DYNAMIC|RTF_HOST)) \ |
1402 | 			rtdeletemsg(r->rtt_rt, ifp, r->rtt_tableid); \ |
1403 | 		if_put(ifp); \ |
1404 | 	} \ |
1405 | } |
1406 | |
1407 | void |
1408 | rt_timer_init(void) |
1409 | { |
1410 | pool_init(&rttimer_pool, sizeof(struct rttimer), 0, |
1411 | IPL_MPFLOOR, 0, "rttmr", NULL); |
1412 | mtx_init(&rttimer_mtx, IPL_MPFLOOR); |
1413 | } |
1414 | |
1415 | void |
1416 | rt_timer_queue_init(struct rttimer_queue *rtq, int timeout, |
1417 | void (*func)(struct rtentry *, u_int)) |
1418 | { |
1419 | rtq->rtq_timeout = timeout; |
1420 | rtq->rtq_count = 0; |
1421 | rtq->rtq_func = func; |
1422 | TAILQ_INIT(&rtq->rtq_head); |
1423 | } |
1424 | |
1425 | void |
1426 | rt_timer_queue_change(struct rttimer_queue *rtq, int timeout) |
1427 | { |
1428 | mtx_enter(&rttimer_mtx); |
1429 | rtq->rtq_timeout = timeout; |
1430 | mtx_leave(&rttimer_mtx); |
1431 | } |
1432 | |
1433 | void |
1434 | rt_timer_queue_flush(struct rttimer_queue *rtq) |
1435 | { |
1436 | struct rttimer *r; |
1437 | TAILQ_HEAD(, rttimer) rttlist; |
1438 | |
1439 | NET_ASSERT_LOCKED(); |
1440 | |
1441 | TAILQ_INIT(&rttlist); |
1442 | mtx_enter(&rttimer_mtx); |
1443 | while ((r = TAILQ_FIRST(&rtq->rtq_head)) != NULL) { |
1444 | LIST_REMOVE(r, rtt_link); |
1445 | TAILQ_REMOVE(&rtq->rtq_head, r, rtt_next); |
1446 | TAILQ_INSERT_TAIL(&rttlist, r, rtt_next); |
1447 | KASSERT(rtq->rtq_count > 0); |
1448 | rtq->rtq_count--; |
1449 | } |
1450 | mtx_leave(&rttimer_mtx); |
1451 | |
1452 | while ((r = TAILQ_FIRST(&rttlist)) != NULL) { |
1453 | TAILQ_REMOVE(&rttlist, r, rtt_next); |
1454 | RTTIMER_CALLOUT(r); |
1455 | pool_put(&rttimer_pool, r); |
1456 | } |
1457 | } |
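
Note the two-phase shape of rt_timer_queue_flush(): entries are unlinked
onto the local rttlist while rttimer_mtx is held, but RTTIMER_CALLOUT()
and pool_put() only run after the mutex is dropped, presumably because
the callout (e.g. rtdeletemsg()) may take other locks.
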
1458 | |
1459 | unsigned long |
1460 | rt_timer_queue_count(struct rttimer_queue *rtq) |
1461 | { |
1462 | return (rtq->rtq_count); |
1463 | } |
1464 | |
1465 | static inline struct rttimer * |
1466 | rt_timer_unlink(struct rttimer *r) |
1467 | { |
1468 | MUTEX_ASSERT_LOCKED(&rttimer_mtx); |
1469 | |
1470 | LIST_REMOVE(r, rtt_link); |
1471 | r->rtt_rt = NULL; |
1472 | |
1473 | if (timeout_del(&r->rtt_timeout) == 0) { |
1474 | /* timeout fired, so rt_timer_timer will do the cleanup */ |
1475 | return NULL; |
1476 | } |
1477 | |
1478 | TAILQ_REMOVE(&r->rtt_queue->rtq_head, r, rtt_next); |
1479 | KASSERT(r->rtt_queue->rtq_count > 0); |
1480 | r->rtt_queue->rtq_count--; |
1481 | return r; |
1482 | } |
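
timeout_del(9) returns non-zero only when it cancelled a still-pending
timeout; a return of 0 here means the timeout already fired, so the entry
stays on its queue and rt_timer_timer() performs the remaining cleanup,
which is why the caller receives NULL instead of the rttimer.
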
1483 | |
1484 | void |
1485 | rt_timer_remove_all(struct rtentry *rt) |
1486 | { |
1487 | struct rttimer *r; |
1488 | TAILQ_HEAD(, rttimer) rttlist; |
1489 | |
1490 | TAILQ_INIT(&rttlist); |
1491 | mtx_enter(&rttimer_mtx); |
1492 | while ((r = LIST_FIRST(&rt->rt_timer)) != NULL) { |
1493 | r = rt_timer_unlink(r); |
1494 | if (r != NULL) |
1495 | TAILQ_INSERT_TAIL(&rttlist, r, rtt_next); |
1496 | } |
1497 | mtx_leave(&rttimer_mtx); |
1498 | |
1499 | while ((r = TAILQ_FIRST(&rttlist)) != NULL) { |
1500 | TAILQ_REMOVE(&rttlist, r, rtt_next); |
1501 | pool_put(&rttimer_pool, r); |
1502 | } |
1503 | } |
1504 | |
1505 | time_t |
1506 | rt_timer_get_expire(const struct rtentry *rt) |
1507 | { |
1508 | const struct rttimer *r; |
1509 | time_t expire = 0; |
1510 | |
1511 | mtx_enter(&rttimer_mtx); |
1512 | LIST_FOREACH(r, &rt->rt_timer, rtt_link) { |
1513 | if (expire == 0 || expire > r->rtt_expire) |
1514 | expire = r->rtt_expire; |
1515 | } |
1516 | mtx_leave(&rttimer_mtx); |
1517 | |
1518 | return expire; |
1519 | } |
1520 | |
1521 | int |
1522 | rt_timer_add(struct rtentry *rt, struct rttimer_queue *queue, u_int rtableid) |
1523 | { |
1524 | struct rttimer *r, *rnew; |
1525 | |
1526 | rnew = pool_get(&rttimer_pool, PR_NOWAIT | PR_ZERO); |
1527 | if (rnew == NULL) |
1528 | return (ENOBUFS); |
1529 | |
1530 | rnew->rtt_rt = rt; |
1531 | rnew->rtt_queue = queue; |
1532 | rnew->rtt_tableid = rtableid; |
1533 | rnew->rtt_expire = getuptime() + queue->rtq_timeout; |
1534 | timeout_set_proc(&rnew->rtt_timeout, rt_timer_timer, rnew); |
1535 | |
1536 | mtx_enter(&rttimer_mtx); |
1537 | /* |
1538 | * If there's already a timer with this action, destroy it before |
1539 | * we add a new one. |
1540 | */ |
1541 | LIST_FOREACH(r, &rt->rt_timer, rtt_link) { |
1542 | if (r->rtt_queue == queue) { |
1543 | r = rt_timer_unlink(r); |
1544 | break; /* only one per list, so we can quit... */ |
1545 | } |
1546 | } |
1547 | |
1548 | LIST_INSERT_HEAD(&rt->rt_timer, rnew, rtt_link); |
1549 | TAILQ_INSERT_TAIL(&queue->rtq_head, rnew, rtt_next); |
1550 | timeout_add_sec(&rnew->rtt_timeout, queue->rtq_timeout); |
1551 | rnew->rtt_queue->rtq_count++; |
1552 | mtx_leave(&rttimer_mtx); |
1553 | |
1554 | if (r != NULL) |
1555 | pool_put(&rttimer_pool, r); |
1556 | |
1557 | return (0); |
1558 | } |
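
A minimal sketch of how a protocol might consume this API (hypothetical
names; the real users live elsewhere in the protocol code):

struct rttimer_queue redirect_q;

void
redirect_timers_init(void)
{
	/* expire entries after 600s; a NULL callback selects the
	 * RTTIMER_CALLOUT() default, which deletes dynamic host routes */
	rt_timer_queue_init(&redirect_q, 600, NULL);
}

int
redirect_route_added(struct rtentry *rt, u_int rtableid)
{
	/* replaces any earlier timer for rt on this queue */
	return rt_timer_add(rt, &redirect_q, rtableid);
}
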
1559 | |
1560 | void |
1561 | rt_timer_timer(void *arg) |
1562 | { |
1563 | struct rttimer *r = arg; |
1564 | struct rttimer_queue *rtq = r->rtt_queue; |
1565 | |
1566 | NET_LOCK(); |
1567 | mtx_enter(&rttimer_mtx); |
1568 | |
1569 | if (r->rtt_rt != NULL) |
1570 | LIST_REMOVE(r, rtt_link); |
1571 | TAILQ_REMOVE(&rtq->rtq_head, r, rtt_next); |
1572 | KASSERT(rtq->rtq_count > 0); |
1573 | rtq->rtq_count--; |
1574 | |
1575 | mtx_leave(&rttimer_mtx); |
1576 | |
1577 | if (r->rtt_rt != NULL) |
1578 | RTTIMER_CALLOUT(r); |
1579 | NET_UNLOCK(); |
1580 | |
1581 | pool_put(&rttimer_pool, r); |
1582 | } |
1583 | |
1584 | #ifdef MPLS |
1585 | int |
1586 | rt_mpls_set(struct rtentry *rt, const struct sockaddr *src, uint8_t op) |
1587 | { |
1588 | struct sockaddr_mpls *psa_mpls = (struct sockaddr_mpls *)src; |
1589 | struct rt_mpls *rt_mpls; |
1590 | |
1591 | if (psa_mpls == NULL && op != MPLS_OP_POP) |
1592 | return (EOPNOTSUPP); |
1593 | if (psa_mpls != NULL && psa_mpls->smpls_len != sizeof(*psa_mpls)) |
1594 | return (EINVAL); |
1595 | if (psa_mpls != NULL && psa_mpls->smpls_family != AF_MPLS) |
1596 | return (EAFNOSUPPORT); |
1597 | |
1598 | rt->rt_llinfo = malloc(sizeof(struct rt_mpls), M_TEMP, M_NOWAIT|M_ZERO); |
Result of 'malloc' is converted to a pointer of type 'char', which is incompatible with sizeof operand type 'struct rt_mpls' | |
1599 | if (rt->rt_llinfo == NULL) |
1600 | return (ENOMEM); |
1601 | |
1602 | rt_mpls = (struct rt_mpls *)rt->rt_llinfo; |
1603 | if (psa_mpls != NULL) |
1604 | rt_mpls->mpls_label = psa_mpls->smpls_label; |
1605 | rt_mpls->mpls_operation = op; |
1606 | /* XXX: set experimental bits */ |
1607 | rt->rt_flags |= RTF_MPLS; |
1608 | |
1609 | return (0); |
1610 | } |
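
The analyzer warning above fires because rt_llinfo is declared caddr_t
(char *), so the malloc() result is immediately converted away from the
sizeof() operand type.  One way to satisfy the checker without changing
behavior is to allocate through a correctly typed local first (a sketch,
not necessarily the committed fix):

	struct rt_mpls *rt_mpls;

	rt_mpls = malloc(sizeof(*rt_mpls), M_TEMP, M_NOWAIT|M_ZERO);
	if (rt_mpls == NULL)
		return (ENOMEM);
	rt->rt_llinfo = (caddr_t)rt_mpls;
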
1611 | |
1612 | void |
1613 | rt_mpls_clear(struct rtentry *rt) |
1614 | { |
1615 | if (rt->rt_llinfo != NULL && rt->rt_flags & RTF_MPLS) { |
1616 | free(rt->rt_llinfo, M_TEMP, sizeof(struct rt_mpls)); |
1617 | rt->rt_llinfo = NULL; |
1618 | } |
1619 | rt->rt_flags &= ~RTF_MPLS; |
1620 | } |
1621 | #endif |
1622 | |
1623 | u_int16_t |
1624 | rtlabel_name2id(const char *name) |
1625 | { |
1626 | struct rt_label *label, *p; |
1627 | u_int16_t new_id = 1, id = 0; |
1628 | |
1629 | if (!name[0]) |
1630 | return (0); |
1631 | |
1632 | mtx_enter(&rtlabel_mtx); |
1633 | TAILQ_FOREACH(label, &rt_labels, rtl_entry) |
1634 | if (strcmp(name, label->rtl_name) == 0) { |
1635 | label->rtl_ref++; |
1636 | id = label->rtl_id; |
1637 | goto out; |
1638 | } |
1639 | |
1640 | /* |
1641 | * to avoid fragmentation, we do a linear search from the beginning |
1642 | * and take the first free slot we find. if there is none or the list |
1643 | * is empty, append a new entry at the end. |
1644 | */ |
1645 | TAILQ_FOREACH(p, &rt_labels, rtl_entry) { |
1646 | if (p->rtl_id != new_id) |
1647 | break; |
1648 | new_id = p->rtl_id + 1; |
1649 | } |
1650 | if (new_id > LABELID_MAX) |
1651 | goto out; |
1652 | |
1653 | label = malloc(sizeof(*label), M_RTABLE, M_NOWAIT|M_ZERO); |
1654 | if (label == NULL) |
1655 | goto out; |
1656 | strlcpy(label->rtl_name, name, sizeof(label->rtl_name)); |
1657 | label->rtl_id = new_id; |
1658 | label->rtl_ref++; |
1659 | |
1660 | if (p != NULL) /* insert new entry before p */ |
1661 | TAILQ_INSERT_BEFORE(p, label, rtl_entry); |
1662 | else /* either list empty or no free slot in between */ |
1663 | TAILQ_INSERT_TAIL(&rt_labels, label, rtl_entry); |
1664 | |
1665 | id = label->rtl_id; |
1666 | out: |
1667 | mtx_leave(&rtlabel_mtx); |
1668 | |
1669 | return (id); |
1670 | } |
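
Worked example of the slot search above: with labels 1, 2 and 4 in use,
the loop advances new_id to 3, breaks with p pointing at the id-4 entry,
and the new label is inserted before p; with 1, 2 and 3 in use, p runs
off the end of the list and the label is appended at the tail with id 4.
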
1671 | |
1672 | const char * |
1673 | rtlabel_id2name_locked(u_int16_t id) |
1674 | { |
1675 | struct rt_label *label; |
1676 | |
1677 | MUTEX_ASSERT_LOCKED(&rtlabel_mtx); |
1678 | |
1679 | TAILQ_FOREACH(label, &rt_labels, rtl_entry) |
1680 | if (label->rtl_id == id) |
1681 | return (label->rtl_name); |
1682 | |
1683 | return (NULL); |
1684 | } |
1685 | |
1686 | const char * |
1687 | rtlabel_id2name(u_int16_t id, char *rtlabelbuf, size_t sz) |
1688 | { |
1689 | const char *label; |
1690 | |
1691 | if (id == 0) |
1692 | return (NULL); |
1693 | |
1694 | mtx_enter(&rtlabel_mtx); |
1695 | if ((label = rtlabel_id2name_locked(id)) != NULL) |
1696 | strlcpy(rtlabelbuf, label, sz); |
1697 | mtx_leave(&rtlabel_mtx); |
1698 | |
1699 | if (label == NULL) |
1700 | return (NULL); |
1701 | |
1702 | return (rtlabelbuf); |
1703 | } |
1704 | |
1705 | struct sockaddr * |
1706 | rtlabel_id2sa(u_int16_t labelid, struct sockaddr_rtlabel *sa_rl) |
1707 | { |
1708 | const char *label; |
1709 | |
1710 | if (labelid == 0) |
1711 | return (NULL); |
1712 | |
1713 | mtx_enter(&rtlabel_mtx); |
1714 | if ((label = rtlabel_id2name_locked(labelid)) != NULL) { |
1715 | bzero(sa_rl, sizeof(*sa_rl)); |
1716 | sa_rl->sr_len = sizeof(*sa_rl); |
1717 | sa_rl->sr_family = AF_UNSPEC; |
1718 | strlcpy(sa_rl->sr_label, label, sizeof(sa_rl->sr_label)); |
1719 | } |
1720 | mtx_leave(&rtlabel_mtx); |
1721 | |
1722 | if (label == NULL) |
1723 | return (NULL); |
1724 | |
1725 | return ((struct sockaddr *)sa_rl); |
1726 | } |
1727 | |
1728 | void |
1729 | rtlabel_unref(u_int16_t id) |
1730 | { |
1731 | struct rt_label *p, *next; |
1732 | |
1733 | if (id == 0) |
1734 | return; |
1735 | |
1736 | mtx_enter(&rtlabel_mtx); |
1737 | TAILQ_FOREACH_SAFE(p, &rt_labels, rtl_entry, next) { |
1738 | if (id == p->rtl_id) { |
1739 | if (--p->rtl_ref == 0) { |
1740 | TAILQ_REMOVE(&rt_labels, p, rtl_entry); |
1741 | free(p, M_RTABLE, sizeof(*p)); |
1742 | } |
1743 | break; |
1744 | } |
1745 | } |
1746 | mtx_leave(&rtlabel_mtx); |
1747 | } |
1748 | |
1749 | int |
1750 | rt_if_track(struct ifnet *ifp) |
1751 | { |
1752 | unsigned int rtableid; |
1753 | struct rtentry *rt = NULL; |
1754 | int i, error = 0; |
1755 | |
1756 | for (rtableid = 0; rtableid < rtmap_limit; rtableid++) { |
1757 | /* skip rtables that are not in the rdomain of the ifp */ |
1758 | if (rtable_l2(rtableid) != ifp->if_rdomain) |
1759 | continue; |
1760 | for (i = 1; i <= AF_MAX; i++) { |
1761 | if (!rtable_mpath_capable(rtableid, i)) |
1762 | continue; |
1763 | |
1764 | do { |
1765 | error = rtable_walk(rtableid, i, &rt, |
1766 | rt_if_linkstate_change, ifp); |
1767 | if (rt != NULL && error == EEXIST) { |
1768 | error = rtdeletemsg(rt, ifp, rtableid); |
1769 | if (error == 0) |
1770 | error = EAGAIN; |
1771 | } |
1772 | rtfree(rt); |
1773 | rt = NULL; |
1774 | } while (error == EAGAIN); |
1775 | |
1776 | if (error == EAFNOSUPPORT) |
1777 | error = 0; |
1778 | |
1779 | if (error) |
1780 | break; |
1781 | } |
1782 | } |
1783 | |
1784 | return (error); |
1785 | } |
1786 | |
1787 | int |
1788 | rt_if_linkstate_change(struct rtentry *rt, void *arg, u_int id) |
1789 | { |
1790 | struct ifnet *ifp = arg; |
1791 | struct sockaddr_in6 sa_mask; |
1792 | int error; |
1793 | |
1794 | if (rt->rt_ifidx != ifp->if_index) |
1795 | return (0); |
1796 | |
1797 | /* Local routes are always usable. */ |
1798 | if (rt->rt_flags & RTF_LOCAL) { |
1799 | rt->rt_flags |= RTF_UP; |
1800 | return (0); |
1801 | } |
1802 | |
1803 | if (LINK_STATE_IS_UP(ifp->if_link_state) && ifp->if_flags & IFF_UP) { |
1804 | if (ISSET(rt->rt_flags, RTF_UP)) |
1805 | return (0); |
1806 | |
1807 | /* bring route up */ |
1808 | rt->rt_flags |= RTF_UP; |
1809 | error = rtable_mpath_reprio(id, rt_key(rt), rt_plen(rt), |
1810 | rt->rt_priority & RTP_MASK, rt); |
1811 | } else { |
1812 | /* |
1813 | * Remove redirected and cloned routes (mainly ARP) |
1814 | * from down interfaces so we have a chance to get |
1815 | * new routes from a better source. |
1816 | */ |
1817 | if (ISSET(rt->rt_flags, RTF_CLONED|RTF_DYNAMIC) && |
1818 | !ISSET(rt->rt_flags, RTF_CACHED|RTF_BFD)) { |
1819 | return (EEXIST); |
1820 | } |
1821 | |
1822 | if (!ISSET(rt->rt_flags, RTF_UP)) |
1823 | return (0); |
1824 | |
1825 | /* take route down */ |
1826 | rt->rt_flags &= ~RTF_UP; |
1827 | error = rtable_mpath_reprio(id, rt_key(rt), rt_plen(rt), |
1828 | rt->rt_priority | RTP_DOWN, rt); |
1829 | } |
1830 | if_group_routechange(rt_key(rt), rt_plen2mask(rt, &sa_mask)); |
1831 | |
1832 | return (error); |
1833 | } |
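
The reprio calls encode link state in the priority byte: OR-ing in
RTP_DOWN (0x80) pushes the route below every priority representable in
RTP_MASK (0x7f), and masking with RTP_MASK restores the configured
priority once the link comes back up, so rtable_mpath_reprio() re-sorts
the multipath candidates accordingly.
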
1834 | |
1835 | struct sockaddr * |
1836 | rt_plentosa(sa_family_t af, int plen, struct sockaddr_in6 *sa_mask) |
1837 | { |
1838 | struct sockaddr_in *sin = (struct sockaddr_in *)sa_mask; |
1839 | #ifdef INET6 |
1840 | struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sa_mask; |
1841 | #endif |
1842 | |
1843 | KASSERT(plen >= 0 || plen == -1); |
1844 | |
1845 | if (plen == -1) |
1846 | return (NULL); |
1847 | |
1848 | memset(sa_mask, 0, sizeof(*sa_mask)); |
1849 | |
1850 | switch (af) { |
1851 | case AF_INET: |
1852 | sin->sin_family = AF_INET; |
1853 | sin->sin_len = sizeof(struct sockaddr_in); |
1854 | in_prefixlen2mask(&sin->sin_addr, plen); |
1855 | break; |
1856 | #ifdef INET6 |
1857 | case AF_INET6: |
1858 | sin6->sin6_family = AF_INET6; |
1859 | sin6->sin6_len = sizeof(struct sockaddr_in6); |
1860 | in6_prefixlen2mask(&sin6->sin6_addr, plen); |
1861 | break; |
1862 | #endif /* INET6 */ |
1863 | default: |
1864 | return (NULL); |
1865 | } |
1866 | |
1867 | return ((struct sockaddr *)sa_mask); |
1868 | } |
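
Example: rt_plentosa(AF_INET, 24, &sa_mask) fills the buffer with a
255.255.255.0 sockaddr_in, while plen == -1 (no mask) yields NULL.
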
1869 | |
1870 | struct sockaddr * |
1871 | rt_plen2mask(struct rtentry *rt, struct sockaddr_in6 *sa_mask) |
1872 | { |
1873 | return (rt_plentosa(rt_key(rt)->sa_family, rt_plen(rt), sa_mask)); |
1874 | } |
1875 | |
1876 | #ifdef DDB |
1877 | #include <machine/db_machdep.h> |
1878 | #include <ddb/db_output.h> |
1879 | |
1880 | void db_print_sa(struct sockaddr *); |
1881 | void db_print_ifa(struct ifaddr *); |
1882 | |
1883 | void |
1884 | db_print_sa(struct sockaddr *sa) |
1885 | { |
1886 | int len; |
1887 | u_char *p; |
1888 | |
1889 | if (sa == NULL) { |
1890 | db_printf("[NULL]"); |
1891 | return; |
1892 | } |
1893 | |
1894 | p = (u_char *)sa; |
1895 | len = sa->sa_len; |
1896 | db_printf("["); |
1897 | while (len > 0) { |
1898 | db_printf("%d", *p); |
1899 | p++; |
1900 | len--; |
1901 | if (len) |
1902 | db_printf(","); |
1903 | } |
1904 | db_printf("]\n"); |
1905 | } |
1906 | |
1907 | void |
1908 | db_print_ifa(struct ifaddr *ifa) |
1909 | { |
1910 | if (ifa == NULL) |
1911 | return; |
1912 | db_printf(" ifa_addr="); |
1913 | db_print_sa(ifa->ifa_addr); |
1914 | db_printf(" ifa_dsta="); |
1915 | db_print_sa(ifa->ifa_dstaddr); |
1916 | db_printf(" ifa_mask="); |
1917 | db_print_sa(ifa->ifa_netmask); |
1918 | db_printf(" flags=0x%x, refcnt=%u, metric=%d\n", |
1919 | ifa->ifa_flags, ifa->ifa_refcnt.r_refs, ifa->ifa_metric); |
1920 | } |
1921 | |
1922 | /* |
1923 | * Function to pass to rtable_walk(). |
1924 | * Return non-zero error to abort walk. |
1925 | */ |
1926 | int |
1927 | db_show_rtentry(struct rtentry *rt, void *w, unsigned int id) |
1928 | { |
1929 | db_printf("rtentry=%p", rt); |
1930 | |
1931 | db_printf(" flags=0x%x refcnt=%u use=%llu expire=%lld\n", |
1932 | rt->rt_flags, rt->rt_refcnt.r_refs, rt->rt_use, rt->rt_expire); |
1933 | |
1934 | db_printf(" key="); db_print_sa(rt_key(rt)((rt)->rt_dest)); |
1935 | db_printf(" plen=%d", rt_plen(rt)((rt)->rt_plen)); |
1936 | db_printf(" gw="); db_print_sa(rt->rt_gateway); |
1937 | db_printf(" ifidx=%u ", rt->rt_ifidx); |
1938 | db_printf(" ifa=%p\n", rt->rt_ifa); |
1939 | db_print_ifa(rt->rt_ifa); |
1940 | |
1941 | db_printf(" gwroute=%p llinfo=%p priority=%d\n", |
1942 | rt->rt_gwroute, rt->rt_llinfo, rt->rt_priority); |
1943 | return (0); |
1944 | } |
1945 | |
1946 | /* |
1947 | * Function to print all the route trees. |
1948 | */ |
1949 | int |
1950 | db_show_rtable(int af, unsigned int rtableid) |
1951 | { |
1952 | db_printf("Route tree for af %d, rtableid %u\n", af, rtableid); |
1953 | rtable_walk(rtableid, af, NULL, db_show_rtentry, NULL); |
1954 | return (0); |
1955 | } |
1956 | #endif /* DDB */ |