Bug Summary

File: net/route.c
Warning: line 1560, column 18
Result of 'malloc' is converted to a pointer of type 'char', which is incompatible with sizeof operand type 'struct rt_mpls'
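
The flagged line (route.c:1560, column 18) falls outside the excerpt below, but the message describes a familiar pattern: an allocation sized with sizeof(struct rt_mpls) whose result is stored through a plain char * (for example a caddr_t field), so the destination pointer type no longer matches the sizeof operand. A minimal userland sketch of that pattern follows; the names are hypothetical and the standard malloc(3) stands in for the kernel allocator.

#include <stdlib.h>

/* Stand-in definition for illustration only; the real struct rt_mpls lives elsewhere in the tree. */
struct rt_mpls_example {
	unsigned int	mpls_label;
	unsigned char	mpls_operation;
};

char *
alloc_mpls_blob(void)
{
	/*
	 * Flagged pattern: the malloc() result is converted to 'char *',
	 * which is incompatible with the sizeof operand type
	 * 'struct rt_mpls_example'.
	 */
	char *p = malloc(sizeof(struct rt_mpls_example));

	return p;
}

Keeping the allocation and the destination pointer of the same type (a struct rt_mpls_example * local, converted to the generic field afterwards) makes the two types agree and typically avoids this report.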

Annotated Source Code


clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name route.c -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/lib/clang/13.0.0 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -D CONFIG_DRM_AMD_DC_DCN3_0 -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant -fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free 
-fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /usr/obj/sys/arch/amd64/compile/GENERIC.MP/scan-build/2022-01-12-131800-47421-1 -x c /usr/src/sys/net/route.c
1/* $OpenBSD: route.c,v 1.400 2022/01/02 22:36:04 jsg Exp $ */
2/* $NetBSD: route.c,v 1.14 1996/02/13 22:00:46 christos Exp $ */
3
4/*
5 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the name of the project nor the names of its contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
31 */
32
33/*
34 * Copyright (c) 1980, 1986, 1991, 1993
35 * The Regents of the University of California. All rights reserved.
36 *
37 * Redistribution and use in source and binary forms, with or without
38 * modification, are permitted provided that the following conditions
39 * are met:
40 * 1. Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * 2. Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in the
44 * documentation and/or other materials provided with the distribution.
45 * 3. Neither the name of the University nor the names of its contributors
46 * may be used to endorse or promote products derived from this software
47 * without specific prior written permission.
48 *
49 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
50 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
51 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
52 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
53 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
54 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
55 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
56 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
58 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
59 * SUCH DAMAGE.
60 *
61 * @(#)route.c 8.2 (Berkeley) 11/15/93
62 */
63
64/*
65 * @(#)COPYRIGHT 1.1 (NRL) 17 January 1995
66 *
67 * NRL grants permission for redistribution and use in source and binary
68 * forms, with or without modification, of the software and documentation
69 * created at NRL provided that the following conditions are met:
70 *
71 * 1. Redistributions of source code must retain the above copyright
72 * notice, this list of conditions and the following disclaimer.
73 * 2. Redistributions in binary form must reproduce the above copyright
74 * notice, this list of conditions and the following disclaimer in the
75 * documentation and/or other materials provided with the distribution.
76 * 3. All advertising materials mentioning features or use of this software
77 * must display the following acknowledgements:
78 * This product includes software developed by the University of
79 * California, Berkeley and its contributors.
80 * This product includes software developed at the Information
81 * Technology Division, US Naval Research Laboratory.
82 * 4. Neither the name of the NRL nor the names of its contributors
83 * may be used to endorse or promote products derived from this software
84 * without specific prior written permission.
85 *
86 * THE SOFTWARE PROVIDED BY NRL IS PROVIDED BY NRL AND CONTRIBUTORS ``AS
87 * IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
88 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
89 * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NRL OR
90 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
91 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
92 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
93 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
94 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
95 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
96 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
97 *
98 * The views and conclusions contained in the software and documentation
99 * are those of the authors and should not be interpreted as representing
100 * official policies, either expressed or implied, of the US Naval
101 * Research Laboratory (NRL).
102 */
103
104#include <sys/param.h>
105#include <sys/systm.h>
106#include <sys/mbuf.h>
107#include <sys/socket.h>
108#include <sys/socketvar.h>
109#include <sys/timeout.h>
110#include <sys/domain.h>
111#include <sys/protosw.h>
112#include <sys/ioctl.h>
113#include <sys/kernel.h>
114#include <sys/queue.h>
115#include <sys/pool.h>
116#include <sys/atomic.h>
117
118#include <net/if.h>
119#include <net/if_var.h>
120#include <net/if_dl.h>
121#include <net/route.h>
122
123#include <netinet/in.h>
124#include <netinet/ip_var.h>
125#include <netinet/in_var.h>
126
127#ifdef INET61
128#include <netinet/ip6.h>
129#include <netinet6/ip6_var.h>
130#include <netinet6/in6_var.h>
131#endif
132
133#ifdef MPLS1
134#include <netmpls/mpls.h>
135#endif
136
137#ifdef BFD
138#include <net/bfd.h>
139#endif
140
141#define ROUNDUP(a) (a>0 ? (1 + (((a) - 1) | (sizeof(long) - 1))) : sizeof(long))
142
143/* Give some jitter to hash, to avoid synchronization between routers. */
144static uint32_t rt_hashjitter;
145
146extern unsigned int rtmap_limit;
147
148struct cpumem * rtcounters;
149int rttrash; /* routes not in table but not freed */
150int ifatrash; /* ifas not in ifp list but not free */
151
152struct pool rtentry_pool; /* pool for rtentry structures */
153struct pool rttimer_pool; /* pool for rttimer structures */
154
155void rt_timer_init(void);
156int rt_setgwroute(struct rtentry *, u_int);
157void rt_putgwroute(struct rtentry *);
158int rtflushclone1(struct rtentry *, void *, u_int);
159int rtflushclone(struct rtentry *, unsigned int);
160int rt_ifa_purge_walker(struct rtentry *, void *, unsigned int);
161struct rtentry *rt_match(struct sockaddr *, uint32_t *, int, unsigned int);
162int rt_clone(struct rtentry **, struct sockaddr *, unsigned int);
163struct sockaddr *rt_plentosa(sa_family_t, int, struct sockaddr_in6 *);
164static int rt_copysa(struct sockaddr *, struct sockaddr *, struct sockaddr **);
165
166#ifdef DDB1
167void db_print_sa(struct sockaddr *);
168void db_print_ifa(struct ifaddr *);
169int db_show_rtentry(struct rtentry *, void *, unsigned int);
170#endif
171
172#define LABELID_MAX 50000
173
174struct rt_label {
175 TAILQ_ENTRY(rt_label) rtl_entry;
176 char rtl_name[RTLABEL_LEN32];
177 u_int16_t rtl_id;
178 int rtl_ref;
179};
180
181TAILQ_HEAD(rt_labels, rt_label) rt_labels = TAILQ_HEAD_INITIALIZER(rt_labels);
182
183void
184route_init(void)
185{
186 rtcounters = counters_alloc(rts_ncounters);
187
188 pool_init(&rtentry_pool, sizeof(struct rtentry), 0, IPL_SOFTNET0x5, 0,
189 "rtentry", NULL((void *)0));
190
191 while (rt_hashjitter == 0)
192 rt_hashjitter = arc4random();
193
194#ifdef BFD
195 bfdinit();
196#endif
197}
198
199/*
200 * Returns 1 if the (cached) ``rt'' entry is still valid, 0 otherwise.
201 */
202int
203rtisvalid(struct rtentry *rt)
204{
205 if (rt == NULL((void *)0))
206 return (0);
207
208 if (!ISSET(rt->rt_flags, RTF_UP)((rt->rt_flags) & (0x1)))
209 return (0);
210
211 if (ISSET(rt->rt_flags, RTF_GATEWAY)((rt->rt_flags) & (0x2))) {
212 KASSERT(rt->rt_gwroute != NULL);
213 KASSERT(!ISSET(rt->rt_gwroute->rt_flags, RTF_GATEWAY));
214 if (!ISSET(rt->rt_gwroute->rt_flags, RTF_UP)((rt->RT_gw._nh->rt_flags) & (0x1)))
215 return (0);
216 }
217
218 return (1);
219}
220
221/*
222 * Do the actual lookup for rtalloc(9), do not use directly!
223 *
224 * Return the best matching entry for the destination ``dst''.
225 *
226 * "RT_RESOLVE" means that a corresponding L2 entry should
227 * be added to the routing table and resolved (via ARP or
228 * NDP), if it does not exist.
229 */
230struct rtentry *
231rt_match(struct sockaddr *dst, uint32_t *src, int flags, unsigned int tableid)
232{
233 struct rtentry *rt = NULL((void *)0);
234
235 rt = rtable_match(tableid, dst, src);
236 if (rt == NULL((void *)0)) {
237 rtstat_inc(rts_unreach);
238 return (NULL((void *)0));
239 }
240
241 if (ISSET(rt->rt_flags, RTF_CLONING)((rt->rt_flags) & (0x100)) && ISSET(flags, RT_RESOLVE)((flags) & (1)))
242 rt_clone(&rt, dst, tableid);
243
244 rt->rt_usert_rmx.rmx_pksent++;
245 return (rt);
246}
247
248int
249rt_clone(struct rtentry **rtp, struct sockaddr *dst, unsigned int rtableid)
250{
251 struct rt_addrinfo info;
252 struct rtentry *rt = *rtp;
253 int error = 0;
254
255 memset(&info, 0, sizeof(info))__builtin_memset((&info), (0), (sizeof(info)));
256 info.rti_info[RTAX_DST0] = dst;
257
258 /*
259 * The priority of cloned route should be different
260 * to avoid conflict with /32 cloning routes.
261 *
262 * It should also be higher to let the ARP layer find
263 * cloned routes instead of the cloning one.
264 */
265 KERNEL_LOCK()_kernel_lock();
266 error = rtrequest(RTM_RESOLVE0xb, &info, rt->rt_priority - 1, &rt,
267 rtableid);
268 KERNEL_UNLOCK()_kernel_unlock();
269 if (error) {
270 rtm_miss(RTM_MISS0x7, &info, 0, RTP_NONE0, 0, error, rtableid);
271 } else {
272 /* Inform listeners of the new route */
273 rtm_send(rt, RTM_ADD0x1, 0, rtableid);
274 rtfree(*rtp);
275 *rtp = rt;
276 }
277 return (error);
278}
279
280/*
281 * Originated from bridge_hash() in if_bridge.c
282 */
283#define mix(a, b, c) do { \
284 a -= b; a -= c; a ^= (c >> 13); \
285 b -= c; b -= a; b ^= (a << 8); \
286 c -= a; c -= b; c ^= (b >> 13); \
287 a -= b; a -= c; a ^= (c >> 12); \
288 b -= c; b -= a; b ^= (a << 16); \
289 c -= a; c -= b; c ^= (b >> 5); \
290 a -= b; a -= c; a ^= (c >> 3); \
291 b -= c; b -= a; b ^= (a << 10); \
292 c -= a; c -= b; c ^= (b >> 15); \
293} while (0)
294
295int
296rt_hash(struct rtentry *rt, struct sockaddr *dst, uint32_t *src)
297{
298 uint32_t a, b, c;
299
300 if (src == NULL((void *)0) || !rtisvalid(rt) || !ISSET(rt->rt_flags, RTF_MPATH)((rt->rt_flags) & (0x40000)))
301 return (-1);
302
303 a = b = 0x9e3779b9;
304 c = rt_hashjitter;
305
306 switch (dst->sa_family) {
307 case AF_INET2:
308 {
309 struct sockaddr_in *sin;
310
311 if (!ipmultipath)
312 return (-1);
313
314 sin = satosin(dst);
315 a += sin->sin_addr.s_addr;
316 b += src[0];
317 mix(a, b, c);
318 break;
319 }
320#ifdef INET61
321 case AF_INET624:
322 {
323 struct sockaddr_in6 *sin6;
324
325 if (!ip6_multipath)
326 return (-1);
327
328 sin6 = satosin6(dst);
329 a += sin6->sin6_addr.s6_addr32__u6_addr.__u6_addr32[0];
330 b += sin6->sin6_addr.s6_addr32__u6_addr.__u6_addr32[2];
331 c += src[0];
332 mix(a, b, c);
333 a += sin6->sin6_addr.s6_addr32__u6_addr.__u6_addr32[1];
334 b += sin6->sin6_addr.s6_addr32__u6_addr.__u6_addr32[3];
335 c += src[1];
336 mix(a, b, c);
337 a += sin6->sin6_addr.s6_addr32__u6_addr.__u6_addr32[2];
338 b += sin6->sin6_addr.s6_addr32__u6_addr.__u6_addr32[1];
339 c += src[2];
340 mix(a, b, c);
341 a += sin6->sin6_addr.s6_addr32__u6_addr.__u6_addr32[3];
342 b += sin6->sin6_addr.s6_addr32__u6_addr.__u6_addr32[0];
343 c += src[3];
344 mix(a, b, c);
345 break;
346 }
347#endif /* INET6 */
348 }
349
350 return (c & 0xffff);
351}
352
353/*
354 * Allocate a route, potentially using multipath to select the peer.
355 */
356struct rtentry *
357rtalloc_mpath(struct sockaddr *dst, uint32_t *src, unsigned int rtableid)
358{
359 return (rt_match(dst, src, RT_RESOLVE1, rtableid));
360}
361
362/*
363 * Look in the routing table for the best matching entry for
364 * ``dst''.
365 *
366 * If a route with a gateway is found and its next hop is no
367 * longer valid, try to cache it.
368 */
369struct rtentry *
370rtalloc(struct sockaddr *dst, int flags, unsigned int rtableid)
371{
372 return (rt_match(dst, NULL((void *)0), flags, rtableid));
373}
374
375/*
376 * Cache the route entry corresponding to a reachable next hop in
377 * the gateway entry ``rt''.
378 */
379int
380rt_setgwroute(struct rtentry *rt, u_int rtableid)
381{
382 struct rtentry *prt, *nhrt;
383 unsigned int rdomain = rtable_l2(rtableid);
384 int error;
385
386 NET_ASSERT_LOCKED();
387
388 KASSERT(ISSET(rt->rt_flags, RTF_GATEWAY));
389
390 /* If we cannot find a valid next hop bail. */
391 nhrt = rt_match(rt->rt_gateway, NULL((void *)0), RT_RESOLVE1, rdomain);
392 if (nhrt == NULL((void *)0))
393 return (ENOENT2);
394
395 /* Next hop entry must be on the same interface. */
396 if (nhrt->rt_ifidx != rt->rt_ifidx) {
397 struct sockaddr_in6 sa_mask;
398
399 if (!ISSET(nhrt->rt_flags, RTF_LLINFO)((nhrt->rt_flags) & (0x400)) ||
400 !ISSET(nhrt->rt_flags, RTF_CLONED)((nhrt->rt_flags) & (0x10000))) {
401 rtfree(nhrt);
402 return (EHOSTUNREACH65);
403 }
404
405 /*
406 * We found a L2 entry, so we might have multiple
407 * RTF_CLONING routes for the same subnet. Query
408 * the first route of the multipath chain and iterate
409 * until we find the correct one.
410 */
411 prt = rtable_lookup(rdomain, rt_key(nhrt->rt_parent)((nhrt->rt_parent)->rt_dest),
412 rt_plen2mask(nhrt->rt_parent, &sa_mask), NULL((void *)0), RTP_ANY64);
413 rtfree(nhrt);
414
415 while (prt != NULL((void *)0) && prt->rt_ifidx != rt->rt_ifidx)
416 prt = rtable_iterate(prt);
417
418 /* We found nothing or a non-cloning MPATH route. */
419 if (prt == NULL((void *)0) || !ISSET(prt->rt_flags, RTF_CLONING)((prt->rt_flags) & (0x100))) {
420 rtfree(prt);
421 return (EHOSTUNREACH65);
422 }
423
424 error = rt_clone(&prt, rt->rt_gateway, rdomain);
425 if (error) {
426 rtfree(prt);
427 return (error);
428 }
429 nhrt = prt;
430 }
431
432 /*
433 * Next hop must be reachable, this also prevents rtentry
434 * loops for example when rt->rt_gwroute points to rt.
435 */
436 if (ISSET(nhrt->rt_flags, RTF_CLONING|RTF_GATEWAY)((nhrt->rt_flags) & (0x100|0x2))) {
437 rtfree(nhrt);
438 return (ENETUNREACH51);
439 }
440
441 /* Next hop is valid so remove possible old cache. */
442 rt_putgwroute(rt);
443 KASSERT(rt->rt_gwroute == NULL);
444
445 /*
446 * If the MTU of next hop is 0, this will reset the MTU of the
447 * route to run PMTUD again from scratch.
448 */
449 if (!ISSET(rt->rt_locks, RTV_MTU)((rt->rt_rmx.rmx_locks) & (0x1)) && (rt->rt_mturt_rmx.rmx_mtu > nhrt->rt_mturt_rmx.rmx_mtu))
450 rt->rt_mturt_rmx.rmx_mtu = nhrt->rt_mturt_rmx.rmx_mtu;
451
452 /*
453 * To avoid reference counting problems when writing link-layer
454 * addresses in an outgoing packet, we ensure that the lifetime
455 * of a cached entry is greater than the bigger lifetime of the
456 * gateway entries it is pointed by.
457 */
458 nhrt->rt_flags |= RTF_CACHED0x20000;
459 nhrt->rt_cachecntRT_gw._ref++;
460
461 rt->rt_gwrouteRT_gw._nh = nhrt;
462
463 return (0);
464}
465
466/*
467 * Invalidate the cached route entry of the gateway entry ``rt''.
468 */
469void
470rt_putgwroute(struct rtentry *rt)
471{
472 struct rtentry *nhrt = rt->rt_gwrouteRT_gw._nh;
473
474 NET_ASSERT_LOCKED();
475
476 if (!ISSET(rt->rt_flags, RTF_GATEWAY)((rt->rt_flags) & (0x2)) || nhrt == NULL((void *)0))
477 return;
478
479 KASSERT(ISSET(nhrt->rt_flags, RTF_CACHED));
480 KASSERT(nhrt->rt_cachecnt > 0);
481
482 --nhrt->rt_cachecntRT_gw._ref;
483 if (nhrt->rt_cachecntRT_gw._ref == 0)
484 nhrt->rt_flags &= ~RTF_CACHED0x20000;
485
486 rtfree(rt->rt_gwrouteRT_gw._nh);
487 rt->rt_gwrouteRT_gw._nh = NULL((void *)0);
488}
489
490void
491rtref(struct rtentry *rt)
492{
493 atomic_inc_int(&rt->rt_refcnt)_atomic_inc_int(&rt->rt_refcnt);
494}
495
496void
497rtfree(struct rtentry *rt)
498{
499 int refcnt;
500
501 if (rt == NULL((void *)0))
502 return;
503
504 refcnt = (int)atomic_dec_int_nv(&rt->rt_refcnt)_atomic_sub_int_nv((&rt->rt_refcnt), 1);
505 if (refcnt <= 0) {
506 KASSERT(!ISSET(rt->rt_flags, RTF_UP));
507 KASSERT(!RT_ROOT(rt));
508 atomic_dec_int(&rttrash)_atomic_dec_int(&rttrash);
509 if (refcnt < 0) {
510 printf("rtfree: %p not freed (neg refs)\n", rt);
511 return;
512 }
513
514 KERNEL_LOCK()_kernel_lock();
515 rt_timer_remove_all(rt);
516 ifafree(rt->rt_ifa);
517 rtlabel_unref(rt->rt_labelid);
518#ifdef MPLS1
519 rt_mpls_clear(rt);
520#endif
521 free(rt->rt_gateway, M_RTABLE5, ROUNDUP(rt->rt_gateway->sa_len));
522 free(rt_key(rt)((rt)->rt_dest), M_RTABLE5, rt_key(rt)((rt)->rt_dest)->sa_len);
523 KERNEL_UNLOCK()_kernel_unlock();
524
525 pool_put(&rtentry_pool, rt);
526 }
527}
528
529void
530ifafree(struct ifaddr *ifa)
531{
532 if (ifa == NULL((void *)0))
533 panic("ifafree");
534 if (ifa->ifa_refcnt == 0) {
535 ifatrash--;
536 free(ifa, M_IFADDR9, 0);
537 } else
538 ifa->ifa_refcnt--;
539}
540
541/*
542 * Force a routing table entry to the specified
543 * destination to go through the given gateway.
544 * Normally called as a result of a routing redirect
545 * message from the network layer.
546 */
547void
548rtredirect(struct sockaddr *dst, struct sockaddr *gateway,
549 struct sockaddr *src, struct rtentry **rtp, unsigned int rdomain)
550{
551 struct rtentry *rt;
552 int error = 0;
553 enum rtstat_counters stat = rts_ncounters;
554 struct rt_addrinfo info;
555 struct ifaddr *ifa;
556 unsigned int ifidx = 0;
557 int flags = RTF_GATEWAY0x2|RTF_HOST0x4;
558 uint8_t prio = RTP_NONE0;
559
560 NET_ASSERT_LOCKED();
561
562 /* verify the gateway is directly reachable */
563 rt = rtalloc(gateway, 0, rdomain);
564 if (!rtisvalid(rt) || ISSET(rt->rt_flags, RTF_GATEWAY)((rt->rt_flags) & (0x2))) {
565 rtfree(rt);
566 error = ENETUNREACH51;
567 goto out;
568 }
569 ifidx = rt->rt_ifidx;
570 ifa = rt->rt_ifa;
571 rtfree(rt);
572 rt = NULL((void *)0);
573
574 rt = rtable_lookup(rdomain, dst, NULL((void *)0), NULL((void *)0), RTP_ANY64);
575 /*
576 * If the redirect isn't from our current router for this dst,
577 * it's either old or wrong. If it redirects us to ourselves,
578 * we have a routing loop, perhaps as a result of an interface
579 * going down recently.
580 */
581#define equal(a1, a2) \
582 ((a1)->sa_len == (a2)->sa_len && \
583 bcmp((caddr_t)(a1), (caddr_t)(a2), (a1)->sa_len) == 0)
584 if (rt != NULL((void *)0) && (!equal(src, rt->rt_gateway) || rt->rt_ifa != ifa))
585 error = EINVAL22;
586 else if (ifa_ifwithaddr(gateway, rdomain) != NULL((void *)0) ||
587 (gateway->sa_family = AF_INET2 &&
588 in_broadcast(satosin(gateway)->sin_addr, rdomain)))
589 error = EHOSTUNREACH65;
590 if (error)
591 goto done;
592 /*
593 * Create a new entry if we just got back a wildcard entry
594 * or the lookup failed. This is necessary for hosts
595 * which use routing redirects generated by smart gateways
596 * to dynamically build the routing tables.
597 */
598 if (rt == NULL((void *)0))
599 goto create;
600 /*
601 * Don't listen to the redirect if it's
602 * for a route to an interface.
603 */
604 if (ISSET(rt->rt_flags, RTF_GATEWAY)((rt->rt_flags) & (0x2))) {
605 if (!ISSET(rt->rt_flags, RTF_HOST)((rt->rt_flags) & (0x4))) {
606 /*
607 * Changing from route to net => route to host.
608 * Create new route, rather than smashing route to net.
609 */
610create:
611 rtfree(rt);
612 flags |= RTF_DYNAMIC0x10;
613 bzero(&info, sizeof(info))__builtin_bzero((&info), (sizeof(info)));
614 info.rti_info[RTAX_DST0] = dst;
615 info.rti_info[RTAX_GATEWAY1] = gateway;
616 info.rti_ifa = ifa;
617 info.rti_flags = flags;
618 rt = NULL((void *)0);
619 error = rtrequest(RTM_ADD0x1, &info, RTP_DEFAULT56, &rt,
620 rdomain);
621 if (error == 0) {
622 flags = rt->rt_flags;
623 prio = rt->rt_priority;
624 }
625 stat = rts_dynamic;
626 } else {
627 /*
628 * Smash the current notion of the gateway to
629 * this destination. Should check about netmask!!!
630 */
631 rt->rt_flags |= RTF_MODIFIED0x20;
632 flags |= RTF_MODIFIED0x20;
633 prio = rt->rt_priority;
634 stat = rts_newgateway;
635 rt_setgate(rt, gateway, rdomain);
636 }
637 } else
638 error = EHOSTUNREACH65;
639done:
640 if (rt) {
641 if (rtp && !error)
642 *rtp = rt;
643 else
644 rtfree(rt);
645 }
646out:
647 if (error)
648 rtstat_inc(rts_badredirect);
649 else if (stat != rts_ncounters)
650 rtstat_inc(stat);
651 bzero((caddr_t)&info, sizeof(info))__builtin_bzero(((caddr_t)&info), (sizeof(info)));
652 info.rti_info[RTAX_DST0] = dst;
653 info.rti_info[RTAX_GATEWAY1] = gateway;
654 info.rti_info[RTAX_AUTHOR6] = src;
655 rtm_miss(RTM_REDIRECT0x6, &info, flags, prio, ifidx, error, rdomain);
656}
657
658/*
659 * Delete a route and generate a message
660 */
661int
662rtdeletemsg(struct rtentry *rt, struct ifnet *ifp, u_int tableid)
663{
664 int error;
665 struct rt_addrinfo info;
666 struct sockaddr_rtlabel sa_rl;
667 struct sockaddr_in6 sa_mask;
668
669 KASSERT(rt->rt_ifidx == ifp->if_index);
670
671 /*
672 * Request the new route so that the entry is not actually
673 * deleted. That will allow the information being reported to
674 * be accurate (and consistent with route_output()).
675 */
676 memset(&info, 0, sizeof(info))__builtin_memset((&info), (0), (sizeof(info)));
677 info.rti_info[RTAX_DST0] = rt_key(rt)((rt)->rt_dest);
678 info.rti_info[RTAX_GATEWAY1] = rt->rt_gateway;
679 if (!ISSET(rt->rt_flags, RTF_HOST)((rt->rt_flags) & (0x4)))
680 info.rti_info[RTAX_NETMASK2] = rt_plen2mask(rt, &sa_mask);
681 info.rti_info[RTAX_LABEL10] = rtlabel_id2sa(rt->rt_labelid, &sa_rl);
682 info.rti_flags = rt->rt_flags;
683 info.rti_info[RTAX_IFP4] = sdltosa(ifp->if_sadl);
684 info.rti_info[RTAX_IFA5] = rt->rt_ifa->ifa_addr;
685 error = rtrequest_delete(&info, rt->rt_priority, ifp, &rt, tableid);
686 rtm_miss(RTM_DELETE0x2, &info, info.rti_flags, rt->rt_priority,
687 rt->rt_ifidx, error, tableid);
688 if (error == 0)
689 rtfree(rt);
690 return (error);
691}
692
693static inline int
694rtequal(struct rtentry *a, struct rtentry *b)
695{
696 if (a == b)
697 return 1;
698
699 if (memcmp(rt_key(a), rt_key(b), rt_key(a)->sa_len) == 0 &&
700 rt_plen(a)((a)->rt_plen) == rt_plen(b)((b)->rt_plen))
701 return 1;
702 else
703 return 0;
704}
705
706int
707rtflushclone1(struct rtentry *rt, void *arg, u_int id)
708{
709 struct rtentry *cloningrt = arg;
710 struct ifnet *ifp;
711
712 if (!ISSET(rt->rt_flags, RTF_CLONED)((rt->rt_flags) & (0x10000)))
713 return 0;
714
715 /* Cached route must stay alive as long as their parent are alive. */
716 if (ISSET(rt->rt_flags, RTF_CACHED)((rt->rt_flags) & (0x20000)) && (rt->rt_parent != cloningrt))
717 return 0;
718
719 if (!rtequal(rt->rt_parent, cloningrt))
720 return 0;
721 /*
722 * This happens when an interface with a RTF_CLONING route is
723 * being detached. In this case it's safe to bail because all
724 * the routes are being purged by rt_ifa_purge().
725 */
726 ifp = if_get(rt->rt_ifidx);
727 if (ifp == NULL((void *)0))
728 return 0;
729
730 if_put(ifp);
731 return EEXIST17;
732}
733
734int
735rtflushclone(struct rtentry *parent, unsigned int rtableid)
736{
737 struct rtentry *rt = NULL((void *)0);
738 struct ifnet *ifp;
739 int error;
740
741#ifdef DIAGNOSTIC1
742 if (!parent || (parent->rt_flags & RTF_CLONING0x100) == 0)
743 panic("rtflushclone: called with a non-cloning route");
744#endif
745
746 do {
747 error = rtable_walk(rtableid, rt_key(parent)((parent)->rt_dest)->sa_family, &rt,
748 rtflushclone1, parent);
749 if (rt != NULL((void *)0) && error == EEXIST17) {
750 ifp = if_get(rt->rt_ifidx);
751 if (ifp == NULL((void *)0)) {
752 error = EAGAIN35;
753 } else {
754 error = rtdeletemsg(rt, ifp, rtableid);
755 if (error == 0)
756 error = EAGAIN35;
757 if_put(ifp);
758 }
759 }
760 rtfree(rt);
761 rt = NULL((void *)0);
762 } while (error == EAGAIN35);
763
764 return error;
765
766}
767
768int
769rtrequest_delete(struct rt_addrinfo *info, u_int8_t prio, struct ifnet *ifp,
770 struct rtentry **ret_nrt, u_int tableid)
771{
772 struct rtentry *rt;
773 int error;
774
775 NET_ASSERT_LOCKED();
776
777 if (!rtable_exists(tableid))
778 return (EAFNOSUPPORT47);
779 rt = rtable_lookup(tableid, info->rti_info[RTAX_DST0],
780 info->rti_info[RTAX_NETMASK2], info->rti_info[RTAX_GATEWAY1], prio);
781 if (rt == NULL((void *)0))
782 return (ESRCH3);
783
784 /* Make sure that's the route the caller want to delete. */
785 if (ifp != NULL((void *)0) && ifp->if_index != rt->rt_ifidx) {
786 rtfree(rt);
787 return (ESRCH3);
788 }
789
790#ifdef BFD
791 if (ISSET(rt->rt_flags, RTF_BFD)((rt->rt_flags) & (0x1000000)))
792 bfdclear(rt);
793#endif
794
795 error = rtable_delete(tableid, info->rti_info[RTAX_DST0],
796 info->rti_info[RTAX_NETMASK2], rt);
797 if (error != 0) {
798 rtfree(rt);
799 return (ESRCH3);
800 }
801
802 /* Release next hop cache before flushing cloned entries. */
803 rt_putgwroute(rt);
804
805 /* Clean up any cloned children. */
806 if (ISSET(rt->rt_flags, RTF_CLONING)((rt->rt_flags) & (0x100)))
807 rtflushclone(rt, tableid);
808
809 rtfree(rt->rt_parent);
810 rt->rt_parent = NULL((void *)0);
811
812 rt->rt_flags &= ~RTF_UP0x1;
813
814 KASSERT(ifp->if_index == rt->rt_ifidx);
815 ifp->if_rtrequest(ifp, RTM_DELETE0x2, rt);
816
817 atomic_inc_int(&rttrash)_atomic_inc_int(&rttrash);
818
819 if (ret_nrt != NULL((void *)0))
820 *ret_nrt = rt;
821 else
822 rtfree(rt);
823
824 return (0);
825}
826
827int
828rtrequest(int req, struct rt_addrinfo *info, u_int8_t prio,
829 struct rtentry **ret_nrt, u_int tableid)
830{
831 struct ifnet *ifp;
832 struct rtentry *rt, *crt;
833 struct ifaddr *ifa;
834 struct sockaddr *ndst;
835 struct sockaddr_rtlabel *sa_rl, sa_rl2;
836 struct sockaddr_dl sa_dl = { sizeof(sa_dl), AF_LINK18 };
837 int error;
838
839 NET_ASSERT_LOCKED();
840
841 if (!rtable_exists(tableid))
842 return (EAFNOSUPPORT47);
843 if (info->rti_flags & RTF_HOST0x4)
844 info->rti_info[RTAX_NETMASK2] = NULL((void *)0);
845 switch (req) {
846 case RTM_DELETE0x2:
847 return (EINVAL22);
848
849 case RTM_RESOLVE0xb:
850 if (ret_nrt == NULL((void *)0) || (rt = *ret_nrt) == NULL((void *)0))
851 return (EINVAL22);
852 if ((rt->rt_flags & RTF_CLONING0x100) == 0)
853 return (EINVAL22);
854 KASSERT(rt->rt_ifa->ifa_ifp != NULL);
855 info->rti_ifa = rt->rt_ifa;
856 info->rti_flags = rt->rt_flags | (RTF_CLONED0x10000|RTF_HOST0x4);
857 info->rti_flags &= ~(RTF_CLONING0x100|RTF_CONNECTED0x800000|RTF_STATIC0x800);
858 info->rti_info[RTAX_GATEWAY1] = sdltosa(&sa_dl);
859 info->rti_info[RTAX_LABEL10] =
860 rtlabel_id2sa(rt->rt_labelid, &sa_rl2);
861 /* FALLTHROUGH */
862
863 case RTM_ADD0x1:
864 if (info->rti_ifa == NULL((void *)0))
865 return (EINVAL22);
866 ifa = info->rti_ifa;
867 ifp = ifa->ifa_ifp;
868 if (prio == 0)
869 prio = ifp->if_priority + RTP_STATIC8;
870
871 error = rt_copysa(info->rti_info[RTAX_DST0],
872 info->rti_info[RTAX_NETMASK2], &ndst);
873 if (error)
874 return (error);
875
876 rt = pool_get(&rtentry_pool, PR_NOWAIT0x0002 | PR_ZERO0x0008);
877 if (rt == NULL((void *)0)) {
878 free(ndst, M_RTABLE5, ndst->sa_len);
879 return (ENOBUFS55);
880 }
881
882 rt->rt_refcnt = 1;
883 rt->rt_flags = info->rti_flags | RTF_UP0x1;
884 rt->rt_priority = prio; /* init routing priority */
885 LIST_INIT(&rt->rt_timer);
886
887 /* Check the link state if the table supports it. */
888 if (rtable_mpath_capable(tableid, ndst->sa_family) &&
889 !ISSET(rt->rt_flags, RTF_LOCAL)((rt->rt_flags) & (0x200000)) &&
890 (!LINK_STATE_IS_UP(ifp->if_link_state) ||
891 !ISSET(ifp->if_flags, IFF_UP)((ifp->if_flags) & (0x1)))) {
892 rt->rt_flags &= ~RTF_UP0x1;
893 rt->rt_priority |= RTP_DOWN0x80;
894 }
895
896 if (info->rti_info[RTAX_LABEL10] != NULL((void *)0)) {
897 sa_rl = (struct sockaddr_rtlabel *)
898 info->rti_info[RTAX_LABEL10];
899 rt->rt_labelid = rtlabel_name2id(sa_rl->sr_label);
900 }
901
902#ifdef MPLS1
903 /* We have to allocate additional space for MPLS infos */
904 if (info->rti_flags & RTF_MPLS0x100000 &&
905 (info->rti_info[RTAX_SRC8] != NULL((void *)0) ||
906 info->rti_info[RTAX_DST0]->sa_family == AF_MPLS33)) {
907 error = rt_mpls_set(rt, info->rti_info[RTAX_SRC8],
908 info->rti_mpls);
909 if (error) {
910 free(ndst, M_RTABLE5, ndst->sa_len);
911 pool_put(&rtentry_pool, rt);
912 return (error);
913 }
914 } else
915 rt_mpls_clear(rt);
916#endif
917
918 ifa->ifa_refcnt++;
919 rt->rt_ifa = ifa;
920 rt->rt_ifidx = ifp->if_index;
921 /*
922 * Copy metrics and a back pointer from the cloned
923 * route's parent.
924 */
925 if (ISSET(rt->rt_flags, RTF_CLONED)((rt->rt_flags) & (0x10000))) {
926 rtref(*ret_nrt);
927 rt->rt_parent = *ret_nrt;
928 rt->rt_rmx = (*ret_nrt)->rt_rmx;
929 }
930
931 /*
932 * We must set rt->rt_gateway before adding ``rt'' to
933 * the routing table because the radix MPATH code use
934 * it to (re)order routes.
935 */
936 if ((error = rt_setgate(rt, info->rti_info[RTAX_GATEWAY1],
937 tableid))) {
938 ifafree(ifa);
939 rtfree(rt->rt_parent);
940 rt_putgwroute(rt);
941 free(rt->rt_gateway, M_RTABLE5,
942 ROUNDUP(rt->rt_gateway->sa_len));
943 free(ndst, M_RTABLE5, ndst->sa_len);
944 pool_put(&rtentry_pool, rt);
945 return (error);
946 }
947
948 error = rtable_insert(tableid, ndst,
949 info->rti_info[RTAX_NETMASK2], info->rti_info[RTAX_GATEWAY1],
950 rt->rt_priority, rt);
951 if (error != 0 &&
952 (crt = rtable_match(tableid, ndst, NULL((void *)0))) != NULL((void *)0)) {
953 /* overwrite cloned route */
954 if (ISSET(crt->rt_flags, RTF_CLONED)((crt->rt_flags) & (0x10000)) &&
955 !ISSET(crt->rt_flags, RTF_CACHED)((crt->rt_flags) & (0x20000))) {
956 struct ifnet *cifp;
957
958 cifp = if_get(crt->rt_ifidx);
959 KASSERT(cifp != NULL);
960 rtdeletemsg(crt, cifp, tableid);
961 if_put(cifp);
962
963 error = rtable_insert(tableid, ndst,
964 info->rti_info[RTAX_NETMASK2],
965 info->rti_info[RTAX_GATEWAY1],
966 rt->rt_priority, rt);
967 }
968 rtfree(crt);
969 }
970 if (error != 0) {
971 ifafree(ifa);
972 rtfree(rt->rt_parent);
973 rt_putgwroute(rt);
974 free(rt->rt_gateway, M_RTABLE5,
975 ROUNDUP(rt->rt_gateway->sa_len));
976 free(ndst, M_RTABLE5, ndst->sa_len);
977 pool_put(&rtentry_pool, rt);
978 return (EEXIST17);
979 }
980 ifp->if_rtrequest(ifp, req, rt);
981
982 if_group_routechange(info->rti_info[RTAX_DST0],
983 info->rti_info[RTAX_NETMASK2]);
984
985 if (ret_nrt != NULL((void *)0))
986 *ret_nrt = rt;
987 else
988 rtfree(rt);
989 break;
990 }
991
992 return (0);
993}
994
995int
996rt_setgate(struct rtentry *rt, struct sockaddr *gate, u_int rtableid)
997{
998 int glen = ROUNDUP(gate->sa_len);
999 struct sockaddr *sa;
1000
1001 if (rt->rt_gateway == NULL((void *)0) || glen != ROUNDUP(rt->rt_gateway->sa_len)) {
1002 sa = malloc(glen, M_RTABLE5, M_NOWAIT0x0002);
1003 if (sa == NULL((void *)0))
1004 return (ENOBUFS55);
1005 if (rt->rt_gateway != NULL((void *)0)) {
1006 free(rt->rt_gateway, M_RTABLE5,
1007 ROUNDUP(rt->rt_gateway->sa_len));
1008 }
1009 rt->rt_gateway = sa;
1010 }
1011 memmove(rt->rt_gateway, gate, glen)__builtin_memmove((rt->rt_gateway), (gate), (glen));
1012
1013 if (ISSET(rt->rt_flags, RTF_GATEWAY)((rt->rt_flags) & (0x2)))
1014 return (rt_setgwroute(rt, rtableid));
1015
1016 return (0);
1017}
1018
1019/*
1020 * Return the route entry containing the next hop link-layer
1021 * address corresponding to ``rt''.
1022 */
1023struct rtentry *
1024rt_getll(struct rtentry *rt)
1025{
1026 if (ISSET(rt->rt_flags, RTF_GATEWAY)((rt->rt_flags) & (0x2))) {
1027 KASSERT(rt->rt_gwroute != NULL);
1028 return (rt->rt_gwrouteRT_gw._nh);
1029 }
1030
1031 return (rt);
1032}
1033
1034void
1035rt_maskedcopy(struct sockaddr *src, struct sockaddr *dst,
1036 struct sockaddr *netmask)
1037{
1038 u_char *cp1 = (u_char *)src;
1039 u_char *cp2 = (u_char *)dst;
1040 u_char *cp3 = (u_char *)netmask;
1041 u_char *cplim = cp2 + *cp3;
1042 u_char *cplim2 = cp2 + *cp1;
1043
1044 *cp2++ = *cp1++; *cp2++ = *cp1++; /* copies sa_len & sa_family */
1045 cp3 += 2;
1046 if (cplim > cplim2)
1047 cplim = cplim2;
1048 while (cp2 < cplim)
1049 *cp2++ = *cp1++ & *cp3++;
1050 if (cp2 < cplim2)
1051 bzero(cp2, cplim2 - cp2)__builtin_bzero((cp2), (cplim2 - cp2));
1052}
1053
1054/*
1055 * allocate new sockaddr structure based on the user supplied src and mask
1056 * that is useable for the routing table.
1057 */
1058static int
1059rt_copysa(struct sockaddr *src, struct sockaddr *mask, struct sockaddr **dst)
1060{
1061 static const u_char maskarray[] = {
1062 0x0, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfc, 0xfe };
1063 struct sockaddr *ndst;
1064 const struct domain *dp;
1065 u_char *csrc, *cdst;
1066 int i, plen;
1067
1068 for (i = 0; (dp = domains[i]) != NULL((void *)0); i++) {
1069 if (dp->dom_rtoffset == 0)
1070 continue;
1071 if (src->sa_family == dp->dom_family)
1072 break;
1073 }
1074 if (dp == NULL((void *)0))
1075 return (EAFNOSUPPORT47);
1076
1077 if (src->sa_len < dp->dom_sasize)
1078 return (EINVAL22);
1079
1080 plen = rtable_satoplen(src->sa_family, mask);
1081 if (plen == -1)
1082 return (EINVAL22);
1083
1084 ndst = malloc(dp->dom_sasize, M_RTABLE5, M_NOWAIT0x0002|M_ZERO0x0008);
1085 if (ndst == NULL((void *)0))
1086 return (ENOBUFS55);
1087
1088 ndst->sa_family = src->sa_family;
1089 ndst->sa_len = dp->dom_sasize;
1090
1091 csrc = (u_char *)src + dp->dom_rtoffset;
1092 cdst = (u_char *)ndst + dp->dom_rtoffset;
1093
1094 memcpy(cdst, csrc, plen / 8)__builtin_memcpy((cdst), (csrc), (plen / 8));
1095 if (plen % 8 != 0)
1096 cdst[plen / 8] = csrc[plen / 8] & maskarray[plen % 8];
1097
1098 *dst = ndst;
1099 return (0);
1100}
1101
1102int
1103rt_ifa_add(struct ifaddr *ifa, int flags, struct sockaddr *dst,
1104 unsigned int rdomain)
1105{
1106 struct ifnet *ifp = ifa->ifa_ifp;
1107 struct rtentry *rt;
1108 struct sockaddr_rtlabel sa_rl;
1109 struct rt_addrinfo info;
1110 uint8_t prio = ifp->if_priority + RTP_STATIC8;
1111 int error;
1112
1113 KASSERT(rdomain == rtable_l2(rdomain));
1114
1115 memset(&info, 0, sizeof(info))__builtin_memset((&info), (0), (sizeof(info)));
1116 info.rti_ifa = ifa;
1117 info.rti_flags = flags;
1118 info.rti_info[RTAX_DST0] = dst;
1119 if (flags & RTF_LLINFO0x400)
1120 info.rti_info[RTAX_GATEWAY1] = sdltosa(ifp->if_sadl);
1121 else
1122 info.rti_info[RTAX_GATEWAY1] = ifa->ifa_addr;
1123 info.rti_info[RTAX_LABEL10] = rtlabel_id2sa(ifp->if_rtlabelid, &sa_rl);
1124
1125#ifdef MPLS1
1126 if ((flags & RTF_MPLS0x100000) == RTF_MPLS0x100000)
1127 info.rti_mpls = MPLS_OP_POP0x1;
1128#endif /* MPLS */
1129
1130 if ((flags & RTF_HOST0x4) == 0)
1131 info.rti_info[RTAX_NETMASK2] = ifa->ifa_netmask;
1132
1133 if (flags & (RTF_LOCAL0x200000|RTF_BROADCAST0x400000))
1134 prio = RTP_LOCAL1;
1135
1136 if (flags & RTF_CONNECTED0x800000)
1137 prio = ifp->if_priority + RTP_CONNECTED4;
1138
1139 error = rtrequest(RTM_ADD0x1, &info, prio, &rt, rdomain);
1140 if (error == 0) {
1141 /*
1142 * A local route is created for every address configured
1143 * on an interface, so use this information to notify
1144 * userland that a new address has been added.
1145 */
1146 if (flags & RTF_LOCAL0x200000)
1147 rtm_addr(RTM_NEWADDR0xc, ifa);
1148 rtm_send(rt, RTM_ADD0x1, 0, rdomain);
1149 rtfree(rt);
1150 }
1151 return (error);
1152}
1153
1154int
1155rt_ifa_del(struct ifaddr *ifa, int flags, struct sockaddr *dst,
1156 unsigned int rdomain)
1157{
1158 struct ifnet *ifp = ifa->ifa_ifp;
1159 struct rtentry *rt;
1160 struct mbuf *m = NULL((void *)0);
1161 struct sockaddr *deldst;
1162 struct rt_addrinfo info;
1163 struct sockaddr_rtlabel sa_rl;
1164 uint8_t prio = ifp->if_priority + RTP_STATIC8;
1165 int error;
1166
1167 KASSERT(rdomain == rtable_l2(rdomain));
1168
1169 if ((flags & RTF_HOST0x4) == 0 && ifa->ifa_netmask) {
1170 m = m_get(M_DONTWAIT0x0002, MT_SONAME3);
1171 if (m == NULL((void *)0))
1172 return (ENOBUFS55);
1173 deldst = mtod(m, struct sockaddr *)((struct sockaddr *)((m)->m_hdr.mh_data));
1174 rt_maskedcopy(dst, deldst, ifa->ifa_netmask);
1175 dst = deldst;
1176 }
1177
1178 memset(&info, 0, sizeof(info))__builtin_memset((&info), (0), (sizeof(info)));
1179 info.rti_ifa = ifa;
1180 info.rti_flags = flags;
1181 info.rti_info[RTAX_DST0] = dst;
1182 if ((flags & RTF_LLINFO0x400) == 0)
1183 info.rti_info[RTAX_GATEWAY1] = ifa->ifa_addr;
1184 info.rti_info[RTAX_LABEL10] = rtlabel_id2sa(ifp->if_rtlabelid, &sa_rl);
1185
1186 if ((flags & RTF_HOST0x4) == 0)
1187 info.rti_info[RTAX_NETMASK2] = ifa->ifa_netmask;
1188
1189 if (flags & (RTF_LOCAL0x200000|RTF_BROADCAST0x400000))
1190 prio = RTP_LOCAL1;
1191
1192 if (flags & RTF_CONNECTED0x800000)
1193 prio = ifp->if_priority + RTP_CONNECTED4;
1194
1195 rtable_clearsource(rdomain, ifa->ifa_addr);
1196 error = rtrequest_delete(&info, prio, ifp, &rt, rdomain);
1197 if (error == 0) {
1198 rtm_send(rt, RTM_DELETE0x2, 0, rdomain);
1199 if (flags & RTF_LOCAL0x200000)
1200 rtm_addr(RTM_DELADDR0xd, ifa);
1201 rtfree(rt);
1202 }
1203 m_free(m);
1204
1205 return (error);
1206}
1207
1208/*
1209 * Add ifa's address as a local rtentry.
1210 */
1211int
1212rt_ifa_addlocal(struct ifaddr *ifa)
1213{
1214 struct ifnet *ifp = ifa->ifa_ifp;
1215 struct rtentry *rt;
1216 u_int flags = RTF_HOST0x4|RTF_LOCAL0x200000;
1217 int error = 0;
1218
1219 /*
1220 * If the configured address correspond to the magical "any"
1221 * address do not add a local route entry because that might
1222 * corrupt the routing tree which uses this value for the
1223 * default routes.
1224 */
1225 switch (ifa->ifa_addr->sa_family) {
1226 case AF_INET2:
1227 if (satosin(ifa->ifa_addr)->sin_addr.s_addr == INADDR_ANY)
1228 return (0);
1229 break;
1230#ifdef INET61
1231 case AF_INET624:
1232 if (IN6_ARE_ADDR_EQUAL(&satosin6(ifa->ifa_addr)->sin6_addr,
1233 &in6addr_any))
1234 return (0);
1235 break;
1236#endif
1237 default:
1238 break;
1239 }
1240
1241 if (!ISSET(ifp->if_flags, (IFF_LOOPBACK|IFF_POINTOPOINT))((ifp->if_flags) & ((0x8|0x10))))
1242 flags |= RTF_LLINFO0x400;
1243
1244 /* If there is no local entry, allocate one. */
1245 rt = rtalloc(ifa->ifa_addr, 0, ifp->if_rdomainif_data.ifi_rdomain);
1246 if (rt == NULL((void *)0) || ISSET(rt->rt_flags, flags)((rt->rt_flags) & (flags)) != flags) {
1247 error = rt_ifa_add(ifa, flags | RTF_MPATH0x40000, ifa->ifa_addr,
1248 ifp->if_rdomainif_data.ifi_rdomain);
1249 }
1250 rtfree(rt);
1251
1252 return (error);
1253}
1254
1255/*
1256 * Remove local rtentry of ifa's address if it exists.
1257 */
1258int
1259rt_ifa_dellocal(struct ifaddr *ifa)
1260{
1261 struct ifnet *ifp = ifa->ifa_ifp;
1262 struct rtentry *rt;
1263 u_int flags = RTF_HOST0x4|RTF_LOCAL0x200000;
1264 int error = 0;
1265
1266 /*
1267 * We do not add local routes for such address, so do not bother
1268 * removing them.
1269 */
1270 switch (ifa->ifa_addr->sa_family) {
1271 case AF_INET2:
1272 if (satosin(ifa->ifa_addr)->sin_addr.s_addr == INADDR_ANY)
1273 return (0);
1274 break;
1275#ifdef INET61
1276 case AF_INET624:
1277 if (IN6_ARE_ADDR_EQUAL(&satosin6(ifa->ifa_addr)->sin6_addr,
1278 &in6addr_any))
1279 return (0);
1280 break;
1281#endif
1282 default:
1283 break;
1284 }
1285
1286 if (!ISSET(ifp->if_flags, (IFF_LOOPBACK|IFF_POINTOPOINT))((ifp->if_flags) & ((0x8|0x10))))
1287 flags |= RTF_LLINFO0x400;
1288
1289 /*
1290 * Before deleting, check if a corresponding local host
1291 * route surely exists. With this check, we can avoid to
1292 * delete an interface direct route whose destination is same
1293 * as the address being removed. This can happen when removing
1294 * a subnet-router anycast address on an interface attached
1295 * to a shared medium.
1296 */
1297 rt = rtalloc(ifa->ifa_addr, 0, ifp->if_rdomainif_data.ifi_rdomain);
1298 if (rt != NULL((void *)0) && ISSET(rt->rt_flags, flags)((rt->rt_flags) & (flags)) == flags) {
1299 error = rt_ifa_del(ifa, flags, ifa->ifa_addr,
1300 ifp->if_rdomainif_data.ifi_rdomain);
1301 }
1302 rtfree(rt);
1303
1304 return (error);
1305}
1306
1307/*
1308 * Remove all addresses attached to ``ifa''.
1309 */
1310void
1311rt_ifa_purge(struct ifaddr *ifa)
1312{
1313 struct ifnet *ifp = ifa->ifa_ifp;
1314 struct rtentry *rt = NULL((void *)0);
1315 unsigned int rtableid;
1316 int error, af = ifa->ifa_addr->sa_family;
1317
1318 KASSERT(ifp != NULL);
1319
1320 for (rtableid = 0; rtableid < rtmap_limit; rtableid++) {
1321 /* skip rtables that are not in the rdomain of the ifp */
1322 if (rtable_l2(rtableid) != ifp->if_rdomainif_data.ifi_rdomain)
1323 continue;
1324
1325 do {
1326 error = rtable_walk(rtableid, af, &rt,
1327 rt_ifa_purge_walker, ifa);
1328 if (rt != NULL((void *)0) && error == EEXIST17) {
1329 error = rtdeletemsg(rt, ifp, rtableid);
1330 if (error == 0)
1331 error = EAGAIN35;
1332 }
1333 rtfree(rt);
1334 rt = NULL((void *)0);
1335 } while (error == EAGAIN35);
1336
1337 if (error == EAFNOSUPPORT47)
1338 error = 0;
1339
1340 if (error)
1341 break;
1342 }
1343}
1344
1345int
1346rt_ifa_purge_walker(struct rtentry *rt, void *vifa, unsigned int rtableid)
1347{
1348 struct ifaddr *ifa = vifa;
1349
1350 if (rt->rt_ifa == ifa)
1351 return EEXIST17;
1352
1353 return 0;
1354}
1355
1356/*
1357 * Route timer routines. These routes allow functions to be called
1358 * for various routes at any time. This is useful in supporting
1359 * path MTU discovery and redirect route deletion.
1360 *
1361 * This is similar to some BSDI internal functions, but it provides
1362 * for multiple queues for efficiency's sake...
1363 */
1364
1365LIST_HEAD(, rttimer_queue)struct { struct rttimer_queue *lh_first; } rttimer_queue_head;
1366static int rt_init_done = 0;
1367
1368#define RTTIMER_CALLOUT(r) { \
1369 if (r->rtt_func != NULL((void *)0)) { \
1370 (*r->rtt_func)(r->rtt_rt, r); \
1371 } else { \
1372 struct ifnet *ifp; \
1373 \
1374 ifp = if_get(r->rtt_rt->rt_ifidx); \
1375 if (ifp != NULL((void *)0)) \
1376 rtdeletemsg(r->rtt_rt, ifp, r->rtt_tableid); \
1377 if_put(ifp); \
1378 } \
1379}
1380
1381/*
1382 * Some subtle order problems with domain initialization mean that
1383 * we cannot count on this being run from rt_init before various
1384 * protocol initializations are done. Therefore, we make sure
1385 * that this is run when the first queue is added...
1386 */
1387
1388void
1389rt_timer_init(void)
1390{
1391 static struct timeout rt_timer_timeout;
1392
1393 if (rt_init_done)
1394 panic("rt_timer_init: already initialized");
1395
1396 pool_init(&rttimer_pool, sizeof(struct rttimer), 0, IPL_SOFTNET0x5, 0,
1397 "rttmr", NULL((void *)0));
1398
1399 LIST_INIT(&rttimer_queue_head);
1400 timeout_set_proc(&rt_timer_timeout, rt_timer_timer, &rt_timer_timeout);
1401 timeout_add_sec(&rt_timer_timeout, 1);
1402 rt_init_done = 1;
1403}
1404
1405struct rttimer_queue *
1406rt_timer_queue_create(u_int timeout)
1407{
1408 struct rttimer_queue *rtq;
1409
1410 if (rt_init_done == 0)
1411 rt_timer_init();
1412
1413 if ((rtq = malloc(sizeof(*rtq), M_RTABLE5, M_NOWAIT0x0002|M_ZERO0x0008)) == NULL((void *)0))
1414 return (NULL((void *)0));
1415
1416 rtq->rtq_timeout = timeout;
1417 rtq->rtq_count = 0;
1418 TAILQ_INIT(&rtq->rtq_head);
1419 LIST_INSERT_HEAD(&rttimer_queue_head, rtq, rtq_link);
1420
1421 return (rtq);
1422}
1423
1424void
1425rt_timer_queue_change(struct rttimer_queue *rtq, long timeout)
1426{
1427 rtq->rtq_timeout = timeout;
1428}
1429
1430void
1431rt_timer_queue_destroy(struct rttimer_queue *rtq)
1432{
1433 struct rttimer *r;
1434
1435 NET_ASSERT_LOCKED();
1436
1437 while ((r = TAILQ_FIRST(&rtq->rtq_head)((&rtq->rtq_head)->tqh_first)) != NULL((void *)0)) {
1438 LIST_REMOVE(r, rtt_link);
1439 TAILQ_REMOVE(&rtq->rtq_head, r, rtt_next);
1440 RTTIMER_CALLOUT(r);
1441 pool_put(&rttimer_pool, r);
1442 if (rtq->rtq_count > 0)
1443 rtq->rtq_count--;
1444 else
1445 printf("rt_timer_queue_destroy: rtq_count reached 0\n");
1446 }
1447
1448 LIST_REMOVE(rtq, rtq_link);
1449 free(rtq, M_RTABLE, sizeof(*rtq));
1450}
1451
1452unsigned long
1453rt_timer_queue_count(struct rttimer_queue *rtq)
1454{
1455 return (rtq->rtq_count);
1456}
1457
1458void
1459rt_timer_remove_all(struct rtentry *rt)
1460{
1461 struct rttimer *r;
1462
1463 while ((r = LIST_FIRST(&rt->rt_timer)) != NULL) {
1464 LIST_REMOVE(r, rtt_link);
1465 TAILQ_REMOVE(&r->rtt_queue->rtq_head, r, rtt_next);
1466 if (r->rtt_queue->rtq_count > 0)
1467 r->rtt_queue->rtq_count--;
1468 else
1469 printf("rt_timer_remove_all: rtq_count reached 0\n");
1470 pool_put(&rttimer_pool, r);
1471 }
1472}
1473
1474int
1475rt_timer_add(struct rtentry *rt, void (*func)(struct rtentry *,
1476 struct rttimer *), struct rttimer_queue *queue, u_int rtableid)
1477{
1478 struct rttimer *r;
1479 long current_time;
1480
1481 current_time = getuptime();
1482 rt->rt_expire = getuptime() + queue->rtq_timeout;
1483
1484 /*
1485 * If there's already a timer with this action, destroy it before
1486 * we add a new one.
1487 */
1488 LIST_FOREACH(r, &rt->rt_timer, rtt_link) {
1489 if (r->rtt_func == func) {
1490 LIST_REMOVE(r, rtt_link);
1491 TAILQ_REMOVE(&r->rtt_queue->rtq_head, r, rtt_next);
1492 if (r->rtt_queue->rtq_count > 0)
1493 r->rtt_queue->rtq_count--;
1494 else
1495 printf("rt_timer_add: rtq_count reached 0\n");
1496 pool_put(&rttimer_pool, r);
1497 break; /* only one per list, so we can quit... */
1498 }
1499 }
1500
1501 r = pool_get(&rttimer_pool, PR_NOWAIT | PR_ZERO);
1502 if (r == NULL)
1503 return (ENOBUFS);
1504
1505 r->rtt_rt = rt;
1506 r->rtt_time = current_time;
1507 r->rtt_func = func;
1508 r->rtt_queue = queue;
1509 r->rtt_tableid = rtableid;
1510 LIST_INSERT_HEAD(&rt->rt_timer, r, rtt_link);
1511 TAILQ_INSERT_TAIL(&queue->rtq_head, r, rtt_next);
1512 r->rtt_queue->rtq_count++;
1513
1514 return (0);
1515}
1516
1517void
1518rt_timer_timer(void *arg)
1519{
1520 struct timeout *to = (struct timeout *)arg;
1521 struct rttimer_queue *rtq;
1522 struct rttimer *r;
1523 long current_time;
1524
1525 current_time = getuptime();
1526
1527 NET_LOCK();
1528 LIST_FOREACH(rtq, &rttimer_queue_head, rtq_link) {
1529 while ((r = TAILQ_FIRST(&rtq->rtq_head)) != NULL &&
1530 (r->rtt_time + rtq->rtq_timeout) < current_time) {
1531 LIST_REMOVE(r, rtt_link);
1532 TAILQ_REMOVE(&rtq->rtq_head, r, rtt_next);
1533 RTTIMER_CALLOUT(r);
1534 pool_put(&rttimer_pool, r);
1535 if (rtq->rtq_count > 0)
1536 rtq->rtq_count--;
1537 else
1538 printf("rt_timer_timer: rtq_count reached 0\n");
1539 }
1540 }
1541 NET_UNLOCK();
1542
1543 timeout_add_sec(to, 1);
1544}
1545
1546#ifdef MPLS
1547int
1548rt_mpls_set(struct rtentry *rt, struct sockaddr *src, uint8_t op)
1549{
1550 struct sockaddr_mpls *psa_mpls = (struct sockaddr_mpls *)src;
1551 struct rt_mpls *rt_mpls;
1552
1553 if (psa_mpls == NULL && op != MPLS_OP_POP)
1554 return (EOPNOTSUPP);
1555 if (psa_mpls != NULL && psa_mpls->smpls_len != sizeof(*psa_mpls))
1556 return (EINVAL);
1557 if (psa_mpls != NULL && psa_mpls->smpls_family != AF_MPLS)
1558 return (EAFNOSUPPORT);
1559
1560 rt->rt_llinfo = malloc(sizeof(struct rt_mpls), M_TEMP, M_NOWAIT|M_ZERO);
Result of 'malloc' is converted to a pointer of type 'char', which is incompatible with sizeof operand type 'struct rt_mpls'
1561 if (rt->rt_llinfo == NULL)
1562 return (ENOMEM);
1563
1564 rt_mpls = (struct rt_mpls *)rt->rt_llinfo;
1565 if (psa_mpls != NULL)
1566 rt_mpls->mpls_label = psa_mpls->smpls_label;
1567 rt_mpls->mpls_operation = op;
1568 /* XXX: set experimental bits */
1569 rt->rt_flags |= RTF_MPLS;
1570
1571 return (0);
1572}
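The note above (source line 1560) is the finding this report is about: the result of malloc(sizeof(struct rt_mpls)) is assigned straight to rt->rt_llinfo, which the checker sees as a plain char pointer (caddr_t), so the sizeof operand and the destination pointer type disagree. The code still behaves correctly, since rt_llinfo is cast back to struct rt_mpls * two lines later, but the usual way to satisfy this malloc/sizeof heuristic is to allocate through a pointer of the intended type first. A minimal sketch under that assumption, reusing the function's existing rt_mpls local and meant only as an illustration, not a committed fix:

	/* Size the allocation from the typed pointer that receives it. */
	rt_mpls = malloc(sizeof(*rt_mpls), M_TEMP, M_NOWAIT|M_ZERO);
	if (rt_mpls == NULL)
		return (ENOMEM);
	/* Hand it to the generic per-route pointer with an explicit cast. */
	rt->rt_llinfo = (caddr_t)rt_mpls;

With sizeof(*rt_mpls) tied to the pointer on the left-hand side, the checker no longer sees a char * receiving a struct-sized allocation, and the later assignments to mpls_label and mpls_operation can use rt_mpls directly.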
1573
1574void
1575rt_mpls_clear(struct rtentry *rt)
1576{
1577 if (rt->rt_llinfo != NULL && rt->rt_flags & RTF_MPLS) {
1578 free(rt->rt_llinfo, M_TEMP, sizeof(struct rt_mpls));
1579 rt->rt_llinfo = NULL;
1580 }
1581 rt->rt_flags &= ~RTF_MPLS;
1582}
1583#endif
1584
1585u_int16_t
1586rtlabel_name2id(char *name)
1587{
1588 struct rt_label *label, *p;
1589 u_int16_t new_id = 1;
1590
1591 if (!name[0])
1592 return (0);
1593
1594 TAILQ_FOREACH(label, &rt_labels, rtl_entry)
1595 if (strcmp(name, label->rtl_name) == 0) {
1596 label->rtl_ref++;
1597 return (label->rtl_id);
1598 }
1599
1600 /*
1601 * to avoid fragmentation, we do a linear search from the beginning
1602 * and take the first free slot we find. if there is none or the list
1603 * is empty, append a new entry at the end.
1604 */
1605 TAILQ_FOREACH(p, &rt_labels, rtl_entry) {
1606 if (p->rtl_id != new_id)
1607 break;
1608 new_id = p->rtl_id + 1;
1609 }
1610 if (new_id > LABELID_MAX)
1611 return (0);
1612
1613 label = malloc(sizeof(*label), M_RTABLE, M_NOWAIT|M_ZERO);
1614 if (label == NULL)
1615 return (0);
1616 strlcpy(label->rtl_name, name, sizeof(label->rtl_name));
1617 label->rtl_id = new_id;
1618 label->rtl_ref++;
1619
1620 if (p != NULL) /* insert new entry before p */
1621 TAILQ_INSERT_BEFORE(p, label, rtl_entry);
1622 else /* either list empty or no free slot in between */
1623 TAILQ_INSERT_TAIL(&rt_labels, label, rtl_entry);
1624
1625 return (label->rtl_id);
1626}
1627
1628const char *
1629rtlabel_id2name(u_int16_t id)
1630{
1631 struct rt_label *label;
1632
1633 TAILQ_FOREACH(label, &rt_labels, rtl_entry)
1634 if (label->rtl_id == id)
1635 return (label->rtl_name);
1636
1637 return (NULL);
1638}
1639
1640struct sockaddr *
1641rtlabel_id2sa(u_int16_t labelid, struct sockaddr_rtlabel *sa_rl)
1642{
1643 const char *label;
1644
1645 if (labelid == 0 || (label = rtlabel_id2name(labelid)) == NULL)
1646 return (NULL);
1647
1648 bzero(sa_rl, sizeof(*sa_rl));
1649 sa_rl->sr_len = sizeof(*sa_rl);
1650 sa_rl->sr_family = AF_UNSPEC;
1651 strlcpy(sa_rl->sr_label, label, sizeof(sa_rl->sr_label));
1652
1653 return ((struct sockaddr *)sa_rl);
1654}
1655
1656void
1657rtlabel_unref(u_int16_t id)
1658{
1659 struct rt_label *p, *next;
1660
1661 if (id == 0)
1662 return;
1663
1664 TAILQ_FOREACH_SAFE(p, &rt_labels, rtl_entry, next) {
1665 if (id == p->rtl_id) {
1666 if (--p->rtl_ref == 0) {
1667 TAILQ_REMOVE(&rt_labels, p, rtl_entry);
1668 free(p, M_RTABLE, sizeof(*p));
1669 }
1670 break;
1671 }
1672 }
1673}
1674
1675int
1676rt_if_track(struct ifnet *ifp)
1677{
1678 unsigned int rtableid;
1679 struct rtentry *rt = NULL;
1680 int i, error = 0;
1681
1682 for (rtableid = 0; rtableid < rtmap_limit; rtableid++) {
1683 /* skip rtables that are not in the rdomain of the ifp */
1684 if (rtable_l2(rtableid) != ifp->if_rdomain)
1685 continue;
1686 for (i = 1; i <= AF_MAX; i++) {
1687 if (!rtable_mpath_capable(rtableid, i))
1688 continue;
1689
1690 do {
1691 error = rtable_walk(rtableid, i, &rt,
1692 rt_if_linkstate_change, ifp);
1693 if (rt != NULL && error == EEXIST) {
1694 error = rtdeletemsg(rt, ifp, rtableid);
1695 if (error == 0)
1696 error = EAGAIN;
1697 }
1698 rtfree(rt);
1699 rt = NULL;
1700 } while (error == EAGAIN);
1701
1702 if (error == EAFNOSUPPORT)
1703 error = 0;
1704
1705 if (error)
1706 break;
1707 }
1708 }
1709
1710 return (error);
1711}
1712
1713int
1714rt_if_linkstate_change(struct rtentry *rt, void *arg, u_int id)
1715{
1716 struct ifnet *ifp = arg;
1717 struct sockaddr_in6 sa_mask;
1718 int error;
1719
1720 if (rt->rt_ifidx != ifp->if_index)
1721 return (0);
1722
1723 /* Local routes are always usable. */
1724 if (rt->rt_flags & RTF_LOCAL) {
1725 rt->rt_flags |= RTF_UP;
1726 return (0);
1727 }
1728
1729 if (LINK_STATE_IS_UP(ifp->if_link_state) && ifp->if_flags & IFF_UP) {
1730 if (ISSET(rt->rt_flags, RTF_UP))
1731 return (0);
1732
1733 /* bring route up */
1734 rt->rt_flags |= RTF_UP;
1735 error = rtable_mpath_reprio(id, rt_key(rt), rt_plen(rt),
1736 rt->rt_priority & RTP_MASK, rt);
1737 } else {
1738 /*
1739 * Remove redirected and cloned routes (mainly ARP)
1740 * from down interfaces so we have a chance to get
1741 * new routes from a better source.
1742 */
1743 if (ISSET(rt->rt_flags, RTF_CLONED|RTF_DYNAMIC) &&
1744 !ISSET(rt->rt_flags, RTF_CACHED|RTF_BFD)) {
1745 return (EEXIST);
1746 }
1747
1748 if (!ISSET(rt->rt_flags, RTF_UP))
1749 return (0);
1750
1751 /* take route down */
1752 rt->rt_flags &= ~RTF_UP;
1753 error = rtable_mpath_reprio(id, rt_key(rt), rt_plen(rt),
1754 rt->rt_priority | RTP_DOWN, rt);
1755 }
1756 if_group_routechange(rt_key(rt), rt_plen2mask(rt, &sa_mask));
1757
1758 return (error);
1759}
1760
1761struct sockaddr *
1762rt_plentosa(sa_family_t af, int plen, struct sockaddr_in6 *sa_mask)
1763{
1764 struct sockaddr_in *sin = (struct sockaddr_in *)sa_mask;
1765#ifdef INET6
1766 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sa_mask;
1767#endif
1768
1769 KASSERT(plen >= 0 || plen == -1);
1770
1771 if (plen == -1)
1772 return (NULL);
1773
1774 memset(sa_mask, 0, sizeof(*sa_mask));
1775
1776 switch (af) {
1777 case AF_INET:
1778 sin->sin_family = AF_INET;
1779 sin->sin_len = sizeof(struct sockaddr_in);
1780 in_prefixlen2mask(&sin->sin_addr, plen);
1781 break;
1782#ifdef INET6
1783 case AF_INET6:
1784 sin6->sin6_family = AF_INET6;
1785 sin6->sin6_len = sizeof(struct sockaddr_in6);
1786 in6_prefixlen2mask(&sin6->sin6_addr, plen);
1787 break;
1788#endif /* INET6 */
1789 default:
1790 return (NULL);
1791 }
1792
1793 return ((struct sockaddr *)sa_mask);
1794}
1795
1796struct sockaddr *
1797rt_plen2mask(struct rtentry *rt, struct sockaddr_in6 *sa_mask)
1798{
1799 return (rt_plentosa(rt_key(rt)->sa_family, rt_plen(rt), sa_mask));
1800}
1801
1802#ifdef DDB
1803#include <machine/db_machdep.h>
1804#include <ddb/db_output.h>
1805
1806void
1807db_print_sa(struct sockaddr *sa)
1808{
1809 int len;
1810 u_char *p;
1811
1812 if (sa == NULL) {
1813 db_printf("[NULL]");
1814 return;
1815 }
1816
1817 p = (u_char *)sa;
1818 len = sa->sa_len;
1819 db_printf("[");
1820 while (len > 0) {
1821 db_printf("%d", *p);
1822 p++;
1823 len--;
1824 if (len)
1825 db_printf(",");
1826 }
1827 db_printf("]\n");
1828}
1829
1830void
1831db_print_ifa(struct ifaddr *ifa)
1832{
1833 if (ifa == NULL)
1834 return;
1835 db_printf(" ifa_addr=");
1836 db_print_sa(ifa->ifa_addr);
1837 db_printf(" ifa_dsta=");
1838 db_print_sa(ifa->ifa_dstaddr);
1839 db_printf(" ifa_mask=");
1840 db_print_sa(ifa->ifa_netmask);
1841 db_printf(" flags=0x%x, refcnt=%d, metric=%d\n",
1842 ifa->ifa_flags, ifa->ifa_refcnt, ifa->ifa_metric);
1843}
1844
1845/*
1846 * Function to pass to rtable_walk().
1847 * Return non-zero error to abort walk.
1848 */
1849int
1850db_show_rtentry(struct rtentry *rt, void *w, unsigned int id)
1851{
1852 db_printf("rtentry=%p", rt);
1853
1854 db_printf(" flags=0x%x refcnt=%d use=%llu expire=%lld rtableid=%u\n",
1855 rt->rt_flags, rt->rt_refcnt, rt->rt_use, rt->rt_expire, id);
1856
1857 db_printf(" key="); db_print_sa(rt_key(rt));
1858 db_printf(" plen=%d", rt_plen(rt));
1859 db_printf(" gw="); db_print_sa(rt->rt_gateway);
1860 db_printf(" ifidx=%u ", rt->rt_ifidx);
1861 db_printf(" ifa=%p\n", rt->rt_ifa);
1862 db_print_ifa(rt->rt_ifa);
1863
1864 db_printf(" gwroute=%p llinfo=%p\n", rt->rt_gwroute, rt->rt_llinfo);
1865 return (0);
1866}
1867
1868/*
1869 * Function to print all the route trees.
1870 * Use this from ddb: "call db_show_arptab"
1871 */
1872int
1873db_show_arptab(void)
1874{
1875 db_printf("Route tree for AF_INET\n");
1876 rtable_walk(0, AF_INET, NULL, db_show_rtentry, NULL);
1877 return (0);
1878}
1879#endif /* DDB */