File: src/usr.sbin/bgpd/rde_peer.c
Warning: line 387, column 7: Although the value stored to 'p' is used in the enclosing expression, the value is never actually read from 'p'
1 | /* $OpenBSD: rde_peer.c,v 1.34 2023/11/07 11:17:35 claudio Exp $ */ |
2 | |
3 | /* |
4 | * Copyright (c) 2019 Claudio Jeker <claudio@openbsd.org> |
5 | * |
6 | * Permission to use, copy, modify, and distribute this software for any |
7 | * purpose with or without fee is hereby granted, provided that the above |
8 | * copyright notice and this permission notice appear in all copies. |
9 | * |
10 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES |
11 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF |
12 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR |
13 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES |
14 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN |
15 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF |
16 | * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. |
17 | */ |
18 | #include <sys/types.h> |
19 | #include <sys/queue.h> |
20 | |
21 | #include <stdlib.h> |
22 | #include <stdio.h> |
23 | #include <string.h> |
24 | #include <unistd.h> |
25 | |
26 | #include "bgpd.h" |
27 | #include "rde.h" |
28 | |
29 | struct peer_tree peertable; |
30 | struct rde_peer *peerself; |
31 | static long imsg_pending; |
32 | |
33 | CTASSERT(sizeof(peerself->recv_eor) * 8 > AID_MAX); |
34 | CTASSERT(sizeof(peerself->sent_eor) * 8 > AID_MAX); |
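note: recv_eor/sent_eor are used as per-AID bitmaps (peer_up() clears them or sets all bits, peer_dump() tests 1 << aid), so these asserts guarantee the fields hold at least one bit per AID. |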
35 | |
36 | struct iq { |
37 | SIMPLEQ_ENTRY(iq) entry; |
38 | struct imsg imsg; |
39 | }; |
40 | |
41 | int |
42 | peer_has_as4byte(struct rde_peer *peer) |
43 | { |
44 | return (peer->capa.as4byte); |
45 | } |
46 | |
47 | int |
48 | peer_has_add_path(struct rde_peer *peer, uint8_t aid, int mode) |
49 | { |
50 | if (aid > AID_MAX) |
51 | return 0; |
52 | if (aid == AID_UNSPEC) { |
53 | /* check if the capability is set for at least one AID */ |
54 | for (aid = AID_MIN; aid < AID_MAX; aid++) |
55 | if (peer->capa.add_path[aid] & mode) |
56 | return 1; |
57 | return 0; |
58 | } |
59 | return (peer->capa.add_path[aid] & mode); |
60 | } |
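note: mode is a CAPA_AP_* bitmask; e.g. peer_generate_update() below passes CAPA_AP_SEND to ask whether add-path updates must be generated for this peer. |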
61 | |
62 | int |
63 | peer_accept_no_as_set(struct rde_peer *peer) |
64 | { |
65 | return (peer->flags & PEERFLAG_NO_AS_SET); |
66 | } |
67 | |
68 | void |
69 | peer_init(struct filter_head *rules) |
70 | { |
71 | struct peer_config pc; |
72 | |
73 | RB_INIT(&peertable); |
74 | |
75 | memset(&pc, 0, sizeof(pc)); |
76 | snprintf(pc.descr, sizeof(pc.descr), "LOCAL"); |
77 | pc.id = PEER_ID_SELF; |
78 | |
79 | peerself = peer_add(PEER_ID_SELF, &pc, rules); |
80 | peerself->state = PEER_UP; |
81 | } |
82 | |
83 | void |
84 | peer_shutdown(void) |
85 | { |
86 | if (!RB_EMPTY(&peertable)) |
87 | log_warnx("%s: free non-free table", __func__); |
88 | } |
89 | |
90 | /* |
91 | * Traverse all peers calling callback for each peer. |
92 | */ |
93 | void |
94 | peer_foreach(void (*callback)(struct rde_peer *, void *), void *arg) |
95 | { |
96 | struct rde_peer *peer, *np; |
97 | |
98 | RB_FOREACH_SAFE(peer, peer_tree, &peertable, np) |
99 | callback(peer, arg); |
100 | } |
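note: a minimal usage sketch (hypothetical callback, not part of this file): |
	static void |
	count_peer(struct rde_peer *peer, void *arg) |
	{ |
		(*(int *)arg)++;	/* visit every peer once */ |
	} |
	/* ... int cnt = 0; peer_foreach(count_peer, &cnt); ... */ |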
101 | |
102 | /* |
103 | * Lookup a peer by peer_id, return NULL if not found. |
104 | */ |
105 | struct rde_peer * |
106 | peer_get(uint32_t id) |
107 | { |
108 | struct rde_peer needle; |
109 | |
110 | needle.conf.id = id; |
111 | return RB_FIND(peer_tree, &peertable, &needle); |
112 | } |
113 | |
114 | /* |
115 | * Find next peer that matches neighbor options in *n. |
116 | * If peerid was set then pickup the lookup after that peer. |
117 | * Returns NULL if no more peers match. |
118 | */ |
119 | struct rde_peer * |
120 | peer_match(struct ctl_neighbor *n, uint32_t peerid) |
121 | { |
122 | struct rde_peer *peer; |
123 | |
124 | if (peerid != 0) { |
125 | peer = peer_get(peerid); |
126 | if (peer) |
127 | peer = RB_NEXT(peer_tree, &peertable, peer); |
128 | } else |
129 | peer = RB_MIN(peer_tree, &peertable); |
130 | |
131 | for (; peer != NULL; peer = RB_NEXT(peer_tree, &peertable, peer)) { |
132 | if (rde_match_peer(peer, n)) |
133 | return peer; |
134 | } |
135 | return NULL; |
136 | } |
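note: peerid acts as a resume cursor: pass 0 to start from the first matching peer, or the id of the previously returned peer to continue the walk after it. |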
137 | |
138 | struct rde_peer * |
139 | peer_add(uint32_t id, struct peer_config *p_conf, struct filter_head *rules) |
140 | { |
141 | struct rde_peer *peer; |
142 | int conflict; |
143 | |
144 | if ((peer = peer_get(id))) { |
145 | memcpy(&peer->conf, p_conf, sizeof(struct peer_config)); |
146 | return (peer); |
147 | } |
148 | |
149 | peer = calloc(1, sizeof(struct rde_peer)); |
150 | if (peer == NULL) |
151 | fatal("peer_add"); |
152 | |
153 | memcpy(&peer->conf, p_conf, sizeof(struct peer_config)); |
154 | peer->remote_bgpid = 0; |
155 | peer->loc_rib_id = rib_find(peer->conf.rib); |
156 | if (peer->loc_rib_id == RIB_NOTFOUND) |
157 | fatalx("King Bula's new peer met an unknown RIB"); |
158 | peer->state = PEER_NONE; |
159 | peer->eval = peer->conf.eval; |
160 | peer->role = peer->conf.role; |
161 | peer->export_type = peer->conf.export_type; |
162 | peer->flags = peer->conf.flags; |
163 | SIMPLEQ_INIT(&peer->imsg_queue); |
164 | |
165 | peer_apply_out_filter(peer, rules); |
166 | |
167 | /* |
168 | * Assign an even random unique transmit path id. |
169 | * Odd path_id_tx numbers are for peers using add-path recv. |
170 | */ |
171 | do { |
172 | struct rde_peer *p; |
173 | |
174 | conflict = 0; |
175 | peer->path_id_tx = arc4random() << 1; |
176 | RB_FOREACH(p, peer_tree, &peertable) { |
177 | if (p->path_id_tx == peer->path_id_tx) { |
178 | conflict = 1; |
179 | break; |
180 | } |
181 | } |
182 | } while (conflict); |
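note: arc4random() << 1 clears the low bit, so path_id_tx is always even as the comment requires; the loop simply re-rolls until the id is unique across the peer table. |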
183 | |
184 | if (RB_INSERT(peer_tree, &peertable, peer) != NULL) |
185 | fatalx("rde peer table corrupted"); |
186 | |
187 | return (peer); |
188 | } |
189 | |
190 | struct filter_head * |
191 | peer_apply_out_filter(struct rde_peer *peer, struct filter_head *rules) |
192 | { |
193 | struct filter_head *old; |
194 | struct filter_rule *fr, *new; |
195 | |
196 | old = peer->out_rules; |
197 | if ((peer->out_rules = malloc(sizeof(*peer->out_rules))) == NULL) |
198 | fatal(NULL); |
199 | TAILQ_INIT(peer->out_rules); |
200 | |
201 | TAILQ_FOREACH(fr, rules, entry) { |
202 | if (rde_filter_skip_rule(peer, fr)) |
203 | continue; |
204 | |
205 | if ((new = malloc(sizeof(*new))) == NULL) |
206 | fatal(NULL); |
207 | memcpy(new, fr, sizeof(*new)); |
208 | filterset_copy(&fr->set, &new->set); |
209 | |
210 | TAILQ_INSERT_TAIL(peer->out_rules, new, entry); |
211 | } |
212 | |
213 | return old; |
214 | } |
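note: the old rule list is returned rather than freed so the caller can dispose of it (e.g. via filterlist_free()) once nothing references it anymore. |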
215 | |
216 | static inline int |
217 | peer_cmp(struct rde_peer *a, struct rde_peer *b) |
218 | { |
219 | if (a->conf.id > b->conf.id) |
220 | return 1; |
221 | if (a->conf.id < b->conf.id) |
222 | return -1; |
223 | return 0; |
224 | } |
225 | |
226 | RB_GENERATE(peer_tree, rde_peer, entry, peer_cmp); |
227 | |
228 | static void |
229 | peer_generate_update(struct rde_peer *peer, struct rib_entry *re, |
230 | struct prefix *newpath, struct prefix *oldpath, |
231 | enum eval_mode mode) |
232 | { |
233 | uint8_t aid; |
234 | |
235 | aid = re->prefix->aid; |
236 | |
237 | /* skip ourself */ |
238 | if (peer == peerself) |
239 | return; |
240 | if (peer->state != PEER_UP) |
241 | return; |
242 | /* skip peers using a different rib */ |
243 | if (peer->loc_rib_id != re->rib_id) |
244 | return; |
245 | /* check if peer actually supports the address family */ |
246 | if (peer->capa.mp[aid] == 0) |
247 | return; |
248 | /* skip peers with special export types */ |
249 | if (peer->export_type == EXPORT_NONE || |
250 | peer->export_type == EXPORT_DEFAULT_ROUTE) |
251 | return; |
252 | |
253 | /* if reconf skip peers which don't need to reconfigure */ |
254 | if (mode == EVAL_RECONF && peer->reconf_out == 0) |
255 | return; |
256 | |
257 | /* handle peers with add-path */ |
258 | if (peer_has_add_path(peer, aid, CAPA_AP_SEND)) { |
259 | if (peer->eval.mode == ADDPATH_EVAL_ALL) |
260 | up_generate_addpath_all(peer, re, newpath, oldpath); |
261 | else |
262 | up_generate_addpath(peer, re); |
263 | return; |
264 | } |
265 | |
266 | /* skip regular peers if the best path didn't change */ |
267 | if (mode == EVAL_ALL && (peer->flags & PEERFLAG_EVALUATE_ALL) == 0) |
268 | return; |
269 | up_generate_updates(peer, re); |
270 | } |
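note: the early returns above form a filter ladder: a peer only receives an update if it is up, uses this RIB, negotiated the AID and exports full routes; add-path peers are handled separately because they may announce several paths per prefix, not just the best one. |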
271 | |
272 | void |
273 | rde_generate_updates(struct rib_entry *re, struct prefix *newpath, |
274 | struct prefix *oldpath, enum eval_mode mode) |
275 | { |
276 | struct rde_peer *peer; |
277 | |
278 | RB_FOREACH(peer, peer_tree, &peertable) |
279 | peer_generate_update(peer, re, newpath, oldpath, mode); |
280 | } |
281 | |
282 | /* |
283 | * Various RIB walker callbacks. |
284 | */ |
285 | static void |
286 | peer_adjout_clear_upcall(struct prefix *p, void *arg) |
287 | { |
288 | prefix_adjout_destroy(p); |
289 | } |
290 | |
291 | static void |
292 | peer_adjout_stale_upcall(struct prefix *p, void *arg) |
293 | { |
294 | if (p->flags & PREFIX_FLAG_DEAD) { |
295 | return; |
296 | } else if (p->flags & PREFIX_FLAG_WITHDRAW) { |
297 | /* no need to keep stale withdraws, they miss all attributes */ |
298 | prefix_adjout_destroy(p); |
299 | return; |
300 | } else if (p->flags & PREFIX_FLAG_UPDATE) { |
301 | RB_REMOVE(prefix_tree, &prefix_peer(p)->updates[p->pt->aid], p); |
302 | p->flags &= ~PREFIX_FLAG_UPDATE; |
303 | } |
304 | p->flags |= PREFIX_FLAG_STALE; |
305 | } |
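note: WITHDRAW, UPDATE, DEAD and STALE appear to be queue states tracked inside PREFIX_FLAG_MASK; a prefix marked stale here is pulled off the update queue and kept until it is either re-announced or cleaned up. |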
306 | |
307 | struct peer_flush { |
308 | struct rde_peer *peer; |
309 | time_t staletime; |
310 | }; |
311 | |
312 | static void |
313 | peer_flush_upcall(struct rib_entry *re, void *arg) |
314 | { |
315 | struct rde_peer *peer = ((struct peer_flush *)arg)->peer; |
316 | struct rde_aspath *asp; |
317 | struct bgpd_addr addr; |
318 | struct prefix *p, *np, *rp; |
319 | time_t staletime = ((struct peer_flush *)arg)->staletime; |
320 | uint32_t i; |
321 | uint8_t prefixlen; |
322 | |
323 | pt_getaddr(re->prefix, &addr); |
324 | prefixlen = re->prefix->prefixlen; |
325 | TAILQ_FOREACH_SAFE(p, &re->prefix_h, entry.list.rib, np) { |
326 | if (peer != prefix_peer(p)) |
327 | continue; |
328 | if (staletime && p->lastchange > staletime) |
329 | continue; |
330 | |
331 | for (i = RIB_LOC_START; i < rib_size; i++) { |
332 | struct rib *rib = rib_byid(i); |
333 | if (rib == NULL) |
334 | continue; |
335 | rp = prefix_get(rib, peer, p->path_id, |
336 | &addr, prefixlen); |
337 | if (rp) { |
338 | asp = prefix_aspath(rp); |
339 | if (asp && asp->pftableid) |
340 | rde_pftable_del(asp->pftableid, rp); |
341 | |
342 | prefix_destroy(rp); |
343 | rde_update_log("flush", i, peer, NULL, |
344 | &addr, prefixlen); |
345 | } |
346 | } |
347 | |
348 | prefix_destroy(p); |
349 | peer->stats.prefix_cnt--; |
350 | } |
351 | } |
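note: flushing an Adj-RIB-In prefix also withdraws it from every local RIB (RIB_LOC_START onward) and from any pf table it was installed in, before the prefix itself and the per-peer prefix counter are dropped. |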
352 | |
353 | static void |
354 | rde_up_adjout_force_upcall(struct prefix *p, void *ptr) |
355 | { |
356 | if (p->flags & PREFIX_FLAG_STALE) { |
357 | /* remove stale entries */ |
358 | prefix_adjout_destroy(p); |
359 | } else if (p->flags & PREFIX_FLAG_DEAD) { |
360 | /* ignore dead prefixes, they will go away soon */ |
361 | } else if ((p->flags & PREFIX_FLAG_MASK) == 0) { |
362 | /* put entries on the update queue if not already on a queue */ |
363 | p->flags |= PREFIX_FLAG_UPDATE; |
364 | if (RB_INSERT(prefix_tree, &prefix_peer(p)->updates[p->pt->aid], |
365 | p) != NULL) |
366 | fatalx("%s: RB tree invariant violated", __func__); |
367 | } |
368 | } |
369 | |
370 | static void |
371 | rde_up_adjout_force_done(void *ptr, uint8_t aid) |
372 | { |
373 | struct rde_peer *peer = ptr; |
374 | |
375 | /* Adj-RIB-Out ready, unthrottle peer and inject EOR */ |
376 | peer->throttled = 0; |
377 | if (peer->capa.grestart.restart) |
378 | prefix_add_eor(peer, aid); |
379 | } |
380 | |
381 | static void |
382 | rde_up_dump_upcall(struct rib_entry *re, void *ptr) |
383 | { |
384 | struct rde_peer *peer = ptr; |
385 | struct prefix *p; |
386 | |
387 | if ((p = prefix_best(re)) == NULL) |
^ warning (line 387, column 7): Although the value stored to 'p' is used in the enclosing expression, the value is never actually read from 'p' |
388 | /* no eligible prefix, not even for 'evaluate all' */ |
389 | return; |
390 | |
391 | peer_generate_update(peer, re, NULL, NULL, 0); |
392 | } |
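note: this is the dead store the analyzer flags: 'p' is assigned but never read afterwards. Rewriting the check as "if (prefix_best(re) == NULL)" and dropping the local variable would silence the warning without changing behavior. |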
393 | |
394 | static void |
395 | rde_up_dump_done(void *ptr, uint8_t aid) |
396 | { |
397 | struct rde_peer *peer = ptr; |
398 | |
399 | /* force out all updates of Adj-RIB-Out for this peer */ |
400 | if (prefix_dump_new(peer, aid, 0, peer, rde_up_adjout_force_upcall, |
401 | rde_up_adjout_force_done, NULL) == -1) |
402 | fatal("%s: prefix_dump_new", __func__); |
403 | } |
404 | |
405 | /* |
406 | * Session got established: bring the peer up, load the RIBs and do the initial table dump. |
407 | */ |
408 | void |
409 | peer_up(struct rde_peer *peer, struct session_up *sup) |
410 | { |
411 | uint8_t i; |
412 | |
413 | if (peer->state == PEER_ERR) { |
414 | /* |
415 | * There is a race condition when doing PEER_ERR -> PEER_DOWN. |
416 | * So just do a full reset of the peer here. |
417 | */ |
418 | rib_dump_terminate(peer); |
419 | peer_imsg_flush(peer); |
420 | if (prefix_dump_new(peer, AID_UNSPEC, 0, NULL, |
421 | peer_adjout_clear_upcall, NULL, NULL) == -1) |
422 | fatal("%s: prefix_dump_new", __func__); |
423 | peer_flush(peer, AID_UNSPEC, 0); |
424 | peer->stats.prefix_cnt = 0; |
425 | peer->stats.prefix_out_cnt = 0; |
426 | peer->state = PEER_DOWN; |
427 | } |
428 | peer->remote_bgpid = ntohl(sup->remote_bgpid); |
429 | peer->short_as = sup->short_as; |
430 | peer->remote_addr = sup->remote_addr; |
431 | peer->local_v4_addr = sup->local_v4_addr; |
432 | peer->local_v6_addr = sup->local_v6_addr; |
433 | peer->local_if_scope = sup->if_scope; |
434 | memcpy(&peer->capa, &sup->capa, sizeof(peer->capa)); |
435 | |
436 | /* clear eor markers depending on GR flags */ |
437 | if (peer->capa.grestart.restart) { |
438 | peer->sent_eor = 0; |
439 | peer->recv_eor = 0; |
440 | } else { |
441 | /* no EOR expected */ |
442 | peer->sent_eor = ~0; |
443 | peer->recv_eor = ~0; |
444 | } |
445 | peer->state = PEER_UP; |
446 | |
447 | for (i = 0; i < AID_MAX; i++) { |
448 | if (peer->capa.mp[i]) |
449 | peer_dump(peer, i); |
450 | } |
451 | } |
452 | |
453 | /* |
454 | * Session dropped and no graceful restart is done. Stop everything for |
455 | * this peer and clean up. |
456 | */ |
457 | void |
458 | peer_down(struct rde_peer *peer, void *bula) |
459 | { |
460 | peer->remote_bgpid = 0; |
461 | peer->state = PEER_DOWN; |
462 | /* |
463 | * stop all pending dumps which may depend on this peer |
464 | * and flush all pending imsg from the SE. |
465 | */ |
466 | rib_dump_terminate(peer); |
467 | peer_imsg_flush(peer); |
468 | |
469 | /* flush Adj-RIB-Out */ |
470 | if (prefix_dump_new(peer, AID_UNSPEC, 0, NULL, |
471 | peer_adjout_clear_upcall, NULL, NULL) == -1) |
472 | fatal("%s: prefix_dump_new", __func__); |
473 | |
474 | /* flush Adj-RIB-In */ |
475 | peer_flush(peer, AID_UNSPEC, 0); |
476 | peer->stats.prefix_cnt = 0; |
477 | peer->stats.prefix_out_cnt = 0; |
478 | |
479 | /* free filters */ |
480 | filterlist_free(peer->out_rules); |
481 | |
482 | RB_REMOVE(peer_tree, &peertable, peer); |
483 | free(peer); |
484 | } |
485 | |
486 | /* |
487 | * Flush all routes older than staletime. If staletime is 0, all routes |
488 | * will be flushed. |
489 | */ |
490 | void |
491 | peer_flush(struct rde_peer *peer, uint8_t aid, time_t staletime) |
492 | { |
493 | struct peer_flush pf = { peer, staletime }; |
494 | |
495 | /* this dump must run synchronous, too much depends on that right now */ |
496 | if (rib_dump_new(RIB_ADJ_IN, aid, 0, &pf, peer_flush_upcall, |
497 | NULL, NULL) == -1) |
498 | fatal("%s: rib_dump_new", __func__); |
499 | |
500 | /* every route is gone so reset staletime */ |
501 | if (aid == AID_UNSPEC) { |
502 | uint8_t i; |
503 | for (i = 0; i < AID_MAX; i++) |
504 | peer->staletime[i] = 0; |
505 | } else { |
506 | peer->staletime[aid] = 0; |
507 | } |
508 | } |
509 | |
510 | /* |
511 | * During graceful restart mark a peer as stale if the session goes down. |
512 | * For the specified AID the Adj-RIB-Out is marked stale and the staletime |
513 | * is set to the current timestamp for identifying stale routes in Adj-RIB-In. |
514 | */ |
515 | void |
516 | peer_stale(struct rde_peer *peer, uint8_t aid, int flushall) |
517 | { |
518 | time_t now; |
519 | |
520 | /* flush the now even staler routes out */ |
521 | if (peer->staletime[aid]) |
522 | peer_flush(peer, aid, peer->staletime[aid]); |
523 | |
524 | peer->staletime[aid] = now = getmonotime(); |
525 | peer->state = PEER_DOWN; |
526 | |
527 | /* |
528 | * stop all pending dumps which may depend on this peer |
529 | * and flush all pending imsg from the SE. |
530 | */ |
531 | rib_dump_terminate(peer); |
532 | peer_imsg_flush(peer); |
533 | |
534 | if (flushall) |
535 | peer_flush(peer, aid, 0); |
536 | |
537 | /* XXX this is not quite correct */ |
538 | /* mark Adj-RIB-Out stale for this peer */ |
539 | if (prefix_dump_new(peer, aid, 0, NULL, |
540 | peer_adjout_stale_upcall, NULL, NULL) == -1) |
541 | fatal("%s: prefix_dump_new", __func__); |
542 | |
543 | /* make sure new prefixes start on a higher timestamp */ |
544 | while (now >= getmonotime()) |
545 | sleep(1); |
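note: getmonotime() presumably ticks in whole seconds, so this loop blocks until the clock advances, guaranteeing that prefixes received afterwards carry a strictly newer timestamp than staletime (the same trick is used in peer_begin_rrefresh() below). |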
546 | } |
547 | |
548 | /* |
549 | * Load the Adj-RIB-Out of a peer; normally called when a session is established. |
550 | * Once the Adj-RIB-Out is ready stale routes are removed from the Adj-RIB-Out |
551 | * and all routes are put on the update queue so they will be sent out. |
552 | */ |
553 | void |
554 | peer_dump(struct rde_peer *peer, uint8_t aid) |
555 | { |
556 | if (peer->capa.enhanced_rr && (peer->sent_eor & (1 << aid))) |
557 | rde_peer_send_rrefresh(peer, aid, ROUTE_REFRESH_BEGIN_RR); |
558 | |
559 | if (peer->export_type == EXPORT_NONE) { |
560 | /* nothing to send apart from the marker */ |
561 | if (peer->capa.grestart.restart) |
562 | prefix_add_eor(peer, aid); |
563 | } else if (peer->export_type == EXPORT_DEFAULT_ROUTE) { |
564 | up_generate_default(peer, aid); |
565 | rde_up_dump_done(peer, aid); |
566 | } else if (aid == AID_FLOWSPECv4 || aid == AID_FLOWSPECv6) { |
567 | prefix_flowspec_dump(aid, peer, rde_up_dump_upcall, |
568 | rde_up_dump_done); |
569 | } else { |
570 | if (rib_dump_new(peer->loc_rib_id, aid, RDE_RUNNER_ROUNDS, peer, |
571 | rde_up_dump_upcall, rde_up_dump_done, NULL) == -1) |
572 | fatal("%s: rib_dump_new", __func__); |
573 | /* throttle peer until dump is done */ |
574 | peer->throttled = 1; |
575 | } |
576 | } |
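note: the peer stays throttled until the dump completes; rde_up_dump_done() chains into rde_up_adjout_force_done(), which unthrottles the peer and injects the EOR marker. |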
577 | |
578 | /* |
579 | * Start of an enhanced route refresh. Mark all routes as stale. |
580 | * Once the route refresh ends, an End of Route Refresh message is sent, |
581 | * which calls peer_flush() to remove all stale routes. |
582 | */ |
583 | void |
584 | peer_begin_rrefresh(struct rde_peer *peer, uint8_t aid) |
585 | { |
586 | time_t now; |
587 | |
588 | /* flush the now even staler routes out */ |
589 | if (peer->staletime[aid]) |
590 | peer_flush(peer, aid, peer->staletime[aid]); |
591 | |
592 | peer->staletime[aid] = now = getmonotime(); |
593 | |
594 | /* make sure new prefixes start on a higher timestamp */ |
595 | while (now >= getmonotime()) |
596 | sleep(1); |
597 | } |
598 | |
599 | /* |
600 | * move an imsg from src to dst, disconnecting any dynamic memory from src. |
601 | */ |
602 | static void |
603 | imsg_move(struct imsg *dst, struct imsg *src) |
604 | { |
605 | *dst = *src; |
606 | memset(src, 0, sizeof(*src)); |
607 | } |
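note: zeroing src after the struct copy transfers ownership of any dynamically allocated imsg data to dst, so a later cleanup of src cannot free it twice. |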
608 | |
609 | /* |
610 | * push an imsg onto the peer imsg queue. |
611 | */ |
612 | void |
613 | peer_imsg_push(struct rde_peer *peer, struct imsg *imsg) |
614 | { |
615 | struct iq *iq; |
616 | |
617 | if ((iq = calloc(1, sizeof(*iq))) == NULL) |
618 | fatal(NULL); |
619 | imsg_move(&iq->imsg, imsg); |
620 | SIMPLEQ_INSERT_TAIL(&peer->imsg_queue, iq, entry); |
621 | imsg_pending++; |
622 | } |
623 | |
624 | /* |
625 | * pop the first imsg off the peer imsg queue and move it into the imsg |
626 | * argument. Returns 1 if an element was returned, else 0. |
627 | */ |
628 | int |
629 | peer_imsg_pop(struct rde_peer *peer, struct imsg *imsg) |
630 | { |
631 | struct iq *iq; |
632 | |
633 | iq = SIMPLEQ_FIRST(&peer->imsg_queue); |
634 | if (iq == NULL) |
635 | return 0; |
636 | |
637 | imsg_move(imsg, &iq->imsg); |
638 | |
639 | SIMPLEQ_REMOVE_HEAD(&peer->imsg_queue, entry); |
640 | free(iq); |
641 | imsg_pending--; |
642 | |
643 | return 1; |
644 | } |
645 | |
646 | /* |
647 | * Check if any imsgs are pending; returns 0 if none are. |
648 | */ |
649 | int |
650 | peer_imsg_pending(void) |
651 | { |
652 | return imsg_pending != 0; |
653 | } |
654 | |
655 | /* |
656 | * flush all imsg queued for a peer. |
657 | */ |
658 | void |
659 | peer_imsg_flush(struct rde_peer *peer) |
660 | { |
661 | struct iq *iq; |
662 | |
663 | while ((iq = SIMPLEQ_FIRST(&peer->imsg_queue)) != NULL) { |
664 | SIMPLEQ_REMOVE_HEAD(&peer->imsg_queue, entry); |
665 | free(iq); |
666 | imsg_pending--; |
667 | } |
668 | } |