Bug Summary

File:netinet/ip_spd.c
Warning:line 767, column 7
Although the value stored to 'ipa' is used in the enclosing expression, the value is never actually read from 'ipa'

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name ip_spd.c -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/lib/clang/13.0.0 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I 
/usr/src/sys/dev/pci/drm/amd/pm/powerplay -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -D CONFIG_DRM_AMD_DC_DCN3_0 -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant -fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /usr/obj/sys/arch/amd64/compile/GENERIC.MP/scan-build/2022-01-12-131800-47421-1 -x c /usr/src/sys/netinet/ip_spd.c
1/* $OpenBSD: ip_spd.c,v 1.111 2022/01/04 06:32:39 yasuoka Exp $ */
2/*
3 * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
4 *
5 * Copyright (c) 2000-2001 Angelos D. Keromytis.
6 *
7 * Permission to use, copy, and modify this software with or without fee
8 * is hereby granted, provided that this entire notice is included in
9 * all copies of any software which is or includes a copy or
10 * modification of this software.
11 * You may use this code under the GNU public license if you so wish. Please
12 * contribute changes back to the authors under this freer than GPL license
13 * so that we may further the use of strong encryption without limitations to
14 * all.
15 *
16 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
17 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
18 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
19 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
20 * PURPOSE.
21 */
22
23#include <sys/param.h>
24#include <sys/systm.h>
25#include <sys/mbuf.h>
26#include <sys/socket.h>
27#include <sys/kernel.h>
28#include <sys/socketvar.h>
29#include <sys/domain.h>
30#include <sys/protosw.h>
31#include <sys/pool.h>
32#include <sys/timeout.h>
33
34#include <net/route.h>
35#include <net/netisr.h>
36
37#include <netinet/in.h>
38#include <netinet/ip.h>
39#include <netinet/ip_var.h>
40#include <netinet/in_pcb.h>
41#include <netinet/ip_ipsp.h>
42#include <net/pfkeyv2.h>
43
44int ipsp_spd_inp(struct mbuf *, struct inpcb *, struct ipsec_policy *,
45 struct tdb **);
46int ipsp_acquire_sa(struct ipsec_policy *, union sockaddr_union *,
47 union sockaddr_union *, struct sockaddr_encap *, struct mbuf *);
48struct ipsec_acquire *ipsp_pending_acquire(struct ipsec_policy *,
49 union sockaddr_union *);
50void ipsp_delete_acquire_timo(void *);
51void ipsp_delete_acquire(struct ipsec_acquire *);
52
53struct pool ipsec_policy_pool;
54struct pool ipsec_acquire_pool;
55
56/*
57 * For tdb_walk() calling tdb_delete_locked() we need lock order
58 * tdb_sadb_mtx before ipo_tdb_mtx.
59 */
60struct mutex ipo_tdb_mtx = MUTEX_INITIALIZER(IPL_SOFTNET){ ((void *)0), ((((0x5)) > 0x0 && ((0x5)) < 0x9
) ? 0x9 : ((0x5))), 0x0 }
;
61
62/* Protected by the NET_LOCK(). */
63struct radix_node_head **spd_tables;
64unsigned int spd_table_max;
65TAILQ_HEAD(ipsec_acquire_head, ipsec_acquire)struct ipsec_acquire_head { struct ipsec_acquire *tqh_first; struct
ipsec_acquire **tqh_last; }
ipsec_acquire_head =
66 TAILQ_HEAD_INITIALIZER(ipsec_acquire_head){ ((void *)0), &(ipsec_acquire_head).tqh_first };
67
68struct radix_node_head *
69spd_table_get(unsigned int rtableid)
70{
71 unsigned int rdomain;
72
73 NET_ASSERT_LOCKED()do { int _s = rw_status(&netlock); if ((splassert_ctl >
0) && (_s != 0x0001UL && _s != 0x0002UL)) splassert_fail
(0x0002UL, _s, __func__); } while (0)
;
74
75 if (spd_tables == NULL((void *)0))
76 return (NULL((void *)0));
77
78 rdomain = rtable_l2(rtableid);
79 if (rdomain > spd_table_max)
80 return (NULL((void *)0));
81
82 return (spd_tables[rdomain]);
83}
84
85struct radix_node_head *
86spd_table_add(unsigned int rtableid)
87{
88 struct radix_node_head *rnh = NULL((void *)0);
89 unsigned int rdomain;
90 void *p;
91
92 NET_ASSERT_LOCKED()do { int _s = rw_status(&netlock); if ((splassert_ctl >
0) && (_s != 0x0001UL && _s != 0x0002UL)) splassert_fail
(0x0002UL, _s, __func__); } while (0)
;
93
94 rdomain = rtable_l2(rtableid);
95 if (spd_tables == NULL((void *)0) || rdomain > spd_table_max) {
96 if ((p = mallocarray(rdomain + 1, sizeof(*rnh),
97 M_RTABLE5, M_NOWAIT0x0002|M_ZERO0x0008)) == NULL((void *)0))
98 return (NULL((void *)0));
99
100 if (spd_tables != NULL((void *)0)) {
101 memcpy(p, spd_tables, sizeof(*rnh) * (spd_table_max+1))__builtin_memcpy((p), (spd_tables), (sizeof(*rnh) * (spd_table_max
+1)))
;
102 free(spd_tables, M_RTABLE5,
103 sizeof(*rnh) * (spd_table_max+1));
104 }
105 spd_tables = p;
106 spd_table_max = rdomain;
107 }
108
109 if (spd_tables[rdomain] == NULL((void *)0)) {
110 if (rn_inithead((void **)&rnh,
111 offsetof(struct sockaddr_encap, sen_type)__builtin_offsetof(struct sockaddr_encap, sen_type)) == 0)
112 rnh = NULL((void *)0);
113 spd_tables[rdomain] = rnh;
114 }
115
116 return (spd_tables[rdomain]);
117}
118
119int
120spd_table_walk(unsigned int rtableid,
121 int (*func)(struct ipsec_policy *, void *, unsigned int), void *arg)
122{
123 struct radix_node_head *rnh;
124 int (*walker)(struct radix_node *, void *, u_int) = (void *)func;
125 int error;
126
127 rnh = spd_table_get(rtableid);
128 if (rnh == NULL((void *)0))
129 return (0);
130
131 /* EGAIN means the tree changed. */
132 while ((error = rn_walktree(rnh, walker, arg)) == EAGAIN35)
133 continue;
134
135 return (error);
136}
137
138/*
139 * Lookup at the SPD based on the headers contained on the mbuf. The second
140 * argument indicates what protocol family the header at the beginning of
141 * the mbuf is. hlen is the offset of the transport protocol header
142 * in the mbuf.
143 *
144 * Return combinations (of return value and *tdbout):
145 * - -EINVAL -> silently drop the packet
146 * - errno -> drop packet and return error
147 * - 0/NULL -> no IPsec required on packet
148 * - 0/TDB -> do IPsec
149 *
150 * In the case of incoming flows, only the first three combinations are
151 * returned.
152 */
153int
154ipsp_spd_lookup(struct mbuf *m, int af, int hlen, int direction,
155 struct tdb *tdbp, struct inpcb *inp, struct tdb **tdbout,
156 struct ipsec_ids *ipsecflowinfo_ids)
157{
158 struct radix_node_head *rnh;
159 struct radix_node *rn;
160 union sockaddr_union sdst, ssrc;
161 struct sockaddr_encap *ddst, dst;
162 struct ipsec_policy *ipo;
163 struct ipsec_ids *ids = NULL((void *)0);
164 int error, signore = 0, dignore = 0;
165 u_int rdomain = rtable_l2(m->m_pkthdrM_dat.MH.MH_pkthdr.ph_rtableid);
166
167 NET_ASSERT_LOCKED()do { int _s = rw_status(&netlock); if ((splassert_ctl >
0) && (_s != 0x0001UL && _s != 0x0002UL)) splassert_fail
(0x0002UL, _s, __func__); } while (0)
;
168
169 /*
170 * If there are no flows in place, there's no point
171 * continuing with the SPD lookup.
172 */
173 if (!ipsec_in_use && inp == NULL((void *)0)) {
174 if (tdbout != NULL((void *)0))
175 *tdbout = NULL((void *)0);
176 return 0;
177 }
178
179 /*
180 * If an input packet is destined to a BYPASS socket, just accept it.
181 */
182 if ((inp != NULL((void *)0)) && (direction == IPSP_DIRECTION_IN0x1) &&
183 (inp->inp_seclevel[SL_ESP_TRANS1] == IPSEC_LEVEL_BYPASS0x00) &&
184 (inp->inp_seclevel[SL_ESP_NETWORK2] == IPSEC_LEVEL_BYPASS0x00) &&
185 (inp->inp_seclevel[SL_AUTH0] == IPSEC_LEVEL_BYPASS0x00)) {
186 if (tdbout != NULL((void *)0))
187 *tdbout = NULL((void *)0);
188 return 0;
189 }
190
191 memset(&dst, 0, sizeof(dst))__builtin_memset((&dst), (0), (sizeof(dst)));
192 memset(&sdst, 0, sizeof(union sockaddr_union))__builtin_memset((&sdst), (0), (sizeof(union sockaddr_union
)))
;
193 memset(&ssrc, 0, sizeof(union sockaddr_union))__builtin_memset((&ssrc), (0), (sizeof(union sockaddr_union
)))
;
194 ddst = (struct sockaddr_encap *)&dst;
195 ddst->sen_family = PF_KEY30;
196 ddst->sen_len = SENT_LENsizeof(struct sockaddr_encap);
197
198 switch (af) {
199 case AF_INET2:
200 if (hlen < sizeof (struct ip) || m->m_pkthdrM_dat.MH.MH_pkthdr.len < hlen)
201 return EINVAL22;
202
203 ddst->sen_directionSen.Sip4.Direction = direction;
204 ddst->sen_type = SENT_IP40x0001;
205
206 m_copydata(m, offsetof(struct ip, ip_src)__builtin_offsetof(struct ip, ip_src),
207 sizeof(struct in_addr), (caddr_t) &(ddst->sen_ip_srcSen.Sip4.Src));
208 m_copydata(m, offsetof(struct ip, ip_dst)__builtin_offsetof(struct ip, ip_dst),
209 sizeof(struct in_addr), (caddr_t) &(ddst->sen_ip_dstSen.Sip4.Dst));
210 m_copydata(m, offsetof(struct ip, ip_p)__builtin_offsetof(struct ip, ip_p), sizeof(u_int8_t),
211 (caddr_t) &(ddst->sen_protoSen.Sip4.Proto));
212
213 sdst.sin.sin_family = ssrc.sin.sin_family = AF_INET2;
214 sdst.sin.sin_len = ssrc.sin.sin_len =
215 sizeof(struct sockaddr_in);
216 ssrc.sin.sin_addr = ddst->sen_ip_srcSen.Sip4.Src;
217 sdst.sin.sin_addr = ddst->sen_ip_dstSen.Sip4.Dst;
218
219 /*
220 * If TCP/UDP, extract the port numbers to use in the lookup.
221 */
222 switch (ddst->sen_protoSen.Sip4.Proto) {
223 case IPPROTO_UDP17:
224 case IPPROTO_TCP6:
225 /* Make sure there's enough data in the packet. */
226 if (m->m_pkthdrM_dat.MH.MH_pkthdr.len < hlen + 2 * sizeof(u_int16_t))
227 return EINVAL22;
228
229 /*
230 * Luckily, the offset of the src/dst ports in
231 * both the UDP and TCP headers is the same (first
232 * two 16-bit values in the respective headers),
233 * so we can just copy them.
234 */
235 m_copydata(m, hlen, sizeof(u_int16_t),
236 (caddr_t) &(ddst->sen_sportSen.Sip4.Sport));
237 m_copydata(m, hlen + sizeof(u_int16_t), sizeof(u_int16_t),
238 (caddr_t) &(ddst->sen_dportSen.Sip4.Dport));
239 break;
240
241 default:
242 ddst->sen_sportSen.Sip4.Sport = 0;
243 ddst->sen_dportSen.Sip4.Dport = 0;
244 }
245
246 break;
247
248#ifdef INET61
249 case AF_INET624:
250 if (hlen < sizeof (struct ip6_hdr) || m->m_pkthdrM_dat.MH.MH_pkthdr.len < hlen)
251 return EINVAL22;
252
253 ddst->sen_type = SENT_IP60x0002;
254 ddst->sen_ip6_directionSen.Sip6.Direction = direction;
255
256 m_copydata(m, offsetof(struct ip6_hdr, ip6_src)__builtin_offsetof(struct ip6_hdr, ip6_src),
257 sizeof(struct in6_addr),
258 (caddr_t) &(ddst->sen_ip6_srcSen.Sip6.Src));
259 m_copydata(m, offsetof(struct ip6_hdr, ip6_dst)__builtin_offsetof(struct ip6_hdr, ip6_dst),
260 sizeof(struct in6_addr),
261 (caddr_t) &(ddst->sen_ip6_dstSen.Sip6.Dst));
262 m_copydata(m, offsetof(struct ip6_hdr, ip6_nxt)__builtin_offsetof(struct ip6_hdr, ip6_ctlun.ip6_un1.ip6_un1_nxt
)
,
263 sizeof(u_int8_t),
264 (caddr_t) &(ddst->sen_ip6_protoSen.Sip6.Proto));
265
266 sdst.sin6.sin6_family = ssrc.sin6.sin6_family = AF_INET624;
267 sdst.sin6.sin6_len = ssrc.sin6.sin6_len =
268 sizeof(struct sockaddr_in6);
269 in6_recoverscope(&ssrc.sin6, &ddst->sen_ip6_srcSen.Sip6.Src);
270 in6_recoverscope(&sdst.sin6, &ddst->sen_ip6_dstSen.Sip6.Dst);
271
272 /*
273 * If TCP/UDP, extract the port numbers to use in the lookup.
274 */
275 switch (ddst->sen_ip6_protoSen.Sip6.Proto) {
276 case IPPROTO_UDP17:
277 case IPPROTO_TCP6:
278 /* Make sure there's enough data in the packet. */
279 if (m->m_pkthdrM_dat.MH.MH_pkthdr.len < hlen + 2 * sizeof(u_int16_t))
280 return EINVAL22;
281
282 /*
283 * Luckily, the offset of the src/dst ports in
284 * both the UDP and TCP headers is the same
285 * (first two 16-bit values in the respective
286 * headers), so we can just copy them.
287 */
288 m_copydata(m, hlen, sizeof(u_int16_t),
289 (caddr_t) &(ddst->sen_ip6_sportSen.Sip6.Sport));
290 m_copydata(m, hlen + sizeof(u_int16_t), sizeof(u_int16_t),
291 (caddr_t) &(ddst->sen_ip6_dportSen.Sip6.Dport));
292 break;
293
294 default:
295 ddst->sen_ip6_sportSen.Sip6.Sport = 0;
296 ddst->sen_ip6_dportSen.Sip6.Dport = 0;
297 }
298
299 break;
300#endif /* INET6 */
301
302 default:
303 return EAFNOSUPPORT47;
304 }
305
306 /* Actual SPD lookup. */
307 if ((rnh = spd_table_get(rdomain)) == NULL((void *)0) ||
308 (rn = rn_match((caddr_t)&dst, rnh)) == NULL((void *)0)) {
309 /*
310 * Return whatever the socket requirements are, there are no
311 * system-wide policies.
312 */
313 return ipsp_spd_inp(m, inp, NULL((void *)0), tdbout);
314 }
315 ipo = (struct ipsec_policy *)rn;
316
317 switch (ipo->ipo_type) {
318 case IPSP_PERMIT3:
319 return ipsp_spd_inp(m, inp, ipo, tdbout);
320
321 case IPSP_DENY4:
322 return EHOSTUNREACH65;
323
324 case IPSP_IPSEC_USE0:
325 case IPSP_IPSEC_ACQUIRE1:
326 case IPSP_IPSEC_REQUIRE2:
327 case IPSP_IPSEC_DONTACQ5:
328 /* Nothing more needed here. */
329 break;
330
331 default:
332 return EINVAL22;
333 }
334
335 /* Check for non-specific destination in the policy. */
336 switch (ipo->ipo_dst.sa.sa_family) {
337 case AF_INET2:
338 if ((ipo->ipo_dst.sin.sin_addr.s_addr == INADDR_ANY((u_int32_t) (__uint32_t)(__builtin_constant_p((u_int32_t)(0x00000000
)) ? (__uint32_t)(((__uint32_t)((u_int32_t)(0x00000000)) &
0xff) << 24 | ((__uint32_t)((u_int32_t)(0x00000000)) &
0xff00) << 8 | ((__uint32_t)((u_int32_t)(0x00000000)) &
0xff0000) >> 8 | ((__uint32_t)((u_int32_t)(0x00000000)
) & 0xff000000) >> 24) : __swap32md((u_int32_t)(0x00000000
))))
) ||
339 (ipo->ipo_dst.sin.sin_addr.s_addr == INADDR_BROADCAST((u_int32_t) (__uint32_t)(__builtin_constant_p((u_int32_t)(0xffffffff
)) ? (__uint32_t)(((__uint32_t)((u_int32_t)(0xffffffff)) &
0xff) << 24 | ((__uint32_t)((u_int32_t)(0xffffffff)) &
0xff00) << 8 | ((__uint32_t)((u_int32_t)(0xffffffff)) &
0xff0000) >> 8 | ((__uint32_t)((u_int32_t)(0xffffffff)
) & 0xff000000) >> 24) : __swap32md((u_int32_t)(0xffffffff
))))
))
340 dignore = 1;
341 break;
342
343#ifdef INET61
344 case AF_INET624:
345 if ((IN6_IS_ADDR_UNSPECIFIED(&ipo->ipo_dst.sin6.sin6_addr)((*(const u_int32_t *)(const void *)(&(&ipo->ipo_dst
.sin6.sin6_addr)->__u6_addr.__u6_addr8[0]) == 0) &&
(*(const u_int32_t *)(const void *)(&(&ipo->ipo_dst
.sin6.sin6_addr)->__u6_addr.__u6_addr8[4]) == 0) &&
(*(const u_int32_t *)(const void *)(&(&ipo->ipo_dst
.sin6.sin6_addr)->__u6_addr.__u6_addr8[8]) == 0) &&
(*(const u_int32_t *)(const void *)(&(&ipo->ipo_dst
.sin6.sin6_addr)->__u6_addr.__u6_addr8[12]) == 0))
) ||
346 (memcmp(&ipo->ipo_dst.sin6.sin6_addr, &in6mask128,__builtin_memcmp((&ipo->ipo_dst.sin6.sin6_addr), (&
in6mask128), (sizeof(in6mask128)))
347 sizeof(in6mask128))__builtin_memcmp((&ipo->ipo_dst.sin6.sin6_addr), (&
in6mask128), (sizeof(in6mask128)))
== 0))
348 dignore = 1;
349 break;
350#endif /* INET6 */
351 }
352
353 /* Likewise for source. */
354 switch (ipo->ipo_src.sa.sa_family) {
355 case AF_INET2:
356 if (ipo->ipo_src.sin.sin_addr.s_addr == INADDR_ANY((u_int32_t) (__uint32_t)(__builtin_constant_p((u_int32_t)(0x00000000
)) ? (__uint32_t)(((__uint32_t)((u_int32_t)(0x00000000)) &
0xff) << 24 | ((__uint32_t)((u_int32_t)(0x00000000)) &
0xff00) << 8 | ((__uint32_t)((u_int32_t)(0x00000000)) &
0xff0000) >> 8 | ((__uint32_t)((u_int32_t)(0x00000000)
) & 0xff000000) >> 24) : __swap32md((u_int32_t)(0x00000000
))))
)
357 signore = 1;
358 break;
359
360#ifdef INET61
361 case AF_INET624:
362 if (IN6_IS_ADDR_UNSPECIFIED(&ipo->ipo_src.sin6.sin6_addr)((*(const u_int32_t *)(const void *)(&(&ipo->ipo_src
.sin6.sin6_addr)->__u6_addr.__u6_addr8[0]) == 0) &&
(*(const u_int32_t *)(const void *)(&(&ipo->ipo_src
.sin6.sin6_addr)->__u6_addr.__u6_addr8[4]) == 0) &&
(*(const u_int32_t *)(const void *)(&(&ipo->ipo_src
.sin6.sin6_addr)->__u6_addr.__u6_addr8[8]) == 0) &&
(*(const u_int32_t *)(const void *)(&(&ipo->ipo_src
.sin6.sin6_addr)->__u6_addr.__u6_addr8[12]) == 0))
)
363 signore = 1;
364 break;
365#endif /* INET6 */
366 }
367
368 /* Do we have a cached entry ? If so, check if it's still valid. */
369 mtx_enter(&ipo_tdb_mtx);
370 if (ipo->ipo_tdb != NULL((void *)0) &&
371 (ipo->ipo_tdb->tdb_flags & TDBF_INVALID0x00010)) {
372 TAILQ_REMOVE(&ipo->ipo_tdb->tdb_policy_head, ipo,do { if (((ipo)->ipo_tdb_next.tqe_next) != ((void *)0)) (ipo
)->ipo_tdb_next.tqe_next->ipo_tdb_next.tqe_prev = (ipo)
->ipo_tdb_next.tqe_prev; else (&ipo->ipo_tdb->tdb_policy_head
)->tqh_last = (ipo)->ipo_tdb_next.tqe_prev; *(ipo)->
ipo_tdb_next.tqe_prev = (ipo)->ipo_tdb_next.tqe_next; ((ipo
)->ipo_tdb_next.tqe_prev) = ((void *)-1); ((ipo)->ipo_tdb_next
.tqe_next) = ((void *)-1); } while (0)
373 ipo_tdb_next)do { if (((ipo)->ipo_tdb_next.tqe_next) != ((void *)0)) (ipo
)->ipo_tdb_next.tqe_next->ipo_tdb_next.tqe_prev = (ipo)
->ipo_tdb_next.tqe_prev; else (&ipo->ipo_tdb->tdb_policy_head
)->tqh_last = (ipo)->ipo_tdb_next.tqe_prev; *(ipo)->
ipo_tdb_next.tqe_prev = (ipo)->ipo_tdb_next.tqe_next; ((ipo
)->ipo_tdb_next.tqe_prev) = ((void *)-1); ((ipo)->ipo_tdb_next
.tqe_next) = ((void *)-1); } while (0)
;
374 tdb_unref(ipo->ipo_tdb);
375 ipo->ipo_tdb = NULL((void *)0);
376 }
377 mtx_leave(&ipo_tdb_mtx);
378
379 /* Outgoing packet policy check. */
380 if (direction == IPSP_DIRECTION_OUT0x2) {
381 /*
382 * If the packet is destined for the policy-specified
383 * gateway/endhost, and the socket has the BYPASS
384 * option set, skip IPsec processing.
385 */
386 if ((inp != NULL((void *)0)) &&
387 (inp->inp_seclevel[SL_ESP_TRANS1] == IPSEC_LEVEL_BYPASS0x00) &&
388 (inp->inp_seclevel[SL_ESP_NETWORK2] ==
389 IPSEC_LEVEL_BYPASS0x00) &&
390 (inp->inp_seclevel[SL_AUTH0] == IPSEC_LEVEL_BYPASS0x00)) {
391 /* Direct match. */
392 if (dignore ||
393 !memcmp(&sdst, &ipo->ipo_dst, sdst.sa.sa_len)__builtin_memcmp((&sdst), (&ipo->ipo_dst), (sdst.sa
.sa_len))
) {
394 if (tdbout != NULL((void *)0))
395 *tdbout = NULL((void *)0);
396 return 0;
397 }
398 }
399
400 /* Check that the cached TDB (if present), is appropriate. */
401 mtx_enter(&ipo_tdb_mtx);
402 if (ipo->ipo_tdb != NULL((void *)0)) {
403 if ((ipo->ipo_last_searched <= ipsec_last_added) ||
404 (ipo->ipo_sproto != ipo->ipo_tdb->tdb_sproto) ||
405 memcmp(dignore ? &sdst : &ipo->ipo_dst,__builtin_memcmp((dignore ? &sdst : &ipo->ipo_dst)
, (&ipo->ipo_tdb->tdb_dst), (ipo->ipo_tdb->tdb_dst
.sa.sa_len))
406 &ipo->ipo_tdb->tdb_dst,__builtin_memcmp((dignore ? &sdst : &ipo->ipo_dst)
, (&ipo->ipo_tdb->tdb_dst), (ipo->ipo_tdb->tdb_dst
.sa.sa_len))
407 ipo->ipo_tdb->tdb_dst.sa.sa_len)__builtin_memcmp((dignore ? &sdst : &ipo->ipo_dst)
, (&ipo->ipo_tdb->tdb_dst), (ipo->ipo_tdb->tdb_dst
.sa.sa_len))
)
408 goto nomatchout;
409
410 if (!ipsp_aux_match(ipo->ipo_tdb,
411 ipsecflowinfo_ids? ipsecflowinfo_ids: ipo->ipo_ids,
412 &ipo->ipo_addr, &ipo->ipo_mask))
413 goto nomatchout;
414
415 /* Cached entry is good. */
416 error = ipsp_spd_inp(m, inp, ipo, tdbout);
417 mtx_leave(&ipo_tdb_mtx);
418 return error;
419
420 nomatchout:
421 /* Cached TDB was not good. */
422 TAILQ_REMOVE(&ipo->ipo_tdb->tdb_policy_head, ipo,do { if (((ipo)->ipo_tdb_next.tqe_next) != ((void *)0)) (ipo
)->ipo_tdb_next.tqe_next->ipo_tdb_next.tqe_prev = (ipo)
->ipo_tdb_next.tqe_prev; else (&ipo->ipo_tdb->tdb_policy_head
)->tqh_last = (ipo)->ipo_tdb_next.tqe_prev; *(ipo)->
ipo_tdb_next.tqe_prev = (ipo)->ipo_tdb_next.tqe_next; ((ipo
)->ipo_tdb_next.tqe_prev) = ((void *)-1); ((ipo)->ipo_tdb_next
.tqe_next) = ((void *)-1); } while (0)
423 ipo_tdb_next)do { if (((ipo)->ipo_tdb_next.tqe_next) != ((void *)0)) (ipo
)->ipo_tdb_next.tqe_next->ipo_tdb_next.tqe_prev = (ipo)
->ipo_tdb_next.tqe_prev; else (&ipo->ipo_tdb->tdb_policy_head
)->tqh_last = (ipo)->ipo_tdb_next.tqe_prev; *(ipo)->
ipo_tdb_next.tqe_prev = (ipo)->ipo_tdb_next.tqe_next; ((ipo
)->ipo_tdb_next.tqe_prev) = ((void *)-1); ((ipo)->ipo_tdb_next
.tqe_next) = ((void *)-1); } while (0)
;
424 tdb_unref(ipo->ipo_tdb);
425 ipo->ipo_tdb = NULL((void *)0);
426 ipo->ipo_last_searched = 0;
427 }
428
429 /*
430 * If no SA has been added since the last time we did a
431 * lookup, there's no point searching for one. However, if the
432 * destination gateway is left unspecified (or is all-1's),
433 * always lookup since this is a generic-match rule
434 * (otherwise, we can have situations where SAs to some
435 * destinations exist but are not used, possibly leading to an
436 * explosion in the number of acquired SAs).
437 */
438 if (ipo->ipo_last_searched <= ipsec_last_added) {
439 struct tdb *tdbp_new;
440
441 /* "Touch" the entry. */
442 if (dignore == 0)
443 ipo->ipo_last_searched = getuptime();
444
445 /* gettdb() takes tdb_sadb_mtx, preserve lock order */
446 mtx_leave(&ipo_tdb_mtx);
447 /* Find an appropriate SA from the existing ones. */
448 tdbp_new = gettdbbydst(rdomain,
449 dignore ? &sdst : &ipo->ipo_dst,
450 ipo->ipo_sproto,
451 ipsecflowinfo_ids? ipsecflowinfo_ids: ipo->ipo_ids,
452 &ipo->ipo_addr, &ipo->ipo_mask);
453 ids = NULL((void *)0);
454 mtx_enter(&ipo_tdb_mtx);
455 if ((tdbp_new != NULL((void *)0)) &&
456 (tdbp_new->tdb_flags & TDBF_DELETED0x00040)) {
457 /*
458 * After tdb_delete() has released ipo_tdb_mtx
459 * in tdb_unlink(), never add a new one.
460 * tdb_cleanspd() has to catch all of them.
461 */
462 tdb_unref(tdbp_new);
463 tdbp_new = NULL((void *)0);
464 }
465 if (ipo->ipo_tdb != NULL((void *)0)) {
466 /* Remove cached TDB from parallel thread. */
467 TAILQ_REMOVE(&ipo->ipo_tdb->tdb_policy_head,do { if (((ipo)->ipo_tdb_next.tqe_next) != ((void *)0)) (ipo
)->ipo_tdb_next.tqe_next->ipo_tdb_next.tqe_prev = (ipo)
->ipo_tdb_next.tqe_prev; else (&ipo->ipo_tdb->tdb_policy_head
)->tqh_last = (ipo)->ipo_tdb_next.tqe_prev; *(ipo)->
ipo_tdb_next.tqe_prev = (ipo)->ipo_tdb_next.tqe_next; ((ipo
)->ipo_tdb_next.tqe_prev) = ((void *)-1); ((ipo)->ipo_tdb_next
.tqe_next) = ((void *)-1); } while (0)
468 ipo, ipo_tdb_next)do { if (((ipo)->ipo_tdb_next.tqe_next) != ((void *)0)) (ipo
)->ipo_tdb_next.tqe_next->ipo_tdb_next.tqe_prev = (ipo)
->ipo_tdb_next.tqe_prev; else (&ipo->ipo_tdb->tdb_policy_head
)->tqh_last = (ipo)->ipo_tdb_next.tqe_prev; *(ipo)->
ipo_tdb_next.tqe_prev = (ipo)->ipo_tdb_next.tqe_next; ((ipo
)->ipo_tdb_next.tqe_prev) = ((void *)-1); ((ipo)->ipo_tdb_next
.tqe_next) = ((void *)-1); } while (0)
;
469 tdb_unref(ipo->ipo_tdb);
470 }
471 ipo->ipo_tdb = tdbp_new;
472 if (ipo->ipo_tdb != NULL((void *)0)) {
473 /* gettdbbydst() has already refcounted tdb */
474 TAILQ_INSERT_TAIL(do { (ipo)->ipo_tdb_next.tqe_next = ((void *)0); (ipo)->
ipo_tdb_next.tqe_prev = (&ipo->ipo_tdb->tdb_policy_head
)->tqh_last; *(&ipo->ipo_tdb->tdb_policy_head)->
tqh_last = (ipo); (&ipo->ipo_tdb->tdb_policy_head)->
tqh_last = &(ipo)->ipo_tdb_next.tqe_next; } while (0)
475 &ipo->ipo_tdb->tdb_policy_head,do { (ipo)->ipo_tdb_next.tqe_next = ((void *)0); (ipo)->
ipo_tdb_next.tqe_prev = (&ipo->ipo_tdb->tdb_policy_head
)->tqh_last; *(&ipo->ipo_tdb->tdb_policy_head)->
tqh_last = (ipo); (&ipo->ipo_tdb->tdb_policy_head)->
tqh_last = &(ipo)->ipo_tdb_next.tqe_next; } while (0)
476 ipo, ipo_tdb_next)do { (ipo)->ipo_tdb_next.tqe_next = ((void *)0); (ipo)->
ipo_tdb_next.tqe_prev = (&ipo->ipo_tdb->tdb_policy_head
)->tqh_last; *(&ipo->ipo_tdb->tdb_policy_head)->
tqh_last = (ipo); (&ipo->ipo_tdb->tdb_policy_head)->
tqh_last = &(ipo)->ipo_tdb_next.tqe_next; } while (0)
;
477 error = ipsp_spd_inp(m, inp, ipo, tdbout);
478 mtx_leave(&ipo_tdb_mtx);
479 return error;
480 }
481 }
482 mtx_leave(&ipo_tdb_mtx);
483
484 /* So, we don't have an SA -- just a policy. */
485 switch (ipo->ipo_type) {
486 case IPSP_IPSEC_REQUIRE2:
487 /* Acquire SA through key management. */
488 if (ipsp_acquire_sa(ipo,
489 dignore ? &sdst : &ipo->ipo_dst,
490 signore ? NULL((void *)0) : &ipo->ipo_src, ddst, m) != 0) {
491 return EACCES13;
492 }
493
494 /* FALLTHROUGH */
495 case IPSP_IPSEC_DONTACQ5:
496 return -EINVAL22; /* Silently drop packet. */
497
498 case IPSP_IPSEC_ACQUIRE1:
499 /* Acquire SA through key management. */
500 ipsp_acquire_sa(ipo, dignore ? &sdst : &ipo->ipo_dst,
501 signore ? NULL((void *)0) : &ipo->ipo_src, ddst, NULL((void *)0));
502
503 /* FALLTHROUGH */
504 case IPSP_IPSEC_USE0:
505 return ipsp_spd_inp(m, inp, ipo, tdbout);
506 }
507 } else { /* IPSP_DIRECTION_IN */
508 if (tdbp != NULL((void *)0)) {
509 /*
510 * Special case for bundled IPcomp/ESP SAs:
511 * 1) only IPcomp flows are loaded into kernel
512 * 2) input processing processes ESP SA first
513 * 3) then optional IPcomp processing happens
514 * 4) we only update m_tag for ESP
515 * => 'tdbp' is always set to ESP SA
516 * => flow has ipo_proto for IPcomp
517 * So if 'tdbp' points to an ESP SA and this 'tdbp' is
518 * bundled with an IPcomp SA, then we replace 'tdbp'
519 * with the IPcomp SA at tdbp->tdb_inext.
520 */
521 if (ipo->ipo_sproto == IPPROTO_IPCOMP108 &&
522 tdbp->tdb_sproto == IPPROTO_ESP50 &&
523 tdbp->tdb_inext != NULL((void *)0) &&
524 tdbp->tdb_inext->tdb_sproto == IPPROTO_IPCOMP108)
525 tdbp = tdbp->tdb_inext;
526
527 /* Direct match in the cache. */
528 mtx_enter(&ipo_tdb_mtx);
529 if (ipo->ipo_tdb == tdbp) {
530 error = ipsp_spd_inp(m, inp, ipo, tdbout);
531 mtx_leave(&ipo_tdb_mtx);
532 return error;
533 }
534 mtx_leave(&ipo_tdb_mtx);
535
536 if (memcmp(dignore ? &ssrc : &ipo->ipo_dst,__builtin_memcmp((dignore ? &ssrc : &ipo->ipo_dst)
, (&tdbp->tdb_src), (tdbp->tdb_src.sa.sa_len))
537 &tdbp->tdb_src, tdbp->tdb_src.sa.sa_len)__builtin_memcmp((dignore ? &ssrc : &ipo->ipo_dst)
, (&tdbp->tdb_src), (tdbp->tdb_src.sa.sa_len))
||
538 (ipo->ipo_sproto != tdbp->tdb_sproto))
539 goto nomatchin;
540
541 /* Match source/dest IDs. */
542 if (ipo->ipo_ids)
543 if (tdbp->tdb_ids == NULL((void *)0) ||
544 !ipsp_ids_match(ipo->ipo_ids, tdbp->tdb_ids))
545 goto nomatchin;
546
547 /* Add it to the cache. */
548 mtx_enter(&ipo_tdb_mtx);
549 if (ipo->ipo_tdb != NULL((void *)0)) {
550 TAILQ_REMOVE(&ipo->ipo_tdb->tdb_policy_head,do { if (((ipo)->ipo_tdb_next.tqe_next) != ((void *)0)) (ipo
)->ipo_tdb_next.tqe_next->ipo_tdb_next.tqe_prev = (ipo)
->ipo_tdb_next.tqe_prev; else (&ipo->ipo_tdb->tdb_policy_head
)->tqh_last = (ipo)->ipo_tdb_next.tqe_prev; *(ipo)->
ipo_tdb_next.tqe_prev = (ipo)->ipo_tdb_next.tqe_next; ((ipo
)->ipo_tdb_next.tqe_prev) = ((void *)-1); ((ipo)->ipo_tdb_next
.tqe_next) = ((void *)-1); } while (0)
551 ipo, ipo_tdb_next)do { if (((ipo)->ipo_tdb_next.tqe_next) != ((void *)0)) (ipo
)->ipo_tdb_next.tqe_next->ipo_tdb_next.tqe_prev = (ipo)
->ipo_tdb_next.tqe_prev; else (&ipo->ipo_tdb->tdb_policy_head
)->tqh_last = (ipo)->ipo_tdb_next.tqe_prev; *(ipo)->
ipo_tdb_next.tqe_prev = (ipo)->ipo_tdb_next.tqe_next; ((ipo
)->ipo_tdb_next.tqe_prev) = ((void *)-1); ((ipo)->ipo_tdb_next
.tqe_next) = ((void *)-1); } while (0)
;
552 tdb_unref(ipo->ipo_tdb);
553 }
554 ipo->ipo_tdb = tdb_ref(tdbp);
555 TAILQ_INSERT_TAIL(&tdbp->tdb_policy_head, ipo,do { (ipo)->ipo_tdb_next.tqe_next = ((void *)0); (ipo)->
ipo_tdb_next.tqe_prev = (&tdbp->tdb_policy_head)->tqh_last
; *(&tdbp->tdb_policy_head)->tqh_last = (ipo); (&
tdbp->tdb_policy_head)->tqh_last = &(ipo)->ipo_tdb_next
.tqe_next; } while (0)
556 ipo_tdb_next)do { (ipo)->ipo_tdb_next.tqe_next = ((void *)0); (ipo)->
ipo_tdb_next.tqe_prev = (&tdbp->tdb_policy_head)->tqh_last
; *(&tdbp->tdb_policy_head)->tqh_last = (ipo); (&
tdbp->tdb_policy_head)->tqh_last = &(ipo)->ipo_tdb_next
.tqe_next; } while (0)
;
557 error = ipsp_spd_inp(m, inp, ipo, tdbout);
558 mtx_leave(&ipo_tdb_mtx);
559 return error;
560
561 nomatchin: /* Nothing needed here, falling through */
562 ;
563 }
564
565 /* Check whether cached entry applies. */
566 mtx_enter(&ipo_tdb_mtx);
567 if (ipo->ipo_tdb != NULL((void *)0)) {
568 /*
569 * We only need to check that the correct
570 * security protocol and security gateway are
571 * set; IDs will be the same since the cached
572 * entry is linked on this policy.
573 */
574 if (ipo->ipo_sproto == ipo->ipo_tdb->tdb_sproto &&
575 !memcmp(&ipo->ipo_tdb->tdb_src,__builtin_memcmp((&ipo->ipo_tdb->tdb_src), (dignore
? &ssrc : &ipo->ipo_dst), (ipo->ipo_tdb->tdb_src
.sa.sa_len))
576 dignore ? &ssrc : &ipo->ipo_dst,__builtin_memcmp((&ipo->ipo_tdb->tdb_src), (dignore
? &ssrc : &ipo->ipo_dst), (ipo->ipo_tdb->tdb_src
.sa.sa_len))
577 ipo->ipo_tdb->tdb_src.sa.sa_len)__builtin_memcmp((&ipo->ipo_tdb->tdb_src), (dignore
? &ssrc : &ipo->ipo_dst), (ipo->ipo_tdb->tdb_src
.sa.sa_len))
)
578 goto skipinputsearch;
579
580 /* Not applicable, unlink. */
581 TAILQ_REMOVE(&ipo->ipo_tdb->tdb_policy_head, ipo,do { if (((ipo)->ipo_tdb_next.tqe_next) != ((void *)0)) (ipo
)->ipo_tdb_next.tqe_next->ipo_tdb_next.tqe_prev = (ipo)
->ipo_tdb_next.tqe_prev; else (&ipo->ipo_tdb->tdb_policy_head
)->tqh_last = (ipo)->ipo_tdb_next.tqe_prev; *(ipo)->
ipo_tdb_next.tqe_prev = (ipo)->ipo_tdb_next.tqe_next; ((ipo
)->ipo_tdb_next.tqe_prev) = ((void *)-1); ((ipo)->ipo_tdb_next
.tqe_next) = ((void *)-1); } while (0)
582 ipo_tdb_next)do { if (((ipo)->ipo_tdb_next.tqe_next) != ((void *)0)) (ipo
)->ipo_tdb_next.tqe_next->ipo_tdb_next.tqe_prev = (ipo)
->ipo_tdb_next.tqe_prev; else (&ipo->ipo_tdb->tdb_policy_head
)->tqh_last = (ipo)->ipo_tdb_next.tqe_prev; *(ipo)->
ipo_tdb_next.tqe_prev = (ipo)->ipo_tdb_next.tqe_next; ((ipo
)->ipo_tdb_next.tqe_prev) = ((void *)-1); ((ipo)->ipo_tdb_next
.tqe_next) = ((void *)-1); } while (0)
;
583 tdb_unref(ipo->ipo_tdb);
584 ipo->ipo_tdb = NULL((void *)0);
585 ipo->ipo_last_searched = 0;
586 }
587
588 /* Find whether there exists an appropriate SA. */
589 if (ipo->ipo_last_searched <= ipsec_last_added) {
590 struct tdb *tdbp_new;
591
592 if (dignore == 0)
593 ipo->ipo_last_searched = getuptime();
594
595 /* gettdb() takes tdb_sadb_mtx, preserve lock order */
596 mtx_leave(&ipo_tdb_mtx);
597 tdbp_new = gettdbbysrc(rdomain,
598 dignore ? &ssrc : &ipo->ipo_dst,
599 ipo->ipo_sproto, ipo->ipo_ids,
600 &ipo->ipo_addr, &ipo->ipo_mask);
601 mtx_enter(&ipo_tdb_mtx);
602 if ((tdbp_new != NULL((void *)0)) &&
603 (tdbp_new->tdb_flags & TDBF_DELETED0x00040)) {
604 /*
605 * After tdb_delete() has released ipo_tdb_mtx
606 * in tdb_unlink(), never add a new one.
607 * tdb_cleanspd() has to catch all of them.
608 */
609 tdb_unref(tdbp_new);
610 tdbp_new = NULL((void *)0);
611 }
612 if (ipo->ipo_tdb != NULL((void *)0)) {
613 /* Remove cached TDB from parallel thread. */
614 TAILQ_REMOVE(&ipo->ipo_tdb->tdb_policy_head,do { if (((ipo)->ipo_tdb_next.tqe_next) != ((void *)0)) (ipo
)->ipo_tdb_next.tqe_next->ipo_tdb_next.tqe_prev = (ipo)
->ipo_tdb_next.tqe_prev; else (&ipo->ipo_tdb->tdb_policy_head
)->tqh_last = (ipo)->ipo_tdb_next.tqe_prev; *(ipo)->
ipo_tdb_next.tqe_prev = (ipo)->ipo_tdb_next.tqe_next; ((ipo
)->ipo_tdb_next.tqe_prev) = ((void *)-1); ((ipo)->ipo_tdb_next
.tqe_next) = ((void *)-1); } while (0)
615 ipo, ipo_tdb_next)do { if (((ipo)->ipo_tdb_next.tqe_next) != ((void *)0)) (ipo
)->ipo_tdb_next.tqe_next->ipo_tdb_next.tqe_prev = (ipo)
->ipo_tdb_next.tqe_prev; else (&ipo->ipo_tdb->tdb_policy_head
)->tqh_last = (ipo)->ipo_tdb_next.tqe_prev; *(ipo)->
ipo_tdb_next.tqe_prev = (ipo)->ipo_tdb_next.tqe_next; ((ipo
)->ipo_tdb_next.tqe_prev) = ((void *)-1); ((ipo)->ipo_tdb_next
.tqe_next) = ((void *)-1); } while (0)
;
616 tdb_unref(ipo->ipo_tdb);
617 }
618 ipo->ipo_tdb = tdbp_new;
619 if (ipo->ipo_tdb != NULL((void *)0)) {
620 /* gettdbbysrc() has already refcounted tdb */
621 TAILQ_INSERT_TAIL(do { (ipo)->ipo_tdb_next.tqe_next = ((void *)0); (ipo)->
ipo_tdb_next.tqe_prev = (&ipo->ipo_tdb->tdb_policy_head
)->tqh_last; *(&ipo->ipo_tdb->tdb_policy_head)->
tqh_last = (ipo); (&ipo->ipo_tdb->tdb_policy_head)->
tqh_last = &(ipo)->ipo_tdb_next.tqe_next; } while (0)
622 &ipo->ipo_tdb->tdb_policy_head,do { (ipo)->ipo_tdb_next.tqe_next = ((void *)0); (ipo)->
ipo_tdb_next.tqe_prev = (&ipo->ipo_tdb->tdb_policy_head
)->tqh_last; *(&ipo->ipo_tdb->tdb_policy_head)->
tqh_last = (ipo); (&ipo->ipo_tdb->tdb_policy_head)->
tqh_last = &(ipo)->ipo_tdb_next.tqe_next; } while (0)
623 ipo, ipo_tdb_next)do { (ipo)->ipo_tdb_next.tqe_next = ((void *)0); (ipo)->
ipo_tdb_next.tqe_prev = (&ipo->ipo_tdb->tdb_policy_head
)->tqh_last; *(&ipo->ipo_tdb->tdb_policy_head)->
tqh_last = (ipo); (&ipo->ipo_tdb->tdb_policy_head)->
tqh_last = &(ipo)->ipo_tdb_next.tqe_next; } while (0)
;
624 }
625 }
626 skipinputsearch:
627 mtx_leave(&ipo_tdb_mtx);
628
629 switch (ipo->ipo_type) {
630 case IPSP_IPSEC_REQUIRE2:
631 /* If appropriate SA exists, don't acquire another. */
632 if (ipo->ipo_tdb != NULL((void *)0))
633 return -EINVAL22; /* Silently drop packet. */
634
635 /* Acquire SA through key management. */
636 if ((error = ipsp_acquire_sa(ipo,
637 dignore ? &ssrc : &ipo->ipo_dst,
638 signore ? NULL((void *)0) : &ipo->ipo_src, ddst, m)) != 0)
639 return error;
640
641 /* FALLTHROUGH */
642 case IPSP_IPSEC_DONTACQ5:
643 return -EINVAL22; /* Silently drop packet. */
644
645 case IPSP_IPSEC_ACQUIRE1:
646 /* If appropriate SA exists, don't acquire another. */
647 if (ipo->ipo_tdb != NULL((void *)0))
648 return ipsp_spd_inp(m, inp, ipo, tdbout);
649
650 /* Acquire SA through key management. */
651 ipsp_acquire_sa(ipo, dignore ? &ssrc : &ipo->ipo_dst,
652 signore ? NULL((void *)0) : &ipo->ipo_src, ddst, NULL((void *)0));
653
654 /* FALLTHROUGH */
655 case IPSP_IPSEC_USE0:
656 return ipsp_spd_inp(m, inp, ipo, tdbout);
657 }
658 }
659
660 /* Shouldn't ever get this far. */
661 return EINVAL22;
662}
663
664/*
665 * Delete a policy from the SPD.
666 */
667int
668ipsec_delete_policy(struct ipsec_policy *ipo)
669{
670 struct ipsec_acquire *ipa;
671 struct radix_node_head *rnh;
672 struct radix_node *rn = (struct radix_node *)ipo;
673 int err = 0;
674
675 NET_ASSERT_LOCKED()do { int _s = rw_status(&netlock); if ((splassert_ctl >
0) && (_s != 0x0001UL && _s != 0x0002UL)) splassert_fail
(0x0002UL, _s, __func__); } while (0)
;
676
677 if (--ipo->ipo_ref_count > 0)
678 return 0;
679
680 /* Delete from SPD. */
681 if ((rnh = spd_table_get(ipo->ipo_rdomain)) == NULL((void *)0) ||
682 rn_delete(&ipo->ipo_addr, &ipo->ipo_mask, rnh, rn) == NULL((void *)0))
683 return (ESRCH3);
684
685 mtx_enter(&ipo_tdb_mtx);
686 if (ipo->ipo_tdb != NULL((void *)0)) {
687 TAILQ_REMOVE(&ipo->ipo_tdb->tdb_policy_head, ipo,do { if (((ipo)->ipo_tdb_next.tqe_next) != ((void *)0)) (ipo
)->ipo_tdb_next.tqe_next->ipo_tdb_next.tqe_prev = (ipo)
->ipo_tdb_next.tqe_prev; else (&ipo->ipo_tdb->tdb_policy_head
)->tqh_last = (ipo)->ipo_tdb_next.tqe_prev; *(ipo)->
ipo_tdb_next.tqe_prev = (ipo)->ipo_tdb_next.tqe_next; ((ipo
)->ipo_tdb_next.tqe_prev) = ((void *)-1); ((ipo)->ipo_tdb_next
.tqe_next) = ((void *)-1); } while (0)
688 ipo_tdb_next)do { if (((ipo)->ipo_tdb_next.tqe_next) != ((void *)0)) (ipo
)->ipo_tdb_next.tqe_next->ipo_tdb_next.tqe_prev = (ipo)
->ipo_tdb_next.tqe_prev; else (&ipo->ipo_tdb->tdb_policy_head
)->tqh_last = (ipo)->ipo_tdb_next.tqe_prev; *(ipo)->
ipo_tdb_next.tqe_prev = (ipo)->ipo_tdb_next.tqe_next; ((ipo
)->ipo_tdb_next.tqe_prev) = ((void *)-1); ((ipo)->ipo_tdb_next
.tqe_next) = ((void *)-1); } while (0)
;
689 tdb_unref(ipo->ipo_tdb);
690 ipo->ipo_tdb = NULL((void *)0);
691 }
692 mtx_leave(&ipo_tdb_mtx);
693
694 while ((ipa = TAILQ_FIRST(&ipo->ipo_acquires)((&ipo->ipo_acquires)->tqh_first)) != NULL((void *)0))
695 ipsp_delete_acquire(ipa);
696
697 TAILQ_REMOVE(&ipsec_policy_head, ipo, ipo_list)do { if (((ipo)->ipo_list.tqe_next) != ((void *)0)) (ipo)->
ipo_list.tqe_next->ipo_list.tqe_prev = (ipo)->ipo_list.
tqe_prev; else (&ipsec_policy_head)->tqh_last = (ipo)->
ipo_list.tqe_prev; *(ipo)->ipo_list.tqe_prev = (ipo)->ipo_list
.tqe_next; ((ipo)->ipo_list.tqe_prev) = ((void *)-1); ((ipo
)->ipo_list.tqe_next) = ((void *)-1); } while (0)
;
698
699 if (ipo->ipo_ids)
700 ipsp_ids_free(ipo->ipo_ids);
701
702 ipsec_in_use--;
703
704 pool_put(&ipsec_policy_pool, ipo);
705
706 return err;
707}
708
/*
 * Timeout handler: expire a pending ACQUIRE record.  Runs from the
 * timeout thread, so take the net lock before touching the lists.
 */
void
ipsp_delete_acquire_timo(void *v)
{
	struct ipsec_acquire *acq = v;

	NET_LOCK();
	ipsp_delete_acquire(acq);
	NET_UNLOCK();
}
718
719/*
720 * Delete a pending IPsec acquire record.
721 */
722void
723ipsp_delete_acquire(struct ipsec_acquire *ipa)
724{
725 NET_ASSERT_LOCKED()do { int _s = rw_status(&netlock); if ((splassert_ctl >
0) && (_s != 0x0001UL && _s != 0x0002UL)) splassert_fail
(0x0002UL, _s, __func__); } while (0)
;
726
727 timeout_del(&ipa->ipa_timeout);
728 TAILQ_REMOVE(&ipsec_acquire_head, ipa, ipa_next)do { if (((ipa)->ipa_next.tqe_next) != ((void *)0)) (ipa)->
ipa_next.tqe_next->ipa_next.tqe_prev = (ipa)->ipa_next.
tqe_prev; else (&ipsec_acquire_head)->tqh_last = (ipa)
->ipa_next.tqe_prev; *(ipa)->ipa_next.tqe_prev = (ipa)->
ipa_next.tqe_next; ((ipa)->ipa_next.tqe_prev) = ((void *)-
1); ((ipa)->ipa_next.tqe_next) = ((void *)-1); } while (0)
;
729 if (ipa->ipa_policy != NULL((void *)0))
730 TAILQ_REMOVE(&ipa->ipa_policy->ipo_acquires, ipa,do { if (((ipa)->ipa_ipo_next.tqe_next) != ((void *)0)) (ipa
)->ipa_ipo_next.tqe_next->ipa_ipo_next.tqe_prev = (ipa)
->ipa_ipo_next.tqe_prev; else (&ipa->ipa_policy->
ipo_acquires)->tqh_last = (ipa)->ipa_ipo_next.tqe_prev;
*(ipa)->ipa_ipo_next.tqe_prev = (ipa)->ipa_ipo_next.tqe_next
; ((ipa)->ipa_ipo_next.tqe_prev) = ((void *)-1); ((ipa)->
ipa_ipo_next.tqe_next) = ((void *)-1); } while (0)
731 ipa_ipo_next)do { if (((ipa)->ipa_ipo_next.tqe_next) != ((void *)0)) (ipa
)->ipa_ipo_next.tqe_next->ipa_ipo_next.tqe_prev = (ipa)
->ipa_ipo_next.tqe_prev; else (&ipa->ipa_policy->
ipo_acquires)->tqh_last = (ipa)->ipa_ipo_next.tqe_prev;
*(ipa)->ipa_ipo_next.tqe_prev = (ipa)->ipa_ipo_next.tqe_next
; ((ipa)->ipa_ipo_next.tqe_prev) = ((void *)-1); ((ipa)->
ipa_ipo_next.tqe_next) = ((void *)-1); } while (0)
;
732 pool_put(&ipsec_acquire_pool, ipa);
733}
734
735/*
736 * Find out if there's an ACQUIRE pending.
737 * XXX Need a better structure.
738 */
739struct ipsec_acquire *
740ipsp_pending_acquire(struct ipsec_policy *ipo, union sockaddr_union *gw)
741{
742 struct ipsec_acquire *ipa;
743
744 NET_ASSERT_LOCKED()do { int _s = rw_status(&netlock); if ((splassert_ctl >
0) && (_s != 0x0001UL && _s != 0x0002UL)) splassert_fail
(0x0002UL, _s, __func__); } while (0)
;
745
746 TAILQ_FOREACH (ipa, &ipo->ipo_acquires, ipa_ipo_next)for((ipa) = ((&ipo->ipo_acquires)->tqh_first); (ipa
) != ((void *)0); (ipa) = ((ipa)->ipa_ipo_next.tqe_next))
{
747 if (!memcmp(gw, &ipa->ipa_addr, gw->sa.sa_len)__builtin_memcmp((gw), (&ipa->ipa_addr), (gw->sa.sa_len
))
)
748 return ipa;
749 }
750
751 return NULL((void *)0);
752}
753
754/*
755 * Signal key management that we need an SA.
756 * XXX For outgoing policies, we could try to hold on to the mbuf.
757 */
758int
759ipsp_acquire_sa(struct ipsec_policy *ipo, union sockaddr_union *gw,
760 union sockaddr_union *laddr, struct sockaddr_encap *ddst, struct mbuf *m)
761{
762 struct ipsec_acquire *ipa;
763
764 NET_ASSERT_LOCKED()do { int _s = rw_status(&netlock); if ((splassert_ctl >
0) && (_s != 0x0001UL && _s != 0x0002UL)) splassert_fail
(0x0002UL, _s, __func__); } while (0)
;
765
766 /* Check whether request has been made already. */
767 if ((ipa = ipsp_pending_acquire(ipo, gw)) != NULL((void *)0))
Although the value stored to 'ipa' is used in the enclosing expression, the value is never actually read from 'ipa'
768 return 0;
769
770 /* Add request in cache and proceed. */
771 ipa = pool_get(&ipsec_acquire_pool, PR_NOWAIT0x0002|PR_ZERO0x0008);
772 if (ipa == NULL((void *)0))
773 return ENOMEM12;
774
775 ipa->ipa_addr = *gw;
776
777 timeout_set_proc(&ipa->ipa_timeout, ipsp_delete_acquire_timo, ipa);
778
779 ipa->ipa_info.sen_len = ipa->ipa_mask.sen_len = SENT_LENsizeof(struct sockaddr_encap);
780 ipa->ipa_info.sen_family = ipa->ipa_mask.sen_family = PF_KEY30;
781
782 /* Just copy the right information. */
783 switch (ipo->ipo_addr.sen_type) {
784 case SENT_IP40x0001:
785 ipa->ipa_info.sen_type = ipa->ipa_mask.sen_type = SENT_IP40x0001;
786 ipa->ipa_info.sen_directionSen.Sip4.Direction = ipo->ipo_addr.sen_directionSen.Sip4.Direction;
787 ipa->ipa_mask.sen_directionSen.Sip4.Direction = ipo->ipo_mask.sen_directionSen.Sip4.Direction;
788
789 if (ipsp_is_unspecified(ipo->ipo_dst)) {
790 ipa->ipa_info.sen_ip_srcSen.Sip4.Src = ddst->sen_ip_srcSen.Sip4.Src;
791 ipa->ipa_mask.sen_ip_srcSen.Sip4.Src.s_addr = INADDR_BROADCAST((u_int32_t) (__uint32_t)(__builtin_constant_p((u_int32_t)(0xffffffff
)) ? (__uint32_t)(((__uint32_t)((u_int32_t)(0xffffffff)) &
0xff) << 24 | ((__uint32_t)((u_int32_t)(0xffffffff)) &
0xff00) << 8 | ((__uint32_t)((u_int32_t)(0xffffffff)) &
0xff0000) >> 8 | ((__uint32_t)((u_int32_t)(0xffffffff)
) & 0xff000000) >> 24) : __swap32md((u_int32_t)(0xffffffff
))))
;
792
793 ipa->ipa_info.sen_ip_dstSen.Sip4.Dst = ddst->sen_ip_dstSen.Sip4.Dst;
794 ipa->ipa_mask.sen_ip_dstSen.Sip4.Dst.s_addr = INADDR_BROADCAST((u_int32_t) (__uint32_t)(__builtin_constant_p((u_int32_t)(0xffffffff
)) ? (__uint32_t)(((__uint32_t)((u_int32_t)(0xffffffff)) &
0xff) << 24 | ((__uint32_t)((u_int32_t)(0xffffffff)) &
0xff00) << 8 | ((__uint32_t)((u_int32_t)(0xffffffff)) &
0xff0000) >> 8 | ((__uint32_t)((u_int32_t)(0xffffffff)
) & 0xff000000) >> 24) : __swap32md((u_int32_t)(0xffffffff
))))
;
795 } else {
796 ipa->ipa_info.sen_ip_srcSen.Sip4.Src = ipo->ipo_addr.sen_ip_srcSen.Sip4.Src;
797 ipa->ipa_mask.sen_ip_srcSen.Sip4.Src = ipo->ipo_mask.sen_ip_srcSen.Sip4.Src;
798
799 ipa->ipa_info.sen_ip_dstSen.Sip4.Dst = ipo->ipo_addr.sen_ip_dstSen.Sip4.Dst;
800 ipa->ipa_mask.sen_ip_dstSen.Sip4.Dst = ipo->ipo_mask.sen_ip_dstSen.Sip4.Dst;
801 }
802
803 ipa->ipa_info.sen_protoSen.Sip4.Proto = ipo->ipo_addr.sen_protoSen.Sip4.Proto;
804 ipa->ipa_mask.sen_protoSen.Sip4.Proto = ipo->ipo_mask.sen_protoSen.Sip4.Proto;
805
806 if (ipo->ipo_addr.sen_protoSen.Sip4.Proto) {
807 ipa->ipa_info.sen_sportSen.Sip4.Sport = ipo->ipo_addr.sen_sportSen.Sip4.Sport;
808 ipa->ipa_mask.sen_sportSen.Sip4.Sport = ipo->ipo_mask.sen_sportSen.Sip4.Sport;
809
810 ipa->ipa_info.sen_dportSen.Sip4.Dport = ipo->ipo_addr.sen_dportSen.Sip4.Dport;
811 ipa->ipa_mask.sen_dportSen.Sip4.Dport = ipo->ipo_mask.sen_dportSen.Sip4.Dport;
812 }
813 break;
814
815#ifdef INET61
816 case SENT_IP60x0002:
817 ipa->ipa_info.sen_type = ipa->ipa_mask.sen_type = SENT_IP60x0002;
818 ipa->ipa_info.sen_ip6_directionSen.Sip6.Direction =
819 ipo->ipo_addr.sen_ip6_directionSen.Sip6.Direction;
820 ipa->ipa_mask.sen_ip6_directionSen.Sip6.Direction =
821 ipo->ipo_mask.sen_ip6_directionSen.Sip6.Direction;
822
823 if (ipsp_is_unspecified(ipo->ipo_dst)) {
824 ipa->ipa_info.sen_ip6_srcSen.Sip6.Src = ddst->sen_ip6_srcSen.Sip6.Src;
825 ipa->ipa_mask.sen_ip6_srcSen.Sip6.Src = in6mask128;
826
827 ipa->ipa_info.sen_ip6_dstSen.Sip6.Dst = ddst->sen_ip6_dstSen.Sip6.Dst;
828 ipa->ipa_mask.sen_ip6_dstSen.Sip6.Dst = in6mask128;
829 } else {
830 ipa->ipa_info.sen_ip6_srcSen.Sip6.Src = ipo->ipo_addr.sen_ip6_srcSen.Sip6.Src;
831 ipa->ipa_mask.sen_ip6_srcSen.Sip6.Src = ipo->ipo_mask.sen_ip6_srcSen.Sip6.Src;
832
833 ipa->ipa_info.sen_ip6_dstSen.Sip6.Dst = ipo->ipo_addr.sen_ip6_dstSen.Sip6.Dst;
834 ipa->ipa_mask.sen_ip6_dstSen.Sip6.Dst = ipo->ipo_mask.sen_ip6_dstSen.Sip6.Dst;
835 }
836
837 ipa->ipa_info.sen_ip6_protoSen.Sip6.Proto = ipo->ipo_addr.sen_ip6_protoSen.Sip6.Proto;
838 ipa->ipa_mask.sen_ip6_protoSen.Sip6.Proto = ipo->ipo_mask.sen_ip6_protoSen.Sip6.Proto;
839
840 if (ipo->ipo_mask.sen_ip6_protoSen.Sip6.Proto) {
841 ipa->ipa_info.sen_ip6_sportSen.Sip6.Sport =
842 ipo->ipo_addr.sen_ip6_sportSen.Sip6.Sport;
843 ipa->ipa_mask.sen_ip6_sportSen.Sip6.Sport =
844 ipo->ipo_mask.sen_ip6_sportSen.Sip6.Sport;
845 ipa->ipa_info.sen_ip6_dportSen.Sip6.Dport =
846 ipo->ipo_addr.sen_ip6_dportSen.Sip6.Dport;
847 ipa->ipa_mask.sen_ip6_dportSen.Sip6.Dport =
848 ipo->ipo_mask.sen_ip6_dportSen.Sip6.Dport;
849 }
850 break;
851#endif /* INET6 */
852
853 default:
854 pool_put(&ipsec_acquire_pool, ipa);
855 return 0;
856 }
857
858#ifdef IPSEC1
859 timeout_add_sec(&ipa->ipa_timeout, ipsec_expire_acquire);
860#endif
861
862 TAILQ_INSERT_TAIL(&ipsec_acquire_head, ipa, ipa_next)do { (ipa)->ipa_next.tqe_next = ((void *)0); (ipa)->ipa_next
.tqe_prev = (&ipsec_acquire_head)->tqh_last; *(&ipsec_acquire_head
)->tqh_last = (ipa); (&ipsec_acquire_head)->tqh_last
= &(ipa)->ipa_next.tqe_next; } while (0)
;
863 TAILQ_INSERT_TAIL(&ipo->ipo_acquires, ipa, ipa_ipo_next)do { (ipa)->ipa_ipo_next.tqe_next = ((void *)0); (ipa)->
ipa_ipo_next.tqe_prev = (&ipo->ipo_acquires)->tqh_last
; *(&ipo->ipo_acquires)->tqh_last = (ipa); (&ipo
->ipo_acquires)->tqh_last = &(ipa)->ipa_ipo_next
.tqe_next; } while (0)
;
864 ipa->ipa_policy = ipo;
865
866 /* PF_KEYv2 notification message. */
867 return pfkeyv2_acquire(ipo, gw, laddr, &ipa->ipa_seq, ddst);
868}
869
870/*
871 * Deal with PCB security requirements.
872 */
873int
874ipsp_spd_inp(struct mbuf *m, struct inpcb *inp, struct ipsec_policy *ipo,
875 struct tdb **tdbout)
876{
877 /* Sanity check. */
878 if (inp == NULL((void *)0))
879 goto justreturn;
880
881 /* We only support IPSEC_LEVEL_BYPASS or IPSEC_LEVEL_AVAIL */
882
883 if (inp->inp_seclevel[SL_ESP_TRANS1] == IPSEC_LEVEL_BYPASS0x00 &&
884 inp->inp_seclevel[SL_ESP_NETWORK2] == IPSEC_LEVEL_BYPASS0x00 &&
885 inp->inp_seclevel[SL_AUTH0] == IPSEC_LEVEL_BYPASS0x00)
886 goto justreturn;
887
888 if (inp->inp_seclevel[SL_ESP_TRANS1] == IPSEC_LEVEL_AVAIL0x01 &&
889 inp->inp_seclevel[SL_ESP_NETWORK2] == IPSEC_LEVEL_AVAIL0x01 &&
890 inp->inp_seclevel[SL_AUTH0] == IPSEC_LEVEL_AVAIL0x01)
891 goto justreturn;
892
893 return -EINVAL22; /* Silently drop packet. */
894
895 justreturn:
896 if (tdbout != NULL((void *)0)) {
897 if (ipo != NULL((void *)0))
898 *tdbout = tdb_ref(ipo->ipo_tdb);
899 else
900 *tdbout = NULL((void *)0);
901 }
902 return 0;
903}
904
905/*
906 * Find a pending ACQUIRE record based on its sequence number.
907 * XXX Need to use a better data structure.
908 */
909struct ipsec_acquire *
910ipsec_get_acquire(u_int32_t seq)
911{
912 struct ipsec_acquire *ipa;
913
914 NET_ASSERT_LOCKED()do { int _s = rw_status(&netlock); if ((splassert_ctl >
0) && (_s != 0x0001UL && _s != 0x0002UL)) splassert_fail
(0x0002UL, _s, __func__); } while (0)
;
915
916 TAILQ_FOREACH (ipa, &ipsec_acquire_head, ipa_next)for((ipa) = ((&ipsec_acquire_head)->tqh_first); (ipa) !=
((void *)0); (ipa) = ((ipa)->ipa_next.tqe_next))
917 if (ipa->ipa_seq == seq)
918 return ipa;
919
920 return NULL((void *)0);
921}