Bug Summary

File: net/if_wg.c
Warning: line 1883, column 41
Access to field 't_done' results in a dereference of a null pointer
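
In short: wg_tag_get() (source lines 870-884 below) returns NULL when m_tag_get() cannot allocate a tag under memory pressure, and the statement flagged at line 1883 (past the end of this excerpt) dereferences the returned tag's t_done field without checking for that failure. A minimal sketch of the guarded pattern, reusing names from this file (the guard shown here is illustrative, not the committed fix):

    struct wg_tag *t;

    /* m_tag_get() may fail, so the result must be checked */
    if ((t = wg_tag_get(m)) == NULL) {
            m_freem(m);
            return;
    }
    t->t_done = 0;    /* safe to touch only after the NULL check */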

Annotated Source Code

clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name if_wg.c -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/lib/clang/13.0.0 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -D CONFIG_DRM_AMD_DC_DCN3_0 -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant -fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free 
-fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /usr/obj/sys/arch/amd64/compile/GENERIC.MP/scan-build/2022-01-12-131800-47421-1 -x c /usr/src/sys/net/if_wg.c
1/* $OpenBSD: if_wg.c,v 1.20 2022/01/02 22:36:04 jsg Exp $ */
2
3/*
4 * Copyright (C) 2015-2020 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
5 * Copyright (C) 2019-2020 Matt Dunwoodie <ncon@noconroy.net>
6 *
7 * Permission to use, copy, modify, and distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
10 *
11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 */
19
20#include "bpfilter.h"
21#include "pf.h"
22
23#include <sys/types.h>
24#include <sys/systm.h>
25#include <sys/param.h>
26#include <sys/pool.h>
27
28#include <sys/socket.h>
29#include <sys/socketvar.h>
30#include <sys/percpu.h>
31#include <sys/ioctl.h>
32#include <sys/mbuf.h>
33#include <sys/protosw.h>
34
35#include <net/if.h>
36#include <net/if_var.h>
37#include <net/if_types.h>
38#include <net/if_wg.h>
39
40#include <net/wg_noise.h>
41#include <net/wg_cookie.h>
42
43#include <net/pfvar.h>
44#include <net/route.h>
45#include <net/bpf.h>
46
47#include <netinet/ip.h>
48#include <netinet/ip6.h>
49#include <netinet/udp.h>
50#include <netinet/in_pcb.h>
51
52#include <crypto/siphash.h>
53
54#define DEFAULT_MTU 1420
55
56#define MAX_STAGED_PKT 128
57#define MAX_QUEUED_PKT 1024
58#define MAX_QUEUED_PKT_MASK (MAX_QUEUED_PKT - 1)
59
60#define MAX_QUEUED_HANDSHAKES 4096
61
62#define HASHTABLE_PEER_SIZE (1 << 11)
63#define HASHTABLE_INDEX_SIZE (1 << 13)
64#define MAX_PEERS_PER_IFACE (1 << 20)
65
66#define REKEY_TIMEOUT 5
67#define REKEY_TIMEOUT_JITTER 334 /* 1/3 sec, round for arc4random_uniform */
68#define KEEPALIVE_TIMEOUT 10
69#define MAX_TIMER_HANDSHAKES (90 / REKEY_TIMEOUT)
70#define NEW_HANDSHAKE_TIMEOUT (REKEY_TIMEOUT + KEEPALIVE_TIMEOUT)
71#define UNDERLOAD_TIMEOUT 1
72
73#define DPRINTF(sc, str, ...) do { if (ISSET((sc)->sc_if.if_flags, IFF_DEBUG)) \
74 printf("%s: " str, (sc)->sc_if.if_xname, ##__VA_ARGS__); } while (0)
75
76#define CONTAINER_OF(ptr, type, member) ({ \
77 const __typeof( ((type *)0)->member ) *__mptr = (ptr); \
78 (type *)( (char *)__mptr - offsetof(type,member) );})
79
80/* First byte indicating packet type on the wire */
81#define WG_PKT_INITIATION htole32(1)
82#define WG_PKT_RESPONSE htole32(2)
83#define WG_PKT_COOKIE htole32(3)
84#define WG_PKT_DATA htole32(4)
85
86#define WG_PKT_WITH_PADDING(n) (((n) + (16-1)) & (~(16-1)))
87#define WG_KEY_SIZE WG_KEY_LEN
88
89struct wg_pkt_initiation {
90 uint32_t t;
91 uint32_t s_idx;
92 uint8_t ue[NOISE_PUBLIC_KEY_LEN];
93 uint8_t es[NOISE_PUBLIC_KEY_LEN + NOISE_AUTHTAG_LEN];
94 uint8_t ets[NOISE_TIMESTAMP_LEN + NOISE_AUTHTAG_LEN];
95 struct cookie_macs m;
96};
97
98struct wg_pkt_response {
99 uint32_t t;
100 uint32_t s_idx;
101 uint32_t r_idx;
102 uint8_t ue[NOISE_PUBLIC_KEY_LEN];
103 uint8_t en[0 + NOISE_AUTHTAG_LEN];
104 struct cookie_macs m;
105};
106
107struct wg_pkt_cookie {
108 uint32_t t;
109 uint32_t r_idx;
110 uint8_t nonce[COOKIE_NONCE_SIZE];
111 uint8_t ec[COOKIE_ENCRYPTED_SIZE];
112};
113
114struct wg_pkt_data {
115 uint32_t t;
116 uint32_t r_idx;
117 uint8_t nonce[sizeof(uint64_t)];
118 uint8_t buf[];
119};
120
121struct wg_endpoint {
122 union {
123 struct sockaddr r_sa;
124 struct sockaddr_in r_sin;
125#ifdef INET6
126 struct sockaddr_in6 r_sin6;
127#endif
128 } e_remote;
129 union {
130 struct in_addr l_in;
131#ifdef INET6
132 struct in6_pktinfo l_pktinfo6;
133#define l_in6 l_pktinfo6.ipi6_addr
134#endif
135 } e_local;
136};
137
138struct wg_tag {
139 struct wg_endpoint t_endpoint;
140 struct wg_peer *t_peer;
141 struct mbuf *t_mbuf;
142 int t_done;
143 int t_mtu;
144};
145
146struct wg_index {
147 LIST_ENTRY(wg_index) i_entry;
148 SLIST_ENTRY(wg_index) i_unused_entry;
149 uint32_t i_key;
150 struct noise_remote *i_value;
151};
152
153struct wg_timers {
154 /* t_lock is for blocking wg_timers_event_* when setting t_disabled. */
155 struct rwlock t_lock;
156
157 int t_disabled;
158 int t_need_another_keepalive;
159 uint16_t t_persistent_keepalive_interval;
160 struct timeout t_new_handshake;
161 struct timeout t_send_keepalive;
162 struct timeout t_retry_handshake;
163 struct timeout t_zero_key_material;
164 struct timeout t_persistent_keepalive;
165
166 struct mutex t_handshake_mtx;
167 struct timespec t_handshake_last_sent; /* nanouptime */
168 struct timespec t_handshake_complete; /* nanotime */
169 int t_handshake_retries;
170};
171
172struct wg_aip {
173 struct art_node a_node;
174 LIST_ENTRY(wg_aip) a_entry;
175 struct wg_peer *a_peer;
176 struct wg_aip_io a_data;
177};
178
179struct wg_queue {
180 struct mutex q_mtx;
181 struct mbuf_list q_list;
182};
183
184struct wg_ring {
185 struct mutex r_mtx;
186 uint32_t r_head;
187 uint32_t r_tail;
188 struct mbuf *r_buf[MAX_QUEUED_PKT];
189};
190
191struct wg_peer {
192 LIST_ENTRY(wg_peer) p_pubkey_entry;
193 TAILQ_ENTRY(wg_peer) p_seq_entry;
194 uint64_t p_id;
195 struct wg_softc *p_sc;
196
197 struct noise_remote p_remote;
198 struct cookie_maker p_cookie;
199 struct wg_timers p_timers;
200
201 struct mutex p_counters_mtx;
202 uint64_t p_counters_tx;
203 uint64_t p_counters_rx;
204
205 struct mutex p_endpoint_mtx;
206 struct wg_endpoint p_endpoint;
207
208 struct task p_send_initiation;
209 struct task p_send_keepalive;
210 struct task p_clear_secrets;
211 struct task p_deliver_out;
212 struct task p_deliver_in;
213
214 struct mbuf_queue p_stage_queue;
215 struct wg_queue p_encap_queue;
216 struct wg_queue p_decap_queue;
217
218 SLIST_HEAD(,wg_index) p_unused_index;
219 struct wg_index p_index[3];
220
221 LIST_HEAD(,wg_aip) p_aip;
222
223 SLIST_ENTRY(wg_peer) p_start_list;
224 int p_start_onlist;
225};
226
227struct wg_softc {
228 struct ifnet sc_if;
229 SIPHASH_KEY sc_secret;
230
231 struct rwlock sc_lock;
232 struct noise_local sc_local;
233 struct cookie_checker sc_cookie;
234 in_port_t sc_udp_port;
235 int sc_udp_rtable;
236
237 struct rwlock sc_so_lock;
238 struct socket *sc_so4;
239#ifdef INET6
240 struct socket *sc_so6;
241#endif
242
243 size_t sc_aip_num;
244 struct art_root *sc_aip4;
245#ifdef INET6
246 struct art_root *sc_aip6;
247#endif
248
249 struct rwlock sc_peer_lock;
250 size_t sc_peer_num;
251 LIST_HEAD(,wg_peer) *sc_peer;
252 TAILQ_HEAD(,wg_peer) sc_peer_seq;
253 u_long sc_peer_mask;
254
255 struct mutex sc_index_mtx;
256 LIST_HEAD(,wg_index) *sc_index;
257 u_long sc_index_mask;
258
259 struct task sc_handshake;
260 struct mbuf_queue sc_handshake_queue;
261
262 struct task sc_encap;
263 struct task sc_decap;
264 struct wg_ring sc_encap_ring;
265 struct wg_ring sc_decap_ring;
266};
267
268struct wg_peer *
269 wg_peer_create(struct wg_softc *, uint8_t[WG_KEY_SIZE]);
270struct wg_peer *
271 wg_peer_lookup(struct wg_softc *, const uint8_t[WG_KEY_SIZE]);
272void wg_peer_destroy(struct wg_peer *);
273void wg_peer_set_endpoint_from_tag(struct wg_peer *, struct wg_tag *);
274void wg_peer_set_sockaddr(struct wg_peer *, struct sockaddr *);
275int wg_peer_get_sockaddr(struct wg_peer *, struct sockaddr *);
276void wg_peer_clear_src(struct wg_peer *);
277void wg_peer_get_endpoint(struct wg_peer *, struct wg_endpoint *);
278void wg_peer_counters_add(struct wg_peer *, uint64_t, uint64_t);
279
280int wg_aip_add(struct wg_softc *, struct wg_peer *, struct wg_aip_io *);
281struct wg_peer *
282 wg_aip_lookup(struct art_root *, void *);
283int wg_aip_remove(struct wg_softc *, struct wg_peer *,
284 struct wg_aip_io *);
285
286int wg_socket_open(struct socket **, int, in_port_t *, int *, void *);
287void wg_socket_close(struct socket **);
288int wg_bind(struct wg_softc *, in_port_t *, int *);
289void wg_unbind(struct wg_softc *);
290int wg_send(struct wg_softc *, struct wg_endpoint *, struct mbuf *);
291void wg_send_buf(struct wg_softc *, struct wg_endpoint *, uint8_t *,
292 size_t);
293
294struct wg_tag *
295 wg_tag_get(struct mbuf *);
296
297void wg_timers_init(struct wg_timers *);
298void wg_timers_enable(struct wg_timers *);
299void wg_timers_disable(struct wg_timers *);
300void wg_timers_set_persistent_keepalive(struct wg_timers *, uint16_t);
301int wg_timers_get_persistent_keepalive(struct wg_timers *, uint16_t *);
302void wg_timers_get_last_handshake(struct wg_timers *, struct timespec *);
303int wg_timers_expired_handshake_last_sent(struct wg_timers *);
304int wg_timers_check_handshake_last_sent(struct wg_timers *);
305
306void wg_timers_event_data_sent(struct wg_timers *);
307void wg_timers_event_data_received(struct wg_timers *);
308void wg_timers_event_any_authenticated_packet_sent(struct wg_timers *);
309void wg_timers_event_any_authenticated_packet_received(struct wg_timers *);
310void wg_timers_event_handshake_initiated(struct wg_timers *);
311void wg_timers_event_handshake_responded(struct wg_timers *);
312void wg_timers_event_handshake_complete(struct wg_timers *);
313void wg_timers_event_session_derived(struct wg_timers *);
314void wg_timers_event_any_authenticated_packet_traversal(struct wg_timers *);
315void wg_timers_event_want_initiation(struct wg_timers *);
316void wg_timers_event_reset_handshake_last_sent(struct wg_timers *);
317
318void wg_timers_run_send_initiation(void *, int);
319void wg_timers_run_retry_handshake(void *);
320void wg_timers_run_send_keepalive(void *);
321void wg_timers_run_new_handshake(void *);
322void wg_timers_run_zero_key_material(void *);
323void wg_timers_run_persistent_keepalive(void *);
324
325void wg_peer_send_buf(struct wg_peer *, uint8_t *, size_t);
326void wg_send_initiation(void *);
327void wg_send_response(struct wg_peer *);
328void wg_send_cookie(struct wg_softc *, struct cookie_macs *, uint32_t,
329 struct wg_endpoint *e);
330void wg_send_keepalive(void *);
331void wg_peer_clear_secrets(void *);
332void wg_handshake(struct wg_softc *, struct mbuf *);
333void wg_handshake_worker(void *);
334
335void wg_encap(struct wg_softc *, struct mbuf *);
336void wg_decap(struct wg_softc *, struct mbuf *);
337void wg_encap_worker(void *);
338void wg_decap_worker(void *);
339void wg_deliver_out(void *);
340void wg_deliver_in(void *);
341
342int wg_queue_in(struct wg_softc *, struct wg_peer *, struct mbuf *);
343void wg_queue_out(struct wg_softc *, struct wg_peer *);
344struct mbuf *
345 wg_ring_dequeue(struct wg_ring *);
346struct mbuf *
347 wg_queue_dequeue(struct wg_queue *, struct wg_tag **);
348size_t wg_queue_len(struct wg_queue *);
349
350struct noise_remote *
351 wg_remote_get(void *, uint8_t[NOISE_PUBLIC_KEY_LEN]);
352uint32_t
353 wg_index_set(void *, struct noise_remote *);
354struct noise_remote *
355 wg_index_get(void *, uint32_t);
356void wg_index_drop(void *, uint32_t);
357
358struct mbuf *
359 wg_input(void *, struct mbuf *, struct ip *, struct ip6_hdr *, void *,
360 int);
361int wg_output(struct ifnet *, struct mbuf *, struct sockaddr *,
362 struct rtentry *);
363int wg_ioctl_set(struct wg_softc *, struct wg_data_io *);
364int wg_ioctl_get(struct wg_softc *, struct wg_data_io *);
365int wg_ioctl(struct ifnet *, u_long, caddr_t);
366int wg_up(struct wg_softc *);
367void wg_down(struct wg_softc *);
368
369int wg_clone_create(struct if_clone *, int);
370int wg_clone_destroy(struct ifnet *);
371void wgattach(int);
372
373uint64_t peer_counter = 0;
374uint64_t keypair_counter = 0;
375struct pool wg_aip_pool;
376struct pool wg_peer_pool;
377struct pool wg_ratelimit_pool;
378 struct timeval underload_interval = { UNDERLOAD_TIMEOUT, 0 };
379
380size_t wg_counter = 0;
381struct taskq *wg_handshake_taskq;
382struct taskq *wg_crypt_taskq;
383
384struct if_clone wg_cloner =
385 IF_CLONE_INITIALIZER("wg", wg_clone_create, wg_clone_destroy);
386
387struct wg_peer *
388wg_peer_create(struct wg_softc *sc, uint8_t public[WG_KEY_SIZE])
389{
390 struct wg_peer *peer;
391 uint64_t idx;
392
393 rw_assert_wrlock(&sc->sc_lock);
394
395 if (sc->sc_peer_num >= MAX_PEERS_PER_IFACE)
396 return NULL;
397
398 if ((peer = pool_get(&wg_peer_pool, PR_NOWAIT)) == NULL)
399 return NULL;
400
401 peer->p_id = peer_counter++;
402 peer->p_sc = sc;
403
404 noise_remote_init(&peer->p_remote, public, &sc->sc_local);
405 cookie_maker_init(&peer->p_cookie, public);
406 wg_timers_init(&peer->p_timers);
407
408 mtx_init(&peer->p_counters_mtx, IPL_NET);
409 peer->p_counters_tx = 0;
410 peer->p_counters_rx = 0;
411
412 mtx_init(&peer->p_endpoint_mtx, IPL_NET);
413 bzero(&peer->p_endpoint, sizeof(peer->p_endpoint));
414
415 task_set(&peer->p_send_initiation, wg_send_initiation, peer);
416 task_set(&peer->p_send_keepalive, wg_send_keepalive, peer);
417 task_set(&peer->p_clear_secrets, wg_peer_clear_secrets, peer);
418 task_set(&peer->p_deliver_out, wg_deliver_out, peer);
419 task_set(&peer->p_deliver_in, wg_deliver_in, peer);
420
421 mq_init(&peer->p_stage_queue, MAX_STAGED_PKT, IPL_NET);
422 mtx_init(&peer->p_encap_queue.q_mtx, IPL_NET);
423 ml_init(&peer->p_encap_queue.q_list);
424 mtx_init(&peer->p_decap_queue.q_mtx, IPL_NET);
425 ml_init(&peer->p_decap_queue.q_list);
426
427 SLIST_INIT(&peer->p_unused_index);
428 SLIST_INSERT_HEAD(&peer->p_unused_index, &peer->p_index[0],
429 i_unused_entry);
430 SLIST_INSERT_HEAD(&peer->p_unused_index, &peer->p_index[1],
431 i_unused_entry);
432 SLIST_INSERT_HEAD(&peer->p_unused_index, &peer->p_index[2],
433 i_unused_entry);
434
435 LIST_INIT(&peer->p_aip);
436
437 peer->p_start_onlist = 0;
438
439 idx = SipHash24(&sc->sc_secret, public, WG_KEY_SIZE);
440 idx &= sc->sc_peer_mask;
441
442 rw_enter_write(&sc->sc_peer_lock);
443 LIST_INSERT_HEAD(&sc->sc_peer[idx], peer, p_pubkey_entry);
444 TAILQ_INSERT_TAIL(&sc->sc_peer_seq, peer, p_seq_entry);
445 sc->sc_peer_num++;
446 rw_exit_write(&sc->sc_peer_lock);
447
448 DPRINTF(sc, "Peer %llu created\n", peer->p_id);
449 return peer;
450}
451
452struct wg_peer *
453wg_peer_lookup(struct wg_softc *sc, const uint8_t public[WG_KEY_SIZE])
454{
455 uint8_t peer_key[WG_KEY_SIZE];
456 struct wg_peer *peer;
457 uint64_t idx;
458
459 idx = SipHash24(&sc->sc_secret, public, WG_KEY_SIZE);
460 idx &= sc->sc_peer_mask;
461
462 rw_enter_read(&sc->sc_peer_lock);
463 LIST_FOREACH(peer, &sc->sc_peer[idx], p_pubkey_entry) {
464 noise_remote_keys(&peer->p_remote, peer_key, NULL);
465 if (timingsafe_bcmp(peer_key, public, WG_KEY_SIZE) == 0)
466 goto done;
467 }
468 peer = NULL;
469done:
470 rw_exit_read(&sc->sc_peer_lock);
471 return peer;
472}
473
474void
475wg_peer_destroy(struct wg_peer *peer)
476{
477 struct wg_softc *sc = peer->p_sc;
478 struct wg_aip *aip, *taip;
479
480 rw_assert_wrlock(&sc->sc_lock);
481
482 /*
483 * Remove peer from the pubkey hashtable and disable all timeouts.
484 * After this, and flushing wg_handshake_taskq, then no more handshakes
485 * can be started.
486 */
487 rw_enter_write(&sc->sc_peer_lock);
488 LIST_REMOVE(peer, p_pubkey_entry);
489 TAILQ_REMOVE(&sc->sc_peer_seq, peer, p_seq_entry);
490 sc->sc_peer_num--;
491 rw_exit_write(&sc->sc_peer_lock);
492
493 wg_timers_disable(&peer->p_timers);
494
495 taskq_barrier(wg_handshake_taskq);
496
497 /*
498 * Now we drop all allowed ips, to drop all outgoing packets to the
499 * peer. Then drop all the indexes to drop all incoming packets to the
500 * peer. Then we can flush if_snd, wg_crypt_taskq and then nettq to
501 * ensure no more references to the peer exist.
502 */
503 LIST_FOREACH_SAFE(aip, &peer->p_aip, a_entry, taip)
504 wg_aip_remove(sc, peer, &aip->a_data);
505
506 noise_remote_clear(&peer->p_remote);
507
508 NET_LOCK();
509 while (!ifq_empty(&sc->sc_if.if_snd)) {
510 NET_UNLOCK();
511 tsleep_nsec(sc, PWAIT, "wg_ifq", 1000);
512 NET_LOCK();
513 }
514 NET_UNLOCK();
515
516 taskq_barrier(wg_crypt_taskq);
517 taskq_barrier(net_tq(sc->sc_if.if_index));
518
519 DPRINTF(sc, "Peer %llu destroyed\n", peer->p_id);
520 explicit_bzero(peer, sizeof(*peer));
521 pool_put(&wg_peer_pool, peer);
522}
523
524void
525wg_peer_set_endpoint_from_tag(struct wg_peer *peer, struct wg_tag *t)
526{
527 if (memcmp(&t->t_endpoint, &peer->p_endpoint,
528 sizeof(t->t_endpoint)) == 0)
529 return;
530
531 mtx_enter(&peer->p_endpoint_mtx);
532 peer->p_endpoint = t->t_endpoint;
533 mtx_leave(&peer->p_endpoint_mtx);
534}
535
536void
537wg_peer_set_sockaddr(struct wg_peer *peer, struct sockaddr *remote)
538{
539 mtx_enter(&peer->p_endpoint_mtx);
540 memcpy(&peer->p_endpoint.e_remote, remote,
541 sizeof(peer->p_endpoint.e_remote));
542 bzero(&peer->p_endpoint.e_local, sizeof(peer->p_endpoint.e_local));
543 mtx_leave(&peer->p_endpoint_mtx);
544}
545
546int
547wg_peer_get_sockaddr(struct wg_peer *peer, struct sockaddr *remote)
548{
549 int ret = 0;
550
551 mtx_enter(&peer->p_endpoint_mtx);
552 if (peer->p_endpoint.e_remote.r_sa.sa_family != AF_UNSPEC)
553 memcpy(remote, &peer->p_endpoint.e_remote,
554 sizeof(peer->p_endpoint.e_remote));
555 else
556 ret = ENOENT;
557 mtx_leave(&peer->p_endpoint_mtx);
558 return ret;
559}
560
561void
562wg_peer_clear_src(struct wg_peer *peer)
563{
564 mtx_enter(&peer->p_endpoint_mtx);
565 bzero(&peer->p_endpoint.e_local, sizeof(peer->p_endpoint.e_local));
566 mtx_leave(&peer->p_endpoint_mtx);
567}
568
569void
570wg_peer_get_endpoint(struct wg_peer *peer, struct wg_endpoint *endpoint)
571{
572 mtx_enter(&peer->p_endpoint_mtx);
573 memcpy(endpoint, &peer->p_endpoint, sizeof(*endpoint));
574 mtx_leave(&peer->p_endpoint_mtx);
575}
576
577void
578wg_peer_counters_add(struct wg_peer *peer, uint64_t tx, uint64_t rx)
579{
580 mtx_enter(&peer->p_counters_mtx);
581 peer->p_counters_tx += tx;
582 peer->p_counters_rx += rx;
583 mtx_leave(&peer->p_counters_mtx);
584}
585
586int
587wg_aip_add(struct wg_softc *sc, struct wg_peer *peer, struct wg_aip_io *d)
588{
589 struct art_root *root;
590 struct art_node *node;
591 struct wg_aip *aip;
592 int ret = 0;
593
594 switch (d->a_af) {
595 case AF_INET: root = sc->sc_aip4; break;
596#ifdef INET6
597 case AF_INET6: root = sc->sc_aip6; break;
598#endif
599 default: return EAFNOSUPPORT;
600 }
601
602 if ((aip = pool_get(&wg_aip_pool, PR_NOWAIT|PR_ZERO)) == NULL)
603 return ENOBUFS;
604
605 rw_enter_write(&root->ar_lock);
606 node = art_insert(root, &aip->a_node, &d->a_addr, d->a_cidr);
607
608 if (node == &aip->a_node) {
609 aip->a_peer = peer;
610 aip->a_data = *d;
611 LIST_INSERT_HEAD(&peer->p_aip, aip, a_entry);
612 sc->sc_aip_num++;
613 } else {
614 pool_put(&wg_aip_pool, aip);
615 aip = (struct wg_aip *) node;
616 if (aip->a_peer != peer) {
617 LIST_REMOVE(aip, a_entry);
618 LIST_INSERT_HEAD(&peer->p_aip, aip, a_entry);
619 aip->a_peer = peer;
620 }
621 }
622 rw_exit_write(&root->ar_lock);
623 return ret;
624}
625
626struct wg_peer *
627wg_aip_lookup(struct art_root *root, void *addr)
628{
629 struct srp_ref sr;
630 struct art_node *node;
631
632 node = art_match(root, addr, &sr);
633 srp_leave(&sr);
634
635 return node == NULL ? NULL : ((struct wg_aip *) node)->a_peer;
636}
637
638int
639wg_aip_remove(struct wg_softc *sc, struct wg_peer *peer, struct wg_aip_io *d)
640{
641 struct srp_ref sr;
642 struct art_root *root;
643 struct art_node *node;
644 struct wg_aip *aip;
645 int ret = 0;
646
647 switch (d->a_af) {
648 case AF_INET: root = sc->sc_aip4; break;
649#ifdef INET6
650 case AF_INET6: root = sc->sc_aip6; break;
651#endif
652 default: return EAFNOSUPPORT;
653 }
654
655 rw_enter_write(&root->ar_lock);
656 if ((node = art_lookup(root, &d->a_addr, d->a_cidr, &sr)) == NULL) {
657 ret = ENOENT;
658 } else if (((struct wg_aip *) node)->a_peer != peer) {
659 ret = EXDEV;
660 } else {
661 aip = (struct wg_aip *)node;
662 if (art_delete(root, node, &d->a_addr, d->a_cidr) == NULL)
663 panic("art_delete failed to delete node %p", node);
664
665 sc->sc_aip_num--;
666 LIST_REMOVE(aip, a_entry);
667 pool_put(&wg_aip_pool, aip);
668 }
669
670 srp_leave(&sr);
671 rw_exit_write(&root->ar_lock);
672 return ret;
673}
674
675int
676wg_socket_open(struct socket **so, int af, in_port_t *port,
677 int *rtable, void *upcall_arg)
678{
679 struct mbuf mhostnam, mrtable;
680#ifdef INET6
681 struct sockaddr_in6 *sin6;
682#endif
683 struct sockaddr_in *sin;
684 int ret, s;
685
686 m_inithdr(&mhostnam);
687 m_inithdr(&mrtable);
688
689 bzero(mtod(&mrtable, u_int *), sizeof(u_int));
690 *mtod(&mrtable, u_int *) = *rtable;
691 mrtable.m_len = sizeof(u_int);
692
693 if (af == AF_INET) {
694 sin = mtod(&mhostnam, struct sockaddr_in *);
695 bzero(sin, sizeof(*sin));
696 sin->sin_len = sizeof(*sin);
697 sin->sin_family = AF_INET;
698 sin->sin_port = *port;
699 sin->sin_addr.s_addr = INADDR_ANY;
700 mhostnam.m_len = sin->sin_len;
701#ifdef INET6
702 } else if (af == AF_INET6) {
703 sin6 = mtod(&mhostnam, struct sockaddr_in6 *);
704 bzero(sin6, sizeof(*sin6));
705 sin6->sin6_len = sizeof(*sin6);
706 sin6->sin6_family = AF_INET6;
707 sin6->sin6_port = *port;
708 sin6->sin6_addr = (struct in6_addr) { .s6_addr = { 0 } };
709 mhostnam.m_len = sin6->sin6_len;
710#endif
711 } else {
712 return EAFNOSUPPORT;
713 }
714
715 if ((ret = socreate(af, so, SOCK_DGRAM, 0)) != 0)
716 return ret;
717
718 s = solock(*so);
719 sotoinpcb(*so)->inp_upcall = wg_input;
720 sotoinpcb(*so)->inp_upcall_arg = upcall_arg;
721
722 if ((ret = sosetopt(*so, SOL_SOCKET, SO_RTABLE, &mrtable)) == 0) {
723 if ((ret = sobind(*so, &mhostnam, curproc)) == 0) {
724 *port = sotoinpcb(*so)->inp_lport;
725 *rtable = sotoinpcb(*so)->inp_rtableid;
726 }
727 }
728 sounlock(*so, s);
729
730 if (ret != 0)
731 wg_socket_close(so);
732
733 return ret;
734}
735
736void
737wg_socket_close(struct socket **so)
738{
739 if (*so != NULL && soclose(*so, 0) != 0)
740 panic("Unable to close wg socket");
741 *so = NULL;
742}
743
744int
745wg_bind(struct wg_softc *sc, in_port_t *portp, int *rtablep)
746{
747 int ret = 0, rtable = *rtablep;
748 in_port_t port = *portp;
749 struct socket *so4;
750#ifdef INET6
751 struct socket *so6;
752 int retries = 0;
753retry:
754#endif
755 if ((ret = wg_socket_open(&so4, AF_INET, &port, &rtable, sc)) != 0)
756 return ret;
757
758#ifdef INET6
759 if ((ret = wg_socket_open(&so6, AF_INET6, &port, &rtable, sc)) != 0) {
760 if (ret == EADDRINUSE && *portp == 0 && retries++ < 100)
761 goto retry;
762 wg_socket_close(&so4);
763 return ret;
764 }
765#endif
766
767 rw_enter_write(&sc->sc_so_lock);
768 wg_socket_close(&sc->sc_so4);
769 sc->sc_so4 = so4;
770#ifdef INET6
771 wg_socket_close(&sc->sc_so6);
772 sc->sc_so6 = so6;
773#endif
774 rw_exit_write(&sc->sc_so_lock);
775
776 *portp = port;
777 *rtablep = rtable;
778 return 0;
779}
780
781void
782wg_unbind(struct wg_softc *sc)
783{
784 rw_enter_write(&sc->sc_so_lock);
785 wg_socket_close(&sc->sc_so4);
786#ifdef INET6
787 wg_socket_close(&sc->sc_so6);
788#endif
789 rw_exit_write(&sc->sc_so_lock);
790}
791
792int
793wg_send(struct wg_softc *sc, struct wg_endpoint *e, struct mbuf *m)
794{
795 struct mbuf peernam, *control = NULL;
796 int ret;
797
798 /* Get local control address before locking */
799 if (e->e_remote.r_sa.sa_family == AF_INET) {
800 if (e->e_local.l_in.s_addr != INADDR_ANY)
801 control = sbcreatecontrol(&e->e_local.l_in,
802 sizeof(struct in_addr), IP_SENDSRCADDR,
803 IPPROTO_IP);
804#ifdef INET6
805 } else if (e->e_remote.r_sa.sa_family == AF_INET6) {
806 if (!IN6_IS_ADDR_UNSPECIFIED(&e->e_local.l_in6))
807 control = sbcreatecontrol(&e->e_local.l_pktinfo6,
808 sizeof(struct in6_pktinfo), IPV6_PKTINFO,
809 IPPROTO_IPV6);
810#endif
811 } else {
812 m_freem(m);
813 return EAFNOSUPPORT;
814 }
815
816 /* Get remote address */
817 peernam.m_type = MT_SONAME;
818 peernam.m_next = NULL;
819 peernam.m_nextpkt = NULL;
820 peernam.m_data = (void *)&e->e_remote.r_sa;
821 peernam.m_len = e->e_remote.r_sa.sa_len;
822 peernam.m_flags = 0;
823
824 rw_enter_read(&sc->sc_so_lock);
825 if (e->e_remote.r_sa.sa_family == AF_INET && sc->sc_so4 != NULL)
826 ret = sosend(sc->sc_so4, &peernam, NULL, m, control, 0);
827#ifdef INET6
828 else if (e->e_remote.r_sa.sa_family == AF_INET6 && sc->sc_so6 != NULL)
829 ret = sosend(sc->sc_so6, &peernam, NULL, m, control, 0);
830#endif
831 else {
832 ret = ENOTCONN;
833 m_freem(control);
834 m_freem(m);
835 }
836 rw_exit_read(&sc->sc_so_lock);
837
838 return ret;
839}
840
841void
842wg_send_buf(struct wg_softc *sc, struct wg_endpoint *e, uint8_t *buf,
843 size_t len)
844{
845 struct mbuf *m;
846 int ret = 0;
847
848retry:
849 m = m_gethdr(M_WAIT, MT_DATA);
850 m->m_len = 0;
851 m_copyback(m, 0, len, buf, M_WAIT);
852
853 /* As we're sending a handshake packet here, we want high priority */
854 m->m_pkthdr.pf.prio = IFQ_MAXPRIO;
855
856 if (ret == 0) {
857 ret = wg_send(sc, e, m);
858 /* Retry if we couldn't bind to e->e_local */
859 if (ret == EADDRNOTAVAIL) {
860 bzero(&e->e_local, sizeof(e->e_local));
861 goto retry;
862 }
863 } else {
864 ret = wg_send(sc, e, m);
865 if (ret != 0)
866 DPRINTF(sc, "Unable to send packet\n");
867 }
868}
869
870struct wg_tag *
871wg_tag_get(struct mbuf *m)
872{
873 struct m_tag *mtag;
874
875 if ((mtag = m_tag_find(m, PACKET_TAG_WIREGUARD, NULL)) == NULL) {
876 mtag = m_tag_get(PACKET_TAG_WIREGUARD, sizeof(struct wg_tag),
877 M_NOWAIT);
878 if (mtag == NULL)
879 return (NULL);
880 bzero(mtag + 1, sizeof(struct wg_tag));
881 m_tag_prepend(m, mtag);
882 }
883 return ((struct wg_tag *)(mtag + 1));
884}
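
/* Note added for this report: m_tag_get(9) may fail under memory
 * pressure, so wg_tag_get() can return NULL. wg_send_keepalive() below
 * checks for that; the path flagged at line 1883 dereferences the tag's
 * t_done field without such a check. */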
885
886/*
887 * The following section handles the timeout callbacks for a WireGuard session.
888 * These functions provide an "event based" model for controlling wg(8) session
889 * timers. All function calls occur after the specified event below.
890 *
891 * wg_timers_event_data_sent:
892 * tx: data
893 * wg_timers_event_data_received:
894 * rx: data
895 * wg_timers_event_any_authenticated_packet_sent:
896 * tx: keepalive, data, handshake
897 * wg_timers_event_any_authenticated_packet_received:
898 * rx: keepalive, data, handshake
899 * wg_timers_event_any_authenticated_packet_traversal:
900 * tx, rx: keepalive, data, handshake
901 * wg_timers_event_handshake_initiated:
902 * tx: initiation
903 * wg_timers_event_handshake_responded:
904 * tx: response
905 * wg_timers_event_handshake_complete:
906 * rx: response, confirmation data
907 * wg_timers_event_session_derived:
908 * tx: response, rx: response
909 * wg_timers_event_want_initiation:
910 * tx: data failed, old keys expiring
911 * wg_timers_event_reset_handshake_last_sent:
912 * anytime we may immediately want a new handshake
913 */
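
/* Illustrative example, not part of the original source: under this
 * model, transmitting a data packet arms the new-handshake timer
 * unless it is already pending (see wg_timers_event_data_sent() below):
 *
 *	wg_timers_event_data_sent(&peer->p_timers);
 *	    -> timeout_add_msec(&t->t_new_handshake,
 *	           NEW_HANDSHAKE_TIMEOUT * 1000 + jitter);
 *
 * and receiving any authenticated packet cancels it again via
 * wg_timers_event_any_authenticated_packet_received(). */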
914void
915wg_timers_init(struct wg_timers *t)
916{
917 bzero(t, sizeof(*t));
918 rw_init(&t->t_lock, "wg_timers");
919 mtx_init(&t->t_handshake_mtx, IPL_NET);
920
921 timeout_set(&t->t_new_handshake, wg_timers_run_new_handshake, t);
922 timeout_set(&t->t_send_keepalive, wg_timers_run_send_keepalive, t);
923 timeout_set(&t->t_retry_handshake, wg_timers_run_retry_handshake, t);
924 timeout_set(&t->t_persistent_keepalive,
925 wg_timers_run_persistent_keepalive, t);
926 timeout_set(&t->t_zero_key_material,
927 wg_timers_run_zero_key_material, t);
928}
929
930void
931wg_timers_enable(struct wg_timers *t)
932{
933 rw_enter_write(&t->t_lock);
934 t->t_disabled = 0;
935 rw_exit_write(&t->t_lock);
936 wg_timers_run_persistent_keepalive(t);
937}
938
939void
940wg_timers_disable(struct wg_timers *t)
941{
942 rw_enter_write(&t->t_lock);
943 t->t_disabled = 1;
944 t->t_need_another_keepalive = 0;
945 rw_exit_write(&t->t_lock);
946
947 timeout_del_barrier(&t->t_new_handshake);
948 timeout_del_barrier(&t->t_send_keepalive);
949 timeout_del_barrier(&t->t_retry_handshake);
950 timeout_del_barrier(&t->t_persistent_keepalive);
951 timeout_del_barrier(&t->t_zero_key_material);
952}
953
954void
955wg_timers_set_persistent_keepalive(struct wg_timers *t, uint16_t interval)
956{
957 rw_enter_read(&t->t_lock);
958 if (!t->t_disabled) {
959 t->t_persistent_keepalive_interval = interval;
960 wg_timers_run_persistent_keepalive(t);
961 }
962 rw_exit_read(&t->t_lock);
963}
964
965int
966wg_timers_get_persistent_keepalive(struct wg_timers *t, uint16_t *interval)
967{
968 *interval = t->t_persistent_keepalive_interval;
969 return *interval > 0 ? 0 : ENOENT;
970}
971
972void
973wg_timers_get_last_handshake(struct wg_timers *t, struct timespec *time)
974{
975 mtx_enter(&t->t_handshake_mtx);
976 *time = t->t_handshake_complete;
977 mtx_leave(&t->t_handshake_mtx);
978}
979
980int
981wg_timers_expired_handshake_last_sent(struct wg_timers *t)
982{
983 struct timespec uptime;
984 struct timespec expire = { .tv_sec = REKEY_TIMEOUT, .tv_nsec = 0 };
985
986 getnanouptime(&uptime);
987 timespecadd(&t->t_handshake_last_sent, &expire, &expire);
988 return timespeccmp(&uptime, &expire, >) ? ETIMEDOUT : 0;
989}
990
991int
992wg_timers_check_handshake_last_sent(struct wg_timers *t)
993{
994 int ret;
995 mtx_enter(&t->t_handshake_mtx);
996 if ((ret = wg_timers_expired_handshake_last_sent(t)) == ETIMEDOUT)
997 getnanouptime(&t->t_handshake_last_sent);
998 mtx_leave(&t->t_handshake_mtx);
999 return ret;
1000}
1001
1002void
1003wg_timers_event_data_sent(struct wg_timers *t)
1004{
1005 int msecs = NEW_HANDSHAKE_TIMEOUT * 1000;
1006 msecs += arc4random_uniform(REKEY_TIMEOUT_JITTER);
1007
1008 rw_enter_read(&t->t_lock);
1009 if (!t->t_disabled && !timeout_pending(&t->t_new_handshake))
1010 timeout_add_msec(&t->t_new_handshake, msecs);
1011 rw_exit_read(&t->t_lock);
1012}
1013
1014void
1015wg_timers_event_data_received(struct wg_timers *t)
1016{
1017 rw_enter_read(&t->t_lock);
1018 if (!t->t_disabled) {
1019 if (!timeout_pending(&t->t_send_keepalive))
1020 timeout_add_sec(&t->t_send_keepalive,
1021 KEEPALIVE_TIMEOUT);
1022 else
1023 t->t_need_another_keepalive = 1;
1024 }
1025 rw_exit_read(&t->t_lock);
1026}
1027
1028void
1029wg_timers_event_any_authenticated_packet_sent(struct wg_timers *t)
1030{
1031 timeout_del(&t->t_send_keepalive);
1032}
1033
1034void
1035wg_timers_event_any_authenticated_packet_received(struct wg_timers *t)
1036{
1037 timeout_del(&t->t_new_handshake);
1038}
1039
1040void
1041wg_timers_event_any_authenticated_packet_traversal(struct wg_timers *t)
1042{
1043 rw_enter_read(&t->t_lock);
1044 if (!t->t_disabled && t->t_persistent_keepalive_interval > 0)
1045 timeout_add_sec(&t->t_persistent_keepalive,
1046 t->t_persistent_keepalive_interval);
1047 rw_exit_read(&t->t_lock);
1048}
1049
1050void
1051wg_timers_event_handshake_initiated(struct wg_timers *t)
1052{
1053 int msecs = REKEY_TIMEOUT * 1000;
1054 msecs += arc4random_uniform(REKEY_TIMEOUT_JITTER);
1055
1056 rw_enter_read(&t->t_lock);
1057 if (!t->t_disabled)
1058 timeout_add_msec(&t->t_retry_handshake, msecs);
1059 rw_exit_read(&t->t_lock);
1060}
1061
1062void
1063wg_timers_event_handshake_responded(struct wg_timers *t)
1064{
1065 mtx_enter(&t->t_handshake_mtx);
1066 getnanouptime(&t->t_handshake_last_sent);
1067 mtx_leave(&t->t_handshake_mtx);
1068}
1069
1070void
1071wg_timers_event_handshake_complete(struct wg_timers *t)
1072{
1073 rw_enter_read(&t->t_lock);
1074 if (!t->t_disabled) {
1075 mtx_enter(&t->t_handshake_mtx);
1076 timeout_del(&t->t_retry_handshake);
1077 t->t_handshake_retries = 0;
1078 getnanotime(&t->t_handshake_complete);
1079 mtx_leave(&t->t_handshake_mtx);
1080 wg_timers_run_send_keepalive(t);
1081 }
1082 rw_exit_read(&t->t_lock);
1083}
1084
1085void
1086wg_timers_event_session_derived(struct wg_timers *t)
1087{
1088 rw_enter_read(&t->t_lock);
1089 if (!t->t_disabled)
1090 timeout_add_sec(&t->t_zero_key_material, REJECT_AFTER_TIME * 3);
1091 rw_exit_read(&t->t_lock);
1092}
1093
1094void
1095wg_timers_event_want_initiation(struct wg_timers *t)
1096{
1097 rw_enter_read(&t->t_lock);
1098 if (!t->t_disabled)
1099 wg_timers_run_send_initiation(t, 0);
1100 rw_exit_read(&t->t_lock);
1101}
1102
1103void
1104wg_timers_event_reset_handshake_last_sent(struct wg_timers *t)
1105{
1106 mtx_enter(&t->t_handshake_mtx);
1107 t->t_handshake_last_sent.tv_sec -= (REKEY_TIMEOUT + 1);
1108 mtx_leave(&t->t_handshake_mtx);
1109}
1110
1111void
1112wg_timers_run_send_initiation(void *_t, int is_retry)
1113{
1114 struct wg_timers *t = _t;
1115 struct wg_peer *peer = CONTAINER_OF(t, struct wg_peer, p_timers);
1116 if (!is_retry)
1117 t->t_handshake_retries = 0;
1118 if (wg_timers_expired_handshake_last_sent(t) == ETIMEDOUT)
1119 task_add(wg_handshake_taskq, &peer->p_send_initiation);
1120}
1121
1122void
1123wg_timers_run_retry_handshake(void *_t)
1124{
1125 struct wg_timers *t = _t;
1126 struct wg_peer *peer = CONTAINER_OF(t, struct wg_peer, p_timers);
1127
1128 mtx_enter(&t->t_handshake_mtx);
1129 if (t->t_handshake_retries <= MAX_TIMER_HANDSHAKES) {
1130 t->t_handshake_retries++;
1131 mtx_leave(&t->t_handshake_mtx);
1132
1133 DPRINTF(peer->p_sc, "Handshake for peer %llu did not complete "
1134 "after %d seconds, retrying (try %d)\n", peer->p_id,
1135 REKEY_TIMEOUT, t->t_handshake_retries + 1);
1136 wg_peer_clear_src(peer);
1137 wg_timers_run_send_initiation(t, 1);
1138 } else {
1139 mtx_leave(&t->t_handshake_mtx);
1140
1141 DPRINTF(peer->p_sc, "Handshake for peer %llu did not complete "
1142 "after %d retries, giving up\n", peer->p_id,
1143 MAX_TIMER_HANDSHAKES + 2);
1144
1145 timeout_del(&t->t_send_keepalive);
1146 mq_purge(&peer->p_stage_queue);
1147 if (!timeout_pending(&t->t_zero_key_material))
1148 timeout_add_sec(&t->t_zero_key_material,
1149 REJECT_AFTER_TIME * 3);
1150 }
1151}
1152
1153void
1154wg_timers_run_send_keepalive(void *_t)
1155{
1156 struct wg_timers *t = _t;
1157 struct wg_peer *peer = CONTAINER_OF(t, struct wg_peer, p_timers);
1158
1159 task_add(wg_crypt_taskq, &peer->p_send_keepalive);
1160 if (t->t_need_another_keepalive) {
1161 t->t_need_another_keepalive = 0;
1162 timeout_add_sec(&t->t_send_keepalive, KEEPALIVE_TIMEOUT);
1163 }
1164}
1165
1166void
1167wg_timers_run_new_handshake(void *_t)
1168{
1169 struct wg_timers *t = _t;
1170 struct wg_peer *peer = CONTAINER_OF(t, struct wg_peer, p_timers);
1171
1172 DPRINTF(peer->p_sc, "Retrying handshake with peer %llu because we "
1173 "stopped hearing back after %d seconds\n",
1174 peer->p_id, NEW_HANDSHAKE_TIMEOUT);
1175 wg_peer_clear_src(peer);
1176
1177 wg_timers_run_send_initiation(t, 0);
1178}
1179
1180void
1181wg_timers_run_zero_key_material(void *_t)
1182{
1183 struct wg_timers *t = _t;
1184 struct wg_peer *peer = CONTAINER_OF(t, struct wg_peer, p_timers);
1185
1186 DPRINTF(peer->p_sc, "Zeroing out keys for peer %llu\n", peer->p_id);
1187 task_add(wg_handshake_taskq, &peer->p_clear_secrets);
1188}
1189
1190void
1191wg_timers_run_persistent_keepalive(void *_t)
1192{
1193 struct wg_timers *t = _t;
1194 struct wg_peer *peer = CONTAINER_OF(t, struct wg_peer, p_timers);
1195 if (t->t_persistent_keepalive_interval != 0)
1196 task_add(wg_crypt_taskq, &peer->p_send_keepalive);
1197}
1198
1199/* The following functions handle handshakes */
1200void
1201wg_peer_send_buf(struct wg_peer *peer, uint8_t *buf, size_t len)
1202{
1203 struct wg_endpoint endpoint;
1204
1205 wg_peer_counters_add(peer, len, 0);
1206 wg_timers_event_any_authenticated_packet_traversal(&peer->p_timers);
1207 wg_timers_event_any_authenticated_packet_sent(&peer->p_timers);
1208 wg_peer_get_endpoint(peer, &endpoint);
1209 wg_send_buf(peer->p_sc, &endpoint, buf, len);
1210}
1211
1212void
1213wg_send_initiation(void *_peer)
1214{
1215 struct wg_peer *peer = _peer;
1216 struct wg_pkt_initiation pkt;
1217
1218 if (wg_timers_check_handshake_last_sent(&peer->p_timers) != ETIMEDOUT)
1219 return;
1220
1221 DPRINTF(peer->p_sc, "Sending handshake initiation to peer %llu\n",
1222 peer->p_id);
1223
1224 if (noise_create_initiation(&peer->p_remote, &pkt.s_idx, pkt.ue, pkt.es,
1225 pkt.ets) != 0)
1226 return;
1227 pkt.t = WG_PKT_INITIATION;
1228 cookie_maker_mac(&peer->p_cookie, &pkt.m, &pkt,
1229 sizeof(pkt)-sizeof(pkt.m));
1230 wg_peer_send_buf(peer, (uint8_t *)&pkt, sizeof(pkt));
1231 wg_timers_event_handshake_initiated(&peer->p_timers);
1232}
1233
1234void
1235wg_send_response(struct wg_peer *peer)
1236{
1237 struct wg_pkt_response pkt;
1238
1239 DPRINTF(peer->p_sc, "Sending handshake response to peer %llu\n",
1240 peer->p_id);
1241
1242 if (noise_create_response(&peer->p_remote, &pkt.s_idx, &pkt.r_idx,
1243 pkt.ue, pkt.en) != 0)
1244 return;
1245 if (noise_remote_begin_session(&peer->p_remote) != 0)
1246 return;
1247 wg_timers_event_session_derived(&peer->p_timers);
1248 pkt.t = WG_PKT_RESPONSE;
1249 cookie_maker_mac(&peer->p_cookie, &pkt.m, &pkt,
1250 sizeof(pkt)-sizeof(pkt.m));
1251 wg_timers_event_handshake_responded(&peer->p_timers);
1252 wg_peer_send_buf(peer, (uint8_t *)&pkt, sizeof(pkt));
1253}
1254
1255void
1256wg_send_cookie(struct wg_softc *sc, struct cookie_macs *cm, uint32_t idx,
1257 struct wg_endpoint *e)
1258{
1259 struct wg_pkt_cookie pkt;
1260
1261 DPRINTF(sc, "Sending cookie response for denied handshake message\n");
1262
1263 pkt.t = WG_PKT_COOKIE;
1264 pkt.r_idx = idx;
1265
1266 cookie_checker_create_payload(&sc->sc_cookie, cm, pkt.nonce,
1267 pkt.ec, &e->e_remote.r_sa);
1268
1269 wg_send_buf(sc, e, (uint8_t *)&pkt, sizeof(pkt));
1270}
1271
1272void
1273wg_send_keepalive(void *_peer)
1274{
1275 struct wg_peer *peer = _peer;
1276 struct wg_softc *sc = peer->p_sc;
1277 struct wg_tag *t;
1278 struct mbuf *m;
1279
1280 if (!mq_empty(&peer->p_stage_queue))
1281 goto send;
1282
1283 if ((m = m_gethdr(M_NOWAIT, MT_DATA)) == NULL)
1284 return;
1285
1286 if ((t = wg_tag_get(m)) == NULL) {
1287 m_freem(m);
1288 return;
1289 }
1290
1291 m->m_len = 0;
1292 m_calchdrlen(m);
1293
1294 t->t_peer = peer;
1295 t->t_mbuf = NULL;
1296 t->t_done = 0;
1297 t->t_mtu = 0; /* MTU == 0 OK for keepalive */
1298
1299 mq_push(&peer->p_stage_queue, m);
1300send:
1301 if (noise_remote_ready(&peer->p_remote) == 0) {
1302 wg_queue_out(sc, peer);
1303 task_add(wg_crypt_taskq, &sc->sc_encap);
1304 } else {
1305 wg_timers_event_want_initiation(&peer->p_timers);
1306 }
1307}
1308
1309void
1310wg_peer_clear_secrets(void *_peer)
1311{
1312 struct wg_peer *peer = _peer;
1313 noise_remote_clear(&peer->p_remote);
1314}
1315
1316 void
1317 wg_handshake(struct wg_softc *sc, struct mbuf *m)
1318 {
1319 	struct wg_tag *t;
1320 	struct wg_pkt_initiation *init;
1321 	struct wg_pkt_response *resp;
1322 	struct wg_pkt_cookie *cook;
1323 	struct wg_peer *peer;
1324 	struct noise_remote *remote;
1325 	int res, underload = 0;
1326 	static struct timeval wg_last_underload; /* microuptime */
1327
1328 	if (mq_len(&sc->sc_handshake_queue) >= MAX_QUEUED_HANDSHAKES/8) {
1329 		getmicrouptime(&wg_last_underload);
1330 		underload = 1;
1331 	} else if (wg_last_underload.tv_sec != 0) {
1332 		if (!ratecheck(&wg_last_underload, &underload_interval))
1333 			underload = 1;
1334 		else
1335 			bzero(&wg_last_underload, sizeof(wg_last_underload));
1336 	}
1337
1338 	t = wg_tag_get(m);
1339
1340 	switch (*mtod(m, uint32_t *)) {
1341 	case WG_PKT_INITIATION:
1342 		init = mtod(m, struct wg_pkt_initiation *);
1343
1344 		res = cookie_checker_validate_macs(&sc->sc_cookie, &init->m,
1345 		    init, sizeof(*init) - sizeof(init->m),
1346 		    underload, &t->t_endpoint.e_remote.r_sa);
1347
1348 		if (res == EINVAL) {
1349 			DPRINTF(sc, "Invalid initiation MAC\n");
1350 			goto error;
1351 		} else if (res == ECONNREFUSED) {
1352 			DPRINTF(sc, "Handshake ratelimited\n");
1353 			goto error;
1354 		} else if (res == EAGAIN) {
1355 			wg_send_cookie(sc, &init->m, init->s_idx,
1356 			    &t->t_endpoint);
1357 			goto error;
1358 		} else if (res != 0) {
1359 			panic("unexpected response: %d", res);
1360 		}
1361
1362 		if (noise_consume_initiation(&sc->sc_local, &remote,
1363 		    init->s_idx, init->ue, init->es, init->ets) != 0) {
1364 			DPRINTF(sc, "Invalid handshake initiation\n");
1365 			goto error;
1366 		}
1367
1368 		peer = CONTAINER_OF(remote, struct wg_peer, p_remote);
1369
1370 		DPRINTF(sc, "Receiving handshake initiation from peer %llu\n",
1371 		    peer->p_id);
1372
1373 		wg_peer_counters_add(peer, 0, sizeof(*init));
1374 		wg_peer_set_endpoint_from_tag(peer, t);
1375 		wg_send_response(peer);
1376 		break;
1377 	case WG_PKT_RESPONSE:
1378 		resp = mtod(m, struct wg_pkt_response *);
1379
1380 		res = cookie_checker_validate_macs(&sc->sc_cookie, &resp->m,
1381 		    resp, sizeof(*resp) - sizeof(resp->m),
1382 		    underload, &t->t_endpoint.e_remote.r_sa);
1383
1384 		if (res == EINVAL) {
1385 			DPRINTF(sc, "Invalid response MAC\n");
1386 			goto error;
1387 		} else if (res == ECONNREFUSED) {
1388 			DPRINTF(sc, "Handshake ratelimited\n");
1389 			goto error;
1390 		} else if (res == EAGAIN) {
1391 			wg_send_cookie(sc, &resp->m, resp->s_idx,
1392 			    &t->t_endpoint);
1393 			goto error;
1394 		} else if (res != 0) {
1395 			panic("unexpected response: %d", res);
1396 		}
1397
1398 		if ((remote = wg_index_get(sc, resp->r_idx)) == NULL) {
1399 			DPRINTF(sc, "Unknown handshake response\n");
1400 			goto error;
1401 		}
1402
1403 		peer = CONTAINER_OF(remote, struct wg_peer, p_remote);
1404
1405 		if (noise_consume_response(remote, resp->s_idx, resp->r_idx,
1406 		    resp->ue, resp->en) != 0) {
1407 			DPRINTF(sc, "Invalid handshake response\n");
1408 			goto error;
1409 		}
1410
1411 		DPRINTF(sc, "Receiving handshake response from peer %llu\n",
1412 		    peer->p_id);
1413
1414 		wg_peer_counters_add(peer, 0, sizeof(*resp));
1415 		wg_peer_set_endpoint_from_tag(peer, t);
1416 		if (noise_remote_begin_session(&peer->p_remote) == 0) {
1417 			wg_timers_event_session_derived(&peer->p_timers);
1418 			wg_timers_event_handshake_complete(&peer->p_timers);
1419 		}
1420 		break;
1421 	case WG_PKT_COOKIE:
1422 		cook = mtod(m, struct wg_pkt_cookie *);
1423
1424 		if ((remote = wg_index_get(sc, cook->r_idx)) == NULL) {
1425 			DPRINTF(sc, "Unknown cookie index\n");
1426 			goto error;
1427 		}
1428
1429 		peer = CONTAINER_OF(remote, struct wg_peer, p_remote);
1430
1431 		if (cookie_maker_consume_payload(&peer->p_cookie,
1432 		    cook->nonce, cook->ec) != 0) {
1433 			DPRINTF(sc, "Could not decrypt cookie response\n");
1434 			goto error;
1435 		}
1436
1437 		DPRINTF(sc, "Receiving cookie response\n");
1438 		goto error;
1439 	default:
1440 		panic("invalid packet in handshake queue");
1441 	}
1442
1443 	wg_timers_event_any_authenticated_packet_received(&peer->p_timers);
1444 	wg_timers_event_any_authenticated_packet_traversal(&peer->p_timers);
1445 error:
1446 	m_freem(m);
1447 }
1448
1449 void
1450 wg_handshake_worker(void *_sc)
1451 {
1452 	struct mbuf *m;
1453 	struct wg_softc *sc = _sc;
1454 	while ((m = mq_dequeue(&sc->sc_handshake_queue)) != NULL)
1455 		wg_handshake(sc, m);
1456 }
1457
1458 /*
1459  * The following functions handle encapsulation (encryption) and
1460  * decapsulation (decryption). The wg_{en,de}cap functions will run in the
1461  * sc_crypt_taskq, while wg_deliver_{in,out} must be serialised and will run
1462  * in nettq.
1463  *
1464  * The packets are tracked in two queues, a serial queue and a parallel queue.
1465  * - The parallel queue is used to distribute the encryption across multiple
1466  *   threads.
1467  * - The serial queue ensures that packets are not reordered and are
1468  *   delivered in sequence.
1469  * The wg_tag attached to the packet contains two fields to help the two
1470  * queues interact.
1471  * - t_done: The parallel queue has finished with the packet, now the serial
1472  *           queue can do its work.
1473  * - t_mbuf: Used to store the *crypted packet. In the case of encryption,
1474  *           this is a newly allocated packet, and in the case of decryption,
1475  *           it is a pointer to the same packet, that has been decrypted and
1476  *           truncated. If t_mbuf is NULL, then *cryption failed and this
1477  *           packet should not be passed on.
1478  * wg_{en,de}cap work on the parallel queue, while wg_deliver_{in,out} work
1479  * on the serial queue.
1480  */
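
A rough sketch of the wg_tag fields this comment describes, reconstructed
from the description above (the authoritative definition appears earlier in
this file; member order and any remaining fields may differ):

	struct wg_tag {
		struct wg_endpoint	 t_endpoint;	/* where the packet came from / goes to */
		struct wg_peer		*t_peer;	/* owning peer */
		struct mbuf		*t_mbuf;	/* *crypted result; NULL if *cryption failed */
		int			 t_done;	/* parallel queue is finished with the packet */
		int			 t_mtu;		/* MTU at enqueue time; 0 OK for keepalives */
	};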
1481 void
1482 wg_encap(struct wg_softc *sc, struct mbuf *m)
1483 {
1484 	int res = 0;
1485 	struct wg_pkt_data *data;
1486 	struct wg_peer *peer;
1487 	struct wg_tag *t;
1488 	struct mbuf *mc;
1489 	size_t padding_len, plaintext_len, out_len;
1490 	uint64_t nonce;
1491
1492 	t = wg_tag_get(m);
1493 	peer = t->t_peer;
1494
1495 	plaintext_len = min(WG_PKT_WITH_PADDING(m->m_pkthdr.len), t->t_mtu);
1496 	padding_len = plaintext_len - m->m_pkthdr.len;
1497 	out_len = sizeof(struct wg_pkt_data) + plaintext_len + NOISE_AUTHTAG_LEN;
1498
1499 	/*
1500 	 * For the time being we allocate a new packet with sufficient size to
1501 	 * hold the encrypted data and headers. It would be difficult to
1502 	 * overcome as p_encap_queue (mbuf_list) holds a reference to the mbuf.
1503 	 * If we m_makespace or similar, we risk corrupting that list.
1504 	 * Additionally, we only pass a buf and buf length to
1505 	 * noise_remote_encrypt. Technically it would be possible to teach
1506 	 * noise_remote_encrypt about mbufs, but we would need to sort out the
1507 	 * p_encap_queue situation first.
1508 	 */
1509 	if ((mc = m_clget(NULL, M_NOWAIT, out_len)) == NULL)
1510 		goto error;
1511
1512 	data = mtod(mc, struct wg_pkt_data *);
1513 	m_copydata(m, 0, m->m_pkthdr.len, data->buf);
1514 	bzero(data->buf + m->m_pkthdr.len, padding_len);
1515 	data->t = WG_PKT_DATA;
1516
1517 	/*
1518 	 * Copy the flow hash from the inner packet to the outer packet, so
1519 	 * that fq_codel can properly separate streams, rather than falling
1520 	 * back to random buckets.
1521 	 */
1522 	mc->m_pkthdr.ph_flowid = m->m_pkthdr.ph_flowid;
1523
1524 	res = noise_remote_encrypt(&peer->p_remote, &data->r_idx, &nonce,
1525 	    data->buf, plaintext_len);
1526 	nonce = htole64(nonce); /* Wire format is little endian. */
1527 	memcpy(data->nonce, &nonce, sizeof(data->nonce));
1528
1529 	if (__predict_false(res == EINVAL)) {
1530 		m_freem(mc);
1531 		goto error;
1532 	} else if (__predict_false(res == ESTALE)) {
1533 		wg_timers_event_want_initiation(&peer->p_timers);
1534 	} else if (__predict_false(res != 0)) {
1535 		panic("unexpected result: %d", res);
1536 	}
1537
1538 	/* A packet with length 0 is a keepalive packet */
1539 	if (__predict_false(m->m_pkthdr.len == 0))
1540 		DPRINTF(sc, "Sending keepalive packet to peer %llu\n",
1541 		    peer->p_id);
1542
1543 	mc->m_pkthdr.ph_loopcnt = m->m_pkthdr.ph_loopcnt;
1544 	mc->m_flags &= ~(M_MCAST | M_BCAST);
1545 	mc->m_len = out_len;
1546 	m_calchdrlen(mc);
1547
1548 	/*
1549 	 * We would count ifc_opackets, ifc_obytes of m here, except if_snd
1550 	 * already does that for us, so no need to worry about it.
1551 	counters_pkt(sc->sc_if.if_counters, ifc_opackets, ifc_obytes,
1552 	    m->m_pkthdr.len);
1553 	 */
1554 	wg_peer_counters_add(peer, mc->m_pkthdr.len, 0);
1555
1556 	t->t_mbuf = mc;
1557 error:
1558 	t->t_done = 1;
1559 	task_add(net_tq(sc->sc_if.if_index), &peer->p_deliver_out);
1560 }
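
The padding arithmetic at the top of wg_encap rounds the plaintext up to a
16-byte boundary without growing it past the MTU. The same rule in
isolation, as a self-contained sketch (wg_pkt_with_padding mirrors the
WG_PKT_WITH_PADDING macro expansion seen above; names are illustrative):

	#include <stddef.h>

	/* Round len up to the next multiple of 16 (the WireGuard padding block). */
	static inline size_t
	wg_pkt_with_padding(size_t len)
	{
		return (len + (16 - 1)) & ~(size_t)(16 - 1);
	}

	/*
	 * Plaintext length used for encryption: padded, but capped at the MTU
	 * so a full-sized packet is not grown past what the interface carries.
	 * E.g. 1 -> 16; 1419 -> 1424, capped to 1420 at the default MTU;
	 * 0 -> 0 (keepalive).
	 */
	static inline size_t
	wg_plaintext_len(size_t pkt_len, size_t mtu)
	{
		size_t padded = wg_pkt_with_padding(pkt_len);
		return padded < mtu ? padded : mtu;
	}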
1561
1562 void
1563 wg_decap(struct wg_softc *sc, struct mbuf *m)
1564 {
1565 	int res, len;
1566 	struct ip *ip;
1567 	struct ip6_hdr *ip6;
1568 	struct wg_pkt_data *data;
1569 	struct wg_peer *peer, *allowed_peer;
1570 	struct wg_tag *t;
1571 	size_t payload_len;
1572 	uint64_t nonce;
1573
1574 	t = wg_tag_get(m);
1575 	peer = t->t_peer;
1576
1577 	/*
1578 	 * Likewise to wg_encap, we pass a buf and buf length to
1579 	 * noise_remote_decrypt. Again, possible to teach it about mbufs
1580 	 * but need to get over the p_decap_queue situation first. However,
1581 	 * we do not need to allocate a new mbuf as the decrypted packet is
1582 	 * strictly smaller than the encrypted one. We just set t_mbuf to m
1583 	 * and wg_deliver_in knows how to deal with that.
1584 	 */
1585 	data = mtod(m, struct wg_pkt_data *);
1586 	payload_len = m->m_pkthdr.len - sizeof(struct wg_pkt_data);
1587 	memcpy(&nonce, data->nonce, sizeof(nonce));
1588 	nonce = le64toh(nonce); /* Wire format is little endian. */
1589 	res = noise_remote_decrypt(&peer->p_remote, data->r_idx, nonce,
1590 	    data->buf, payload_len);
1591
1592 	if (__predict_false(res == EINVAL)) {
1593 		goto error;
1594 	} else if (__predict_false(res == ECONNRESET)) {
1595 		wg_timers_event_handshake_complete(&peer->p_timers);
1596 	} else if (__predict_false(res == ESTALE)) {
1597 		wg_timers_event_want_initiation(&peer->p_timers);
1598 	} else if (__predict_false(res != 0)) {
1599 		panic("unexpected response: %d", res);
1600 	}
1601
1602 	wg_peer_set_endpoint_from_tag(peer, t);
1603
1604 	wg_peer_counters_add(peer, 0, m->m_pkthdr.len);
1605
1606 	m_adj(m, sizeof(struct wg_pkt_data));
1607 	m_adj(m, -NOISE_AUTHTAG_LEN);
1608
1609 	counters_pkt(sc->sc_if.if_counters, ifc_ipackets, ifc_ibytes,
1610 	    m->m_pkthdr.len);
1611
1612 	/* A packet with length 0 is a keepalive packet */
1613 	if (__predict_false(m->m_pkthdr.len == 0)) {
1614 		DPRINTF(sc, "Receiving keepalive packet from peer "
1615 		    "%llu\n", peer->p_id);
1616 		goto done;
1617 	}
1618
1619 	/*
1620 	 * We can let the network stack handle the intricate validation of the
1621 	 * IP header, we just worry about the sizeof and the version, so we can
1622 	 * read the source address in wg_aip_lookup.
1623 	 *
1624 	 * We also need to trim the packet, as it was likely padded before
1625 	 * encryption. While we could drop it here, it will be more helpful to
1626 	 * pass it to bpf_mtap and use the counters that people are expecting
1627 	 * in ipv4_input and ipv6_input. We can rely on ipv4_input and
1628 	 * ipv6_input to properly validate the headers.
1629 	 */
1630 	ip = mtod(m, struct ip *);
1631 	ip6 = mtod(m, struct ip6_hdr *);
1632
1633 	if (m->m_pkthdr.len >= sizeof(struct ip) && ip->ip_v == IPVERSION) {
1634 		m->m_pkthdr.ph_family = AF_INET;
1635
1636 		len = ntohs(ip->ip_len);
1637 		if (len >= sizeof(struct ip) && len < m->m_pkthdr.len)
1638 			m_adj(m, len - m->m_pkthdr.len);
1639
1640 		allowed_peer = wg_aip_lookup(sc->sc_aip4, &ip->ip_src);
1641 #ifdef INET6
1642 	} else if (m->m_pkthdr.len >= sizeof(struct ip6_hdr) &&
1643 	    (ip6->ip6_vfc & IPV6_VERSION_MASK) == IPV6_VERSION) {
1644 		m->m_pkthdr.ph_family = AF_INET6;
1645
1646 		len = ntohs(ip6->ip6_plen) + sizeof(struct ip6_hdr);
1647 		if (len < m->m_pkthdr.len)
1648 			m_adj(m, len - m->m_pkthdr.len);
1649
1650 		allowed_peer = wg_aip_lookup(sc->sc_aip6, &ip6->ip6_src);
1651 #endif
1652 	} else {
1653 		DPRINTF(sc, "Packet is neither ipv4 nor ipv6 from "
1654 		    "peer %llu\n", peer->p_id);
1655 		goto error;
1656 	}
1657
1658 	if (__predict_false(peer != allowed_peer)) {
1659 		DPRINTF(sc, "Packet has unallowed src IP from peer "
1660 		    "%llu\n", peer->p_id);
1661 		goto error;
1662 	}
1663
1664 	/* tunneled packet was not offloaded */
1665 	m->m_pkthdr.csum_flags = 0;
1666
1667 	m->m_pkthdr.ph_ifidx = sc->sc_if.if_index;
1668 	m->m_pkthdr.ph_rtableid = sc->sc_if.if_rdomain;
1669 	m->m_flags &= ~(M_MCAST | M_BCAST);
1670 #if NPF > 0
1671 	pf_pkt_addr_changed(m);
1672 #endif /* NPF > 0 */
1673
1674 done:
1675 	t->t_mbuf = m;
1676 error:
1677 	t->t_done = 1;
1678 	task_add(net_tq(sc->sc_if.if_index), &peer->p_deliver_in);
1679 }
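
The tail trim above relies on m_adj() trimming from the end of the chain
when given a negative count. A worked example of the arithmetic under
assumed sizes (illustrative numbers only):

	/*
	 * Suppose the sender padded a 41-byte IPv4 datagram to 48 bytes
	 * (the next 16-byte boundary). After decryption and stripping the
	 * data header and auth tag, m->m_pkthdr.len == 48 while the inner
	 * header claims ntohs(ip->ip_len) == 41, so:
	 *
	 *	m_adj(m, 41 - 48);	equivalent to m_adj(m, -7)
	 *
	 * removes the 7 bytes of padding from the tail, leaving the
	 * datagram exactly as the peer sent it.
	 */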
1680
1681 void
1682 wg_encap_worker(void *_sc)
1683 {
1684 	struct mbuf *m;
1685 	struct wg_softc *sc = _sc;
1686 	while ((m = wg_ring_dequeue(&sc->sc_encap_ring)) != NULL)
1687 		wg_encap(sc, m);
1688 }
1689
1690 void
1691 wg_decap_worker(void *_sc)
1692 {
1693 	struct mbuf *m;
1694 	struct wg_softc *sc = _sc;
1695 	while ((m = wg_ring_dequeue(&sc->sc_decap_ring)) != NULL)
1696 		wg_decap(sc, m);
1697 }
1698
1699 void
1700 wg_deliver_out(void *_peer)
1701 {
1702 	struct wg_peer *peer = _peer;
1703 	struct wg_softc *sc = peer->p_sc;
1704 	struct wg_endpoint endpoint;
1705 	struct wg_tag *t;
1706 	struct mbuf *m;
1707 	int ret;
1708
1709 	wg_peer_get_endpoint(peer, &endpoint);
1710
1711 	while ((m = wg_queue_dequeue(&peer->p_encap_queue, &t)) != NULL) {
1712 		/* t_mbuf will contain the encrypted packet */
1713 		if (t->t_mbuf == NULL) {
1714 			counters_inc(sc->sc_if.if_counters, ifc_oerrors);
1715 			m_freem(m);
1716 			continue;
1717 		}
1718
1719 		ret = wg_send(sc, &endpoint, t->t_mbuf);
1720
1721 		if (ret == 0) {
1722 			wg_timers_event_any_authenticated_packet_traversal(
1723 			    &peer->p_timers);
1724 			wg_timers_event_any_authenticated_packet_sent(
1725 			    &peer->p_timers);
1726
1727 			if (m->m_pkthdr.len != 0)
1728 				wg_timers_event_data_sent(&peer->p_timers);
1729 		} else if (ret == EADDRNOTAVAIL) {
1730 			wg_peer_clear_src(peer);
1731 			wg_peer_get_endpoint(peer, &endpoint);
1732 		}
1733
1734 		m_freem(m);
1735 	}
1736 }
1737
1738 void
1739 wg_deliver_in(void *_peer)
1740 {
1741 	struct wg_peer *peer = _peer;
1742 	struct wg_softc *sc = peer->p_sc;
1743 	struct wg_tag *t;
1744 	struct mbuf *m;
1745
1746 	while ((m = wg_queue_dequeue(&peer->p_decap_queue, &t)) != NULL) {
        [1] Calling 'wg_queue_dequeue'
1747 		/* t_mbuf will contain the decrypted packet */
1748 		if (t->t_mbuf == NULL) {
1749 			counters_inc(sc->sc_if.if_counters, ifc_ierrors);
1750 			m_freem(m);
1751 			continue;
1752 		}
1753
1754 		/* From here on m == t->t_mbuf */
1755 		KASSERT(m == t->t_mbuf);
1756
1757 		wg_timers_event_any_authenticated_packet_received(
1758 		    &peer->p_timers);
1759 		wg_timers_event_any_authenticated_packet_traversal(
1760 		    &peer->p_timers);
1761
1762 		if (m->m_pkthdr.len == 0) {
1763 			m_freem(m);
1764 			continue;
1765 		}
1766
1767 #if NBPFILTER > 0
1768 		if (sc->sc_if.if_bpf != NULL)
1769 			bpf_mtap_af(sc->sc_if.if_bpf,
1770 			    m->m_pkthdr.ph_family, m, BPF_DIRECTION_IN);
1771 #endif
1772
1773 		NET_LOCK();
1774 		if (m->m_pkthdr.ph_family == AF_INET)
1775 			ipv4_input(&sc->sc_if, m);
1776 #ifdef INET6
1777 		else if (m->m_pkthdr.ph_family == AF_INET6)
1778 			ipv6_input(&sc->sc_if, m);
1779 #endif
1780 		else
1781 			panic("invalid ph_family");
1782 		NET_UNLOCK();
1783
1784 		wg_timers_event_data_received(&peer->p_timers);
1785 	}
1786 }
1787
1788 int
1789 wg_queue_in(struct wg_softc *sc, struct wg_peer *peer, struct mbuf *m)
1790 {
1791 	struct wg_ring *parallel = &sc->sc_decap_ring;
1792 	struct wg_queue *serial = &peer->p_decap_queue;
1793 	struct wg_tag *t;
1794
1795 	mtx_enter(&serial->q_mtx);
1796 	if (serial->q_list.ml_len < MAX_QUEUED_PKT) {
1797 		ml_enqueue(&serial->q_list, m);
1798 		mtx_leave(&serial->q_mtx);
1799 	} else {
1800 		mtx_leave(&serial->q_mtx);
1801 		m_freem(m);
1802 		return ENOBUFS;
1803 	}
1804
1805 	mtx_enter(&parallel->r_mtx);
1806 	if (parallel->r_tail - parallel->r_head < MAX_QUEUED_PKT) {
1807 		parallel->r_buf[parallel->r_tail & MAX_QUEUED_PKT_MASK] = m;
1808 		parallel->r_tail++;
1809 		mtx_leave(&parallel->r_mtx);
1810 	} else {
1811 		mtx_leave(&parallel->r_mtx);
1812 		t = wg_tag_get(m);
1813 		t->t_done = 1;
1814 		return ENOBUFS;
1815 	}
1816
1817 	return 0;
1818 }
1819
1820 void
1821 wg_queue_out(struct wg_softc *sc, struct wg_peer *peer)
1822 {
1823 	struct wg_ring *parallel = &sc->sc_encap_ring;
1824 	struct wg_queue *serial = &peer->p_encap_queue;
1825 	struct mbuf_list ml, ml_free;
1826 	struct mbuf *m;
1827 	struct wg_tag *t;
1828 	int dropped;
1829
1830 	/*
1831 	 * We delist all staged packets and then add them to the queues. This
1832 	 * can race with wg_qstart when called from wg_send_keepalive, however
1833 	 * wg_qstart will not race as it is serialised.
1834 	 */
1835 	mq_delist(&peer->p_stage_queue, &ml);
1836 	ml_init(&ml_free);
1837
1838 	while ((m = ml_dequeue(&ml)) != NULL) {
1839 		mtx_enter(&serial->q_mtx);
1840 		if (serial->q_list.ml_len < MAX_QUEUED_PKT) {
1841 			ml_enqueue(&serial->q_list, m);
1842 			mtx_leave(&serial->q_mtx);
1843 		} else {
1844 			mtx_leave(&serial->q_mtx);
1845 			ml_enqueue(&ml_free, m);
1846 			continue;
1847 		}
1848
1849 		mtx_enter(&parallel->r_mtx);
1850 		if (parallel->r_tail - parallel->r_head < MAX_QUEUED_PKT) {
1851 			parallel->r_buf[parallel->r_tail & MAX_QUEUED_PKT_MASK] = m;
1852 			parallel->r_tail++;
1853 			mtx_leave(&parallel->r_mtx);
1854 		} else {
1855 			mtx_leave(&parallel->r_mtx);
1856 			t = wg_tag_get(m);
1857 			t->t_done = 1;
1858 		}
1859 	}
1860
1861 	if ((dropped = ml_purge(&ml_free)) > 0)
1862 		counters_add(sc->sc_if.if_counters, ifc_oqdrops, dropped);
1863 }
1864
1865 struct mbuf *
1866 wg_ring_dequeue(struct wg_ring *r)
1867 {
1868 	struct mbuf *m = NULL;
1869 	mtx_enter(&r->r_mtx);
1870 	if (r->r_head != r->r_tail) {
1871 		m = r->r_buf[r->r_head & MAX_QUEUED_PKT_MASK];
1872 		r->r_head++;
1873 	}
1874 	mtx_leave(&r->r_mtx);
1875 	return m;
1876 }
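
wg_ring relies on MAX_QUEUED_PKT (1024) being a power of two: r_head and
r_tail are free-running counters, occupancy is r_tail - r_head even across
integer wrap-around, and "& MAX_QUEUED_PKT_MASK" maps a counter onto a
slot. The same idiom as a self-contained sketch (names are hypothetical,
and locking is omitted):

	#include <stddef.h>
	#include <stdint.h>

	#define RING_SIZE	1024		/* must be a power of two */
	#define RING_MASK	(RING_SIZE - 1)

	struct ring {
		void		*buf[RING_SIZE];
		uint32_t	 head, tail;	/* free-running, never masked in place */
	};

	static int
	ring_push(struct ring *r, void *p)
	{
		if (r->tail - r->head >= RING_SIZE)
			return -1;			/* full */
		r->buf[r->tail++ & RING_MASK] = p;	/* mask only on use */
		return 0;
	}

	static void *
	ring_pop(struct ring *r)
	{
		if (r->head == r->tail)
			return NULL;			/* empty */
		return r->buf[r->head++ & RING_MASK];
	}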
1877
1878 struct mbuf *
1879 wg_queue_dequeue(struct wg_queue *q, struct wg_tag **t)
1880 {
1881 	struct mbuf *m;
1882 	mtx_enter(&q->q_mtx);
1883 	if ((m = q->q_list.ml_head) != NULL && (*t = wg_tag_get(m))->t_done)
        [2] Assuming the condition is true
        [3] Access to field 't_done' results in a dereference of a null pointer
1884 		ml_dequeue(&q->q_list);
1885 	else
1886 		m = NULL;
1887 	mtx_leave(&q->q_mtx);
1888 	return m;
1889 }
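
This is the function the analyzer flags: it models wg_tag_get() as able to
return NULL (the underlying m_tag lookup/allocation can fail), so the
unchecked dereference for t_done on line 1883 is reported. Every mbuf on
these queues had its tag attached when it was enqueued, an invariant the
checker cannot see across functions. A defensive variant that satisfies the
analyzer might look like this (a sketch only, not the committed fix):

	struct mbuf *
	wg_queue_dequeue_checked(struct wg_queue *q, struct wg_tag **t)
	{
		struct mbuf *m;

		mtx_enter(&q->q_mtx);
		/* Treat a missing tag as "not done" instead of dereferencing NULL. */
		if ((m = q->q_list.ml_head) != NULL &&
		    (*t = wg_tag_get(m)) != NULL && (*t)->t_done)
			ml_dequeue(&q->q_list);
		else
			m = NULL;
		mtx_leave(&q->q_mtx);
		return m;
	}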
1890
1891 size_t
1892 wg_queue_len(struct wg_queue *q)
1893 {
1894 	size_t len;
1895 	mtx_enter(&q->q_mtx);
1896 	len = q->q_list.ml_len;
1897 	mtx_leave(&q->q_mtx);
1898 	return len;
1899 }
1900
1901 struct noise_remote *
1902 wg_remote_get(void *_sc, uint8_t public[NOISE_PUBLIC_KEY_LEN])
1903 {
1904 	struct wg_peer *peer;
1905 	struct wg_softc *sc = _sc;
1906 	if ((peer = wg_peer_lookup(sc, public)) == NULL)
1907 		return NULL;
1908 	return &peer->p_remote;
1909 }
1910
1911 uint32_t
1912 wg_index_set(void *_sc, struct noise_remote *remote)
1913 {
1914 	struct wg_peer *peer;
1915 	struct wg_softc *sc = _sc;
1916 	struct wg_index *index, *iter;
1917 	uint32_t key;
1918
1919 	/*
1920 	 * We can modify this without a lock as wg_index_set, wg_index_drop are
1921 	 * guaranteed to be serialised (per remote).
1922 	 */
1923 	peer = CONTAINER_OF(remote, struct wg_peer, p_remote);
1924 	index = SLIST_FIRST(&peer->p_unused_index);
1925 	KASSERT(index != NULL);
1926 	SLIST_REMOVE_HEAD(&peer->p_unused_index, i_unused_entry);
1927
1928 	index->i_value = remote;
1929
1930 	mtx_enter(&sc->sc_index_mtx);
1931 assign_id:
1932 	key = index->i_key = arc4random();
1933 	key &= sc->sc_index_mask;
1934 	LIST_FOREACH(iter, &sc->sc_index[key], i_entry)
1935 		if (iter->i_key == index->i_key)
1936 			goto assign_id;
1937
1938 	LIST_INSERT_HEAD(&sc->sc_index[key], index, i_entry);
1939
1940 	mtx_leave(&sc->sc_index_mtx);
1941
1942 	/* Likewise, no need to lock for index here. */
1943 	return index->i_key;
1944 }
1945
1946 struct noise_remote *
1947 wg_index_get(void *_sc, uint32_t key0)
1948 {
1949 	struct wg_softc *sc = _sc;
1950 	struct wg_index *iter;
1951 	struct noise_remote *remote = NULL;
1952 	uint32_t key = key0 & sc->sc_index_mask;
1953
1954 	mtx_enter(&sc->sc_index_mtx);
1955 	LIST_FOREACH(iter, &sc->sc_index[key], i_entry)
1956 		if (iter->i_key == key0) {
1957 			remote = iter->i_value;
1958 			break;
1959 		}
1960 	mtx_leave(&sc->sc_index_mtx);
1961 	return remote;
1962 }
1963
1964 void
1965 wg_index_drop(void *_sc, uint32_t key0)
1966 {
1967 	struct wg_softc *sc = _sc;
1968 	struct wg_index *iter;
1969 	struct wg_peer *peer = NULL;
1970 	uint32_t key = key0 & sc->sc_index_mask;
1971
1972 	mtx_enter(&sc->sc_index_mtx);
1973 	LIST_FOREACH(iter, &sc->sc_index[key], i_entry)
1974 		if (iter->i_key == key0) {
1975 			LIST_REMOVE(iter, i_entry);
1976 			break;
1977 		}
1978 	mtx_leave(&sc->sc_index_mtx);
1979
1980 	/* We expect a peer */
1981 	peer = CONTAINER_OF(iter->i_value, struct wg_peer, p_remote);
1982 	KASSERT(peer != NULL);
1983 	SLIST_INSERT_HEAD(&peer->p_unused_index, iter, i_unused_entry);
1984 }
1985
1986 struct mbuf *
1987 wg_input(void *_sc, struct mbuf *m, struct ip *ip, struct ip6_hdr *ip6,
1988     void *_uh, int hlen)
1989 {
1990 	struct wg_pkt_data *data;
1991 	struct noise_remote *remote;
1992 	struct wg_tag *t;
1993 	struct wg_softc *sc = _sc;
1994 	struct udphdr *uh = _uh;
1995
1996 	NET_ASSERT_LOCKED();
1997
1998 	if ((t = wg_tag_get(m)) == NULL) {
1999 		m_freem(m);
2000 		return NULL;
2001 	}
2002
2003 	if (ip != NULL) {
2004 		t->t_endpoint.e_remote.r_sa.sa_len = sizeof(struct sockaddr_in);
2005 		t->t_endpoint.e_remote.r_sa.sa_family = AF_INET;
2006 		t->t_endpoint.e_remote.r_sin.sin_port = uh->uh_sport;
2007 		t->t_endpoint.e_remote.r_sin.sin_addr = ip->ip_src;
2008 		t->t_endpoint.e_local.l_in = ip->ip_dst;
2009 #ifdef INET6
2010 	} else if (ip6 != NULL) {
2011 		t->t_endpoint.e_remote.r_sa.sa_len = sizeof(struct sockaddr_in6);
2012 		t->t_endpoint.e_remote.r_sa.sa_family = AF_INET6;
2013 		t->t_endpoint.e_remote.r_sin6.sin6_port = uh->uh_sport;
2014 		t->t_endpoint.e_remote.r_sin6.sin6_addr = ip6->ip6_src;
2015 		t->t_endpoint.e_local.l_in6 = ip6->ip6_dst;
2016 #endif
2017 	} else {
2018 		m_freem(m);
2019 		return NULL;
2020 	}
2021
2022 	/* m has an IP/IPv6 header of hlen length, we don't need it anymore. */
2023 	m_adj(m, hlen);
2024
2025 	/*
2026 	 * Ensure mbuf is contiguous over full length of packet. This is done
2027 	 * so we can directly read the handshake values in wg_handshake, and so
2028 	 * we can decrypt a transport packet by passing a single buffer to
2029 	 * noise_remote_decrypt in wg_decap.
2030 	 */
2031 	if ((m = m_pullup(m, m->m_pkthdr.len)) == NULL)
2032 		return NULL;
2033
2034 	if ((m->m_pkthdr.len == sizeof(struct wg_pkt_initiation) &&
2035 	    *mtod(m, uint32_t *) == WG_PKT_INITIATION) ||
2036 	    (m->m_pkthdr.len == sizeof(struct wg_pkt_response) &&
2037 	    *mtod(m, uint32_t *) == WG_PKT_RESPONSE) ||
2038 	    (m->m_pkthdr.len == sizeof(struct wg_pkt_cookie) &&
2039 	    *mtod(m, uint32_t *) == WG_PKT_COOKIE)) {
2040
2041 		if (mq_enqueue(&sc->sc_handshake_queue, m) != 0)
2042 			DPRINTF(sc, "Dropping handshake packet\n");
2043 		task_add(wg_handshake_taskq, &sc->sc_handshake);
2044
2045 	} else if (m->m_pkthdr.len >= sizeof(struct wg_pkt_data) +
2046 	    NOISE_AUTHTAG_LEN && *mtod(m, uint32_t *) == WG_PKT_DATA) {
2047
2048 		data = mtod(m, struct wg_pkt_data *);
2049
2050 		if ((remote = wg_index_get(sc, data->r_idx)) != NULL) {
2051 			t->t_peer = CONTAINER_OF(remote, struct wg_peer,
2052 			    p_remote);
2053 			t->t_mbuf = NULL;
2054 			t->t_done = 0;
2055
2056 			if (wg_queue_in(sc, t->t_peer, m) != 0)
2057 				counters_inc(sc->sc_if.if_counters,
2058 				    ifc_iqdrops);
2059 			task_add(wg_crypt_taskq, &sc->sc_decap);
2060 		} else {
2061 			counters_inc(sc->sc_if.if_counters, ifc_ierrors);
2062 			m_freem(m);
2063 		}
2064 	} else {
2065 		counters_inc(sc->sc_if.if_counters, ifc_ierrors);
2066 		m_freem(m);
2067 	}
2068
2069 	return NULL;
2070 }
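
Classification above keys off the first 32-bit word of the UDP payload, and
data packets must additionally be large enough for the fixed data header
plus the authentication tag. The data-header layout implied by the accesses
in wg_input and wg_decap (a sketch; the authoritative definition appears
earlier in this file):

	#include <stdint.h>

	struct wg_pkt_data_sketch {
		uint32_t	t;		/* WG_PKT_DATA */
		uint32_t	r_idx;		/* receiver index fed to wg_index_get() */
		uint8_t		nonce[8];	/* 64-bit counter, little endian on the wire */
		uint8_t		buf[];		/* ciphertext + NOISE_AUTHTAG_LEN (16) tag */
	};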
2071
2072 void
2073 wg_qstart(struct ifqueue *ifq)
2074 {
2075 	struct ifnet *ifp = ifq->ifq_if;
2076 	struct wg_softc *sc = ifp->if_softc;
2077 	struct wg_peer *peer;
2078 	struct wg_tag *t;
2079 	struct mbuf *m;
2080 	SLIST_HEAD(,wg_peer) start_list;
2081
2082 	SLIST_INIT(&start_list);
2083
2084 	/*
2085 	 * We should be OK to modify p_start_list, p_start_onlist in this
2086 	 * function as there should only be one ifp->if_qstart invoked at a
2087 	 * time.
2088 	 */
2089 	while ((m = ifq_dequeue(ifq)) != NULL) {
2090 		t = wg_tag_get(m);
2091 		peer = t->t_peer;
2092 		if (mq_push(&peer->p_stage_queue, m) != 0)
2093 			counters_inc(ifp->if_counters, ifc_oqdrops);
2094 		if (!peer->p_start_onlist) {
2095 			SLIST_INSERT_HEAD(&start_list, peer, p_start_list);
2096 			peer->p_start_onlist = 1;
2097 		}
2098 	}
2099 	SLIST_FOREACH(peer, &start_list, p_start_list) {
2100 		if (noise_remote_ready(&peer->p_remote) == 0)
2101 			wg_queue_out(sc, peer);
2102 		else
2103 			wg_timers_event_want_initiation(&peer->p_timers);
2104 		peer->p_start_onlist = 0;
2105 	}
2106 	task_add(wg_crypt_taskq, &sc->sc_encap);
2107 }
2108
2109 int
2110 wg_output(struct ifnet *ifp, struct mbuf *m, struct sockaddr *sa,
2111     struct rtentry *rt)
2112 {
2113 	struct wg_softc *sc = ifp->if_softc;
2114 	struct wg_peer *peer;
2115 	struct wg_tag *t;
2116 	int af, ret = EINVAL;
2117
2118 	NET_ASSERT_LOCKED();
2119
2120 	if ((t = wg_tag_get(m)) == NULL) {
2121 		ret = ENOBUFS;
2122 		goto error;
2123 	}
2124
2125 	m->m_pkthdr.ph_family = sa->sa_family;
2126 	if (sa->sa_family == AF_INET) {
2127 		peer = wg_aip_lookup(sc->sc_aip4,
2128 		    &mtod(m, struct ip *)->ip_dst);
2129 #ifdef INET6
2130 	} else if (sa->sa_family == AF_INET6) {
2131 		peer = wg_aip_lookup(sc->sc_aip6,
2132 		    &mtod(m, struct ip6_hdr *)->ip6_dst);
2133 #endif
2134 	} else {
2135 		ret = EAFNOSUPPORT;
2136 		goto error;
2137 	}
2138
2139 #if NBPFILTER > 0
2140 	if (sc->sc_if.if_bpf)
2141 		bpf_mtap_af(sc->sc_if.if_bpf, sa->sa_family, m,
2142 		    BPF_DIRECTION_OUT);
2143 #endif
2144
2145 	if (peer == NULL) {
2146 		ret = ENETUNREACH;
2147 		goto error;
2148 	}
2149
2150 	af = peer->p_endpoint.e_remote.r_sa.sa_family;
2151 	if (af != AF_INET && af != AF_INET6) {
2152 		DPRINTF(sc, "No valid endpoint has been configured or "
2153 		    "discovered for peer %llu\n", peer->p_id);
2154 		ret = EDESTADDRREQ;
2155 		goto error;
2156 	}
2157
2158 	if (m->m_pkthdr.ph_loopcnt++ > M_MAXLOOP) {
2159 		DPRINTF(sc, "Packet looped");
2160 		ret = ELOOP;
2161 		goto error;
2162 	}
2163
2164 	/*
2165 	 * As we hold a reference to peer in the mbuf, we can't handle a
2166 	 * delayed packet without doing some refcnting. If a peer is removed
2167 	 * while a delayed packet holds a reference, bad things will happen. For
2168 	 * the time being, delayed packets are unsupported. This may be fixed
2169 	 * with another aip_lookup in wg_qstart, or refcnting as mentioned before.
2170 	 */
2171 	if (m->m_pkthdr.pf.delay > 0) {
2172 		DPRINTF(sc, "PF Delay Unsupported");
2173 		ret = EOPNOTSUPP;
2174 		goto error;
2175 	}
2176
2177 	t->t_peer = peer;
2178 	t->t_mbuf = NULL;
2179 	t->t_done = 0;
2180 	t->t_mtu = ifp->if_mtu;
2181
2182 	/*
2183 	 * We still have an issue with ifq that will count a packet that gets
2184 	 * dropped in wg_qstart, or not encrypted. These get counted as
2185 	 * ofails or oqdrops, so the packet gets counted twice.
2186 	 */
2187 	return if_enqueue(ifp, m);
2188 error:
2189 	counters_inc(ifp->if_counters, ifc_oerrors);
2190 	m_freem(m);
2191 	return ret;
2192 }
2193
2194 int
2195 wg_ioctl_set(struct wg_softc *sc, struct wg_data_io *data)
2196 {
2197 	struct wg_interface_io *iface_p, iface_o;
2198 	struct wg_peer_io *peer_p, peer_o;
2199 	struct wg_aip_io *aip_p, aip_o;
2200
2201 	struct wg_peer *peer, *tpeer;
2202 	struct wg_aip *aip, *taip;
2203
2204 	in_port_t port;
2205 	int rtable;
2206
2207 	uint8_t public[WG_KEY_SIZE], private[WG_KEY_SIZE];
2208 	size_t i, j;
2209 	int ret, has_identity;
2210
2211 	if ((ret = suser(curproc)) != 0)
2212 		return ret;
2213
2214 	rw_enter_write(&sc->sc_lock);
2215
2216 	iface_p = data->wgd_interface;
2217 	if ((ret = copyin(iface_p, &iface_o, sizeof(iface_o))) != 0)
2218 		goto error;
2219
2220 	if (iface_o.i_flags & WG_INTERFACE_REPLACE_PEERS)
2221 		TAILQ_FOREACH_SAFE(peer, &sc->sc_peer_seq, p_seq_entry, tpeer)
2222 			wg_peer_destroy(peer);
2223
2224 	if (iface_o.i_flags & WG_INTERFACE_HAS_PRIVATE &&
2225 	    (noise_local_keys(&sc->sc_local, NULL, private) ||
2226 	    timingsafe_bcmp(private, iface_o.i_private, WG_KEY_SIZE))) {
2227 		if (curve25519_generate_public(public, iface_o.i_private)) {
2228 			if ((peer = wg_peer_lookup(sc, public)) != NULL)
2229 				wg_peer_destroy(peer);
2230 		}
2231 		noise_local_lock_identity(&sc->sc_local);
2232 		has_identity = noise_local_set_private(&sc->sc_local,
2233 		    iface_o.i_private);
2234 		TAILQ_FOREACH(peer, &sc->sc_peer_seq, p_seq_entry) {
2235 			noise_remote_precompute(&peer->p_remote);
2236 			wg_timers_event_reset_handshake_last_sent(&peer->p_timers);
2237 			noise_remote_expire_current(&peer->p_remote);
2238 		}
2239 		cookie_checker_update(&sc->sc_cookie,
2240 		    has_identity == 0 ? public : NULL);
2241 		noise_local_unlock_identity(&sc->sc_local);
2242 	}
2243
2244 	if (iface_o.i_flags & WG_INTERFACE_HAS_PORT)
2245 		port = htons(iface_o.i_port);
2246 	else
2247 		port = sc->sc_udp_port;
2248
2249 	if (iface_o.i_flags & WG_INTERFACE_HAS_RTABLE)
2250 		rtable = iface_o.i_rtable;
2251 	else
2252 		rtable = sc->sc_udp_rtable;
2253
2254 	if (port != sc->sc_udp_port || rtable != sc->sc_udp_rtable) {
2255 		TAILQ_FOREACH(peer, &sc->sc_peer_seq, p_seq_entry)
2256 			wg_peer_clear_src(peer);
2257
2258 		if (sc->sc_if.if_flags & IFF_RUNNING)
2259 			if ((ret = wg_bind(sc, &port, &rtable)) != 0)
2260 				goto error;
2261
2262 		sc->sc_udp_port = port;
2263 		sc->sc_udp_rtable = rtable;
2264 	}
2265
2266 	peer_p = &iface_p->i_peers[0];
2267 	for (i = 0; i < iface_o.i_peers_count; i++) {
2268 		if ((ret = copyin(peer_p, &peer_o, sizeof(peer_o))) != 0)
2269 			goto error;
2270
2271 		/* Peer must have public key */
2272 		if (!(peer_o.p_flags & WG_PEER_HAS_PUBLIC))
2273 			goto next_peer;
2274
2275 		/* 0 = latest protocol, 1 = this protocol */
2276 		if (peer_o.p_protocol_version != 0) {
2277 			if (peer_o.p_protocol_version > 1) {
2278 				ret = EPFNOSUPPORT;
2279 				goto error;
2280 			}
2281 		}
2282
2283 		/* Get local public and check that peer key doesn't match */
2284 		if (noise_local_keys(&sc->sc_local, public, NULL) == 0 &&
2285 		    bcmp(public, peer_o.p_public, WG_KEY_SIZE) == 0)
2286 			goto next_peer;
2287
2288 		/* Lookup peer, or create if it doesn't exist */
2289 		if ((peer = wg_peer_lookup(sc, peer_o.p_public)) == NULL) {
2290 			/* If we want to delete, no need creating a new one.
2291 			 * Also, don't create a new one if we only want to
2292 			 * update. */
2293 			if (peer_o.p_flags & (WG_PEER_REMOVE|WG_PEER_UPDATE))
2294 				goto next_peer;
2295
2296 			if ((peer = wg_peer_create(sc,
2297 			    peer_o.p_public)) == NULL) {
2298 				ret = ENOMEM;
2299 				goto error;
2300 			}
2301 		}
2302
2303 		/* Remove peer and continue if specified */
2304 		if (peer_o.p_flags & WG_PEER_REMOVE) {
2305 			wg_peer_destroy(peer);
2306 			goto next_peer;
2307 		}
2308
2309 		if (peer_o.p_flags & WG_PEER_HAS_ENDPOINT)
2310 			wg_peer_set_sockaddr(peer, &peer_o.p_sa);
2311
2312 		if (peer_o.p_flags & WG_PEER_HAS_PSK)
2313 			noise_remote_set_psk(&peer->p_remote, peer_o.p_psk);
2314
2315 		if (peer_o.p_flags & WG_PEER_HAS_PKA)
2316 			wg_timers_set_persistent_keepalive(&peer->p_timers,
2317 			    peer_o.p_pka);
2318
2319 		if (peer_o.p_flags & WG_PEER_REPLACE_AIPS) {
2320 			LIST_FOREACH_SAFE(aip, &peer->p_aip, a_entry, taip) {
2321 				wg_aip_remove(sc, peer, &aip->a_data);
2322 			}
2323 		}
2324
2325 		aip_p = &peer_p->p_aips[0];
2326 		for (j = 0; j < peer_o.p_aips_count; j++) {
2327 			if ((ret = copyin(aip_p, &aip_o, sizeof(aip_o))) != 0)
2328 				goto error;
2329 			ret = wg_aip_add(sc, peer, &aip_o);
2330 			if (ret != 0)
2331 				goto error;
2332 			aip_p++;
2333 		}
2334
2335 		peer_p = (struct wg_peer_io *)aip_p;
2336 		continue;
2337 next_peer:
2338 		aip_p = &peer_p->p_aips[0];
2339 		aip_p += peer_o.p_aips_count;
2340 		peer_p = (struct wg_peer_io *)aip_p;
2341 	}
2342
2343 error:
2344 	rw_exit_write(&sc->sc_lock);
2345 	explicit_bzero(&iface_o, sizeof(iface_o));
2346 	explicit_bzero(&peer_o, sizeof(peer_o));
2347 	explicit_bzero(&aip_o, sizeof(aip_o));
2348 	explicit_bzero(public, sizeof(public));
2349 	explicit_bzero(private, sizeof(private));
2350 	return ret;
2351 }
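
The peer_p/aip_p stepping above walks one flat user buffer in which each
peer record is immediately followed by its allowed-IP records; wg_ioctl_get
below writes the same shape back out. The layout implied by the pointer
arithmetic (a sketch, not a declaration from this file):

	/*
	 *	struct wg_interface_io	iface;
	 *	struct wg_peer_io	peer0;
	 *	struct wg_aip_io	peer0_aips[peer0.p_aips_count];
	 *	struct wg_peer_io	peer1;
	 *	struct wg_aip_io	peer1_aips[peer1.p_aips_count];
	 *	...
	 *
	 * so advancing to the next peer is:
	 *	aip_p = &peer_p->p_aips[0] + peer_o.p_aips_count;
	 *	peer_p = (struct wg_peer_io *)aip_p;
	 */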
2352
2353 int
2354 wg_ioctl_get(struct wg_softc *sc, struct wg_data_io *data)
2355 {
2356 	struct wg_interface_io *iface_p, iface_o;
2357 	struct wg_peer_io *peer_p, peer_o;
2358 	struct wg_aip_io *aip_p;
2359
2360 	struct wg_peer *peer;
2361 	struct wg_aip *aip;
2362
2363 	size_t size, peer_count, aip_count;
2364 	int ret = 0, is_suser = suser(curproc) == 0;
2365
2366 	size = sizeof(struct wg_interface_io);
2367 	if (data->wgd_size < size && !is_suser)
2368 		goto ret_size;
2369
2370 	iface_p = data->wgd_interface;
2371 	bzero(&iface_o, sizeof(iface_o));
2372
2373 	rw_enter_read(&sc->sc_lock);
2374
2375 	if (sc->sc_udp_port != 0) {
2376 		iface_o.i_port = ntohs(sc->sc_udp_port);
2377 		iface_o.i_flags |= WG_INTERFACE_HAS_PORT;
2378 	}
2379
2380 	if (sc->sc_udp_rtable != 0) {
2381 		iface_o.i_rtable = sc->sc_udp_rtable;
2382 		iface_o.i_flags |= WG_INTERFACE_HAS_RTABLE;
2383 	}
2384
2385 	if (!is_suser)
2386 		goto copy_out_iface;
2387
2388 	if (noise_local_keys(&sc->sc_local, iface_o.i_public,
2389 	    iface_o.i_private) == 0) {
2390 		iface_o.i_flags |= WG_INTERFACE_HAS_PUBLIC;
2391 		iface_o.i_flags |= WG_INTERFACE_HAS_PRIVATE;
2392 	}
2393
2394 	size += sizeof(struct wg_peer_io) * sc->sc_peer_num;
2395 	size += sizeof(struct wg_aip_io) * sc->sc_aip_num;
2396 	if (data->wgd_size < size)
2397 		goto unlock_and_ret_size;
2398
2399 	peer_count = 0;
2400 	peer_p = &iface_p->i_peers[0];
2401 	TAILQ_FOREACH(peer, &sc->sc_peer_seq, p_seq_entry) {
2402 		bzero(&peer_o, sizeof(peer_o));
2403 		peer_o.p_flags = WG_PEER_HAS_PUBLIC;
2404 		peer_o.p_protocol_version = 1;
2405
2406 		if (noise_remote_keys(&peer->p_remote, peer_o.p_public,
2407 		    peer_o.p_psk) == 0)
2408 			peer_o.p_flags |= WG_PEER_HAS_PSK;
2409
2410 		if (wg_timers_get_persistent_keepalive(&peer->p_timers,
2411 		    &peer_o.p_pka) == 0)
2412 			peer_o.p_flags |= WG_PEER_HAS_PKA;
2413
2414 		if (wg_peer_get_sockaddr(peer, &peer_o.p_sa) == 0)
2415 			peer_o.p_flags |= WG_PEER_HAS_ENDPOINT;
2416
2417 		mtx_enter(&peer->p_counters_mtx);
2418 		peer_o.p_txbytes = peer->p_counters_tx;
2419 		peer_o.p_rxbytes = peer->p_counters_rx;
2420 		mtx_leave(&peer->p_counters_mtx);
2421
2422 		wg_timers_get_last_handshake(&peer->p_timers,
2423 		    &peer_o.p_last_handshake);
2424
2425 		aip_count = 0;
2426 		aip_p = &peer_p->p_aips[0];
2427 		LIST_FOREACH(aip, &peer->p_aip, a_entry) {
2428 			if ((ret = copyout(&aip->a_data, aip_p, sizeof(*aip_p))) != 0)
2429 				goto unlock_and_ret_size;
2430 			aip_p++;
2431 			aip_count++;
2432 		}
2433 		peer_o.p_aips_count = aip_count;
2434
2435 		if ((ret = copyout(&peer_o, peer_p, sizeof(peer_o))) != 0)
2436 			goto unlock_and_ret_size;
2437
2438 		peer_p = (struct wg_peer_io *)aip_p;
2439 		peer_count++;
2440 	}
2441 	iface_o.i_peers_count = peer_count;
2442
2443 copy_out_iface:
2444 	ret = copyout(&iface_o, iface_p, sizeof(iface_o));
2445 unlock_and_ret_size:
2446 	rw_exit_read(&sc->sc_lock);
2447 	explicit_bzero(&iface_o, sizeof(iface_o));
2448 	explicit_bzero(&peer_o, sizeof(peer_o));
2449 ret_size:
2450 	data->wgd_size = size;
2451 	return ret;
2452 }
2453
2454 int
2455 wg_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
2456 {
2457 	struct ifreq *ifr = (struct ifreq *) data;
2458 	struct wg_softc *sc = ifp->if_softc;
2459 	int ret = 0;
2460
2461 	switch (cmd) {
2462 	case SIOCSWG:
2463 		NET_UNLOCK();
2464 		ret = wg_ioctl_set(sc, (struct wg_data_io *) data);
2465 		NET_LOCK();
2466 		break;
2467 	case SIOCGWG:
2468 		NET_UNLOCK();
2469 		ret = wg_ioctl_get(sc, (struct wg_data_io *) data);
2470 		NET_LOCK();
2471 		break;
2472 	/* Interface IOCTLs */
2473 	case SIOCSIFADDR:
2474 		SET(ifp->if_flags, IFF_UP);
2475 		/* FALLTHROUGH */
2476 	case SIOCSIFFLAGS:
2477 		if (ISSET(ifp->if_flags, IFF_UP))
2478 			ret = wg_up(sc);
2479 		else
2480 			wg_down(sc);
2481 		break;
2482 	case SIOCSIFMTU:
2483 		/* Arbitrary limits */
2484 		if (ifr->ifr_mtu <= 0 || ifr->ifr_mtu > 9000)
2485 			ret = EINVAL;
2486 		else
2487 			ifp->if_mtu = ifr->ifr_mtu;
2488 		break;
2489 	case SIOCADDMULTI:
2490 	case SIOCDELMULTI:
2491 		break;
2492 	default:
2493 		ret = ENOTTY;
2494 	}
2495
2496 	return ret;
2497 }
2498
2499 int
2500 wg_up(struct wg_softc *sc)
2501 {
2502 	struct wg_peer *peer;
2503 	int ret = 0;
2504
2505 	NET_ASSERT_LOCKED();
2506 	/*
2507 	 * We use IFF_RUNNING as an exclusive access here. We also may want
2508 	 * an exclusive sc_lock as wg_bind may write to sc_udp_port. We also
2509 	 * want to drop NET_LOCK as we want to call socreate, sobind, etc. Once
2510 	 * solock is no longer === NET_LOCK, we may be able to avoid this.
2511 	 */
2512 	if (!ISSET(sc->sc_if.if_flags, IFF_RUNNING)) {
2513 		SET(sc->sc_if.if_flags, IFF_RUNNING);
2514 		NET_UNLOCK();
2515
2516 		rw_enter_write(&sc->sc_lock);
2517 		/*
2518 		 * If we successfully bind the socket, then enable the timers
2519 		 * for the peer. This will send all staged packets and a
2520 		 * keepalive if necessary.
2521 		 */
2522 		ret = wg_bind(sc, &sc->sc_udp_port, &sc->sc_udp_rtable);
2523 		if (ret == 0) {
2524 			TAILQ_FOREACH(peer, &sc->sc_peer_seq, p_seq_entry) {
2525 				wg_timers_enable(&peer->p_timers);
2526 				wg_queue_out(sc, peer);
2527 			}
2528 		}
2529 		rw_exit_write(&sc->sc_lock);
2530
2531 		NET_LOCK();
2532 		if (ret != 0)
2533 			CLR(sc->sc_if.if_flags, IFF_RUNNING);
2534 	}
2535 	return ret;
2536 }
2537
2538 void
2539 wg_down(struct wg_softc *sc)
2540 {
2541 	struct wg_peer *peer;
2542
2543 	NET_ASSERT_LOCKED();
2544 	if (!ISSET(sc->sc_if.if_flags, IFF_RUNNING))
2545 		return;
2546 	CLR(sc->sc_if.if_flags, IFF_RUNNING);
2547 	NET_UNLOCK();
2548
2549 	/*
2550 	 * We only need a read lock here, as we aren't writing to anything
2551 	 * that isn't granularly locked.
2552 	 */
2553 	rw_enter_read(&sc->sc_lock);
2554 	TAILQ_FOREACH(peer, &sc->sc_peer_seq, p_seq_entry) {
2555 		mq_purge(&peer->p_stage_queue);
2556 		wg_timers_disable(&peer->p_timers);
2557 	}
2558
2559 	taskq_barrier(wg_handshake_taskq);
2560 	TAILQ_FOREACH(peer, &sc->sc_peer_seq, p_seq_entry) {
2561 		noise_remote_clear(&peer->p_remote);
2562 		wg_timers_event_reset_handshake_last_sent(&peer->p_timers);
2563 	}
2564
2565 	wg_unbind(sc);
2566 	rw_exit_read(&sc->sc_lock);
2567 	NET_LOCK();
2568 }
2569
2570 int
2571 wg_clone_create(struct if_clone *ifc, int unit)
2572 {
2573 	struct ifnet *ifp;
2574 	struct wg_softc *sc;
2575 	struct noise_upcall local_upcall;
2576
2577 	KERNEL_ASSERT_LOCKED();
2578
2579 	if (wg_counter == 0) {
2580 		wg_handshake_taskq = taskq_create("wg_handshake",
2581 		    2, IPL_NET, TASKQ_MPSAFE);
2582 		wg_crypt_taskq = taskq_create("wg_crypt",
2583 		    ncpus, IPL_NET, TASKQ_MPSAFE);
2584
2585 		if (wg_handshake_taskq == NULL || wg_crypt_taskq == NULL) {
2586 			if (wg_handshake_taskq != NULL)
2587 				taskq_destroy(wg_handshake_taskq);
2588 			if (wg_crypt_taskq != NULL)
2589 				taskq_destroy(wg_crypt_taskq);
2590 			wg_handshake_taskq = NULL;
2591 			wg_crypt_taskq = NULL;
2592 			return ENOTRECOVERABLE;
2593 		}
2594 	}
2595 	wg_counter++;
2596
2597 	if ((sc = malloc(sizeof(*sc), M_DEVBUF, M_NOWAIT | M_ZERO)) == NULL)
2598 		goto ret_00;
2599
2600 	local_upcall.u_arg = sc;
2601 	local_upcall.u_remote_get = wg_remote_get;
2602 	local_upcall.u_index_set = wg_index_set;
2603 	local_upcall.u_index_drop = wg_index_drop;
2604
2605 	TAILQ_INIT(&sc->sc_peer_seq);
2606
2607 	/* sc_if is initialised after everything else */
2608 	arc4random_buf(&sc->sc_secret, sizeof(sc->sc_secret));
2609
2610 	rw_init(&sc->sc_lock, "wg");
2611 	noise_local_init(&sc->sc_local, &local_upcall);
2612 	if (cookie_checker_init(&sc->sc_cookie, &wg_ratelimit_pool) != 0)
2613 		goto ret_01;
2614 	sc->sc_udp_port = 0;
2615 	sc->sc_udp_rtable = 0;
2616
2617 	rw_init(&sc->sc_so_lock, "wg_so");
2618 	sc->sc_so4 = NULL;
2619 #ifdef INET6
2620 	sc->sc_so6 = NULL;
2621 #endif
2622
2623 	sc->sc_aip_num = 0;
2624 	if ((sc->sc_aip4 = art_alloc(0, 32, 0)) == NULL)
2625 		goto ret_02;
2626 #ifdef INET6
2627 	if ((sc->sc_aip6 = art_alloc(0, 128, 0)) == NULL)
2628 		goto ret_03;
2629 #endif
2630
2631 	rw_init(&sc->sc_peer_lock, "wg_peer");
2632 	sc->sc_peer_num = 0;
2633 	if ((sc->sc_peer = hashinit(HASHTABLE_PEER_SIZE, M_DEVBUF,
2634 	    M_NOWAIT, &sc->sc_peer_mask)) == NULL)
2635 		goto ret_04;
2636
2637 	mtx_init(&sc->sc_index_mtx, IPL_NET);
2638 	if ((sc->sc_index = hashinit(HASHTABLE_INDEX_SIZE, M_DEVBUF,
2639 	    M_NOWAIT, &sc->sc_index_mask)) == NULL)
2640 		goto ret_05;
2641
2642 	task_set(&sc->sc_handshake, wg_handshake_worker, sc);
2643 	mq_init(&sc->sc_handshake_queue, MAX_QUEUED_HANDSHAKES, IPL_NET);
2644
2645 	task_set(&sc->sc_encap, wg_encap_worker, sc);
2646 	task_set(&sc->sc_decap, wg_decap_worker, sc);
2647
2648 	bzero(&sc->sc_encap_ring, sizeof(sc->sc_encap_ring));
2649 	mtx_init(&sc->sc_encap_ring.r_mtx, IPL_NET);
2650 	bzero(&sc->sc_decap_ring, sizeof(sc->sc_decap_ring));
2651 	mtx_init(&sc->sc_decap_ring.r_mtx, IPL_NET);
2652
2653 	/* We've setup the softc, now we can setup the ifnet */
2654 	ifp = &sc->sc_if;
2655 	ifp->if_softc = sc;
2656
2657 	snprintf(ifp->if_xname, sizeof(ifp->if_xname), "wg%d", unit);
2658
2659 	ifp->if_mtu = DEFAULT_MTU;
2660 	ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST | IFF_NOARP;
2661 	ifp->if_xflags = IFXF_CLONED | IFXF_MPSAFE;
2662 	ifp->if_txmit = 64; /* Keep our workers active for longer. */
2663
2664 	ifp->if_ioctl = wg_ioctl;
2665 	ifp->if_qstart = wg_qstart;
2666 	ifp->if_output = wg_output;
2667
2668 	ifp->if_type = IFT_WIREGUARD;
2669 	ifp->if_rtrequest = p2p_rtrequest;
2670
2671 	if_attach(ifp);
2672 	if_alloc_sadl(ifp);
2673 	if_counters_alloc(ifp);
2674
2675 #if NBPFILTER > 0
2676 	bpfattach(&ifp->if_bpf, ifp, DLT_LOOP, sizeof(uint32_t));
2677 #endif
2678
2679 	DPRINTF(sc, "Interface created\n");
2680
2681 	return 0;
2682 ret_05:
2683 	hashfree(sc->sc_peer, HASHTABLE_PEER_SIZE, M_DEVBUF);
2684 ret_04:
2685 #ifdef INET6
2686 	free(sc->sc_aip6, M_RTABLE, sizeof(*sc->sc_aip6));
2687 ret_03:
2688 #endif
2689 	free(sc->sc_aip4, M_RTABLE, sizeof(*sc->sc_aip4));
2690 ret_02:
2691 	cookie_checker_deinit(&sc->sc_cookie);
2692 ret_01:
2693 	free(sc, M_DEVBUF, sizeof(*sc));
2694 ret_00:
2695 	return ENOBUFS;
2696 }
2697 int
2698 wg_clone_destroy(struct ifnet *ifp)
2699 {
2700 	struct wg_softc *sc = ifp->if_softc;
2701 	struct wg_peer *peer, *tpeer;
2702
2703 	KERNEL_ASSERT_LOCKED();
2704
2705 	rw_enter_write(&sc->sc_lock);
2706 	TAILQ_FOREACH_SAFE(peer, &sc->sc_peer_seq, p_seq_entry, tpeer)
2707 		wg_peer_destroy(peer);
2708 	rw_exit_write(&sc->sc_lock);
2709
2710 	wg_unbind(sc);
2711 	if_detach(ifp);
2712
2713 	wg_counter--;
2714 	if (wg_counter == 0) {
2715 		KASSERT(wg_handshake_taskq != NULL && wg_crypt_taskq != NULL);
2716 		taskq_destroy(wg_handshake_taskq);
2717 		taskq_destroy(wg_crypt_taskq);
2718 		wg_handshake_taskq = NULL;
2719 		wg_crypt_taskq = NULL;
2720 	}
2721
2722 	DPRINTF(sc, "Destroyed interface\n");
2723
2724 	hashfree(sc->sc_index, HASHTABLE_INDEX_SIZE, M_DEVBUF);
2725 	hashfree(sc->sc_peer, HASHTABLE_PEER_SIZE, M_DEVBUF);
2726 #ifdef INET6
2727 	free(sc->sc_aip6, M_RTABLE, sizeof(*sc->sc_aip6));
2728 #endif
2729 	free(sc->sc_aip4, M_RTABLE, sizeof(*sc->sc_aip4));
2730 	cookie_checker_deinit(&sc->sc_cookie);
2731 	free(sc, M_DEVBUF, sizeof(*sc));
2732 	return 0;
2733 }
2734
2735 void
2736 wgattach(int nwg)
2737 {
2738 #ifdef WGTEST
2739 	cookie_test();
2740 	noise_test();
2741 #endif
2742 	if_clone_attach(&wg_cloner);
2743
2744 	pool_init(&wg_aip_pool, sizeof(struct wg_aip), 0,
2745 	    IPL_NET, 0, "wgaip", NULL);
2746 	pool_init(&wg_peer_pool, sizeof(struct wg_peer), 0,
2747 	    IPL_NET, 0, "wgpeer", NULL);
2748 	pool_init(&wg_ratelimit_pool, sizeof(struct ratelimit_entry), 0,
2749 	    IPL_NET, 0, "wgratelimit", NULL);
2750 }