Bug Summary

File: net/if.c
Warning: line 2016, column 2
Value stored to 'error' is never read
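
This is the analyzer's dead-store finding (part of the deadcode checker group enabled on the command line below): the value assigned to 'error' at if.c line 2016 is overwritten or goes out of scope before it is ever read. The flagged line lies beyond the portion of the file reproduced here; the following minimal sketch, using hypothetical helpers, only illustrates the kind of code that triggers this message:

    static int first_step(void);
    static int second_step(void);

    int
    example(void)
    {
            int error;

            error = first_step();   /* this stored value is never read ... */
            error = second_step();  /* ... it is overwritten before any use */
            return (error);
    }

Typical fixes are to check (or deliberately discard) the first return value, or to remove the redundant assignment.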

Annotated Source Code


clang -cc1 -cc1 -triple amd64-unknown-openbsd7.4 -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name if.c -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -ffp-contract=on -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -target-feature +retpoline-external-thunk -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/llvm16/lib/clang/16 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/legacy-dpm -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu13 -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/inc -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc/pmfw_if -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D SUSPEND -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant 
-fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fcf-protection=branch -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /home/ben/Projects/scan/2024-01-11-110808-61670-1 -x c /usr/src/sys/net/if.c
1/* $OpenBSD: if.c,v 1.716 2024/01/06 11:42:11 bluhm Exp $ */
2/* $NetBSD: if.c,v 1.35 1996/05/07 05:26:04 thorpej Exp $ */
3
4/*
5 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the name of the project nor the names of its contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
31 */
32
33/*
34 * Copyright (c) 1980, 1986, 1993
35 * The Regents of the University of California. All rights reserved.
36 *
37 * Redistribution and use in source and binary forms, with or without
38 * modification, are permitted provided that the following conditions
39 * are met:
40 * 1. Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * 2. Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in the
44 * documentation and/or other materials provided with the distribution.
45 * 3. Neither the name of the University nor the names of its contributors
46 * may be used to endorse or promote products derived from this software
47 * without specific prior written permission.
48 *
49 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
50 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
51 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
52 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
53 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
54 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
55 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
56 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
58 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
59 * SUCH DAMAGE.
60 *
61 * @(#)if.c 8.3 (Berkeley) 1/4/94
62 */
63
64#include "bpfilter.h"
65#include "bridge.h"
66#include "carp.h"
67#include "ether.h"
68#include "pf.h"
69#include "pfsync.h"
70#include "ppp.h"
71#include "pppoe.h"
72#include "if_wg.h"
73
74#include <sys/param.h>
75#include <sys/systm.h>
76#include <sys/mbuf.h>
77#include <sys/socket.h>
78#include <sys/socketvar.h>
79#include <sys/timeout.h>
80#include <sys/protosw.h>
81#include <sys/kernel.h>
82#include <sys/ioctl.h>
83#include <sys/domain.h>
84#include <sys/task.h>
85#include <sys/atomic.h>
86#include <sys/percpu.h>
87#include <sys/proc.h>
88#include <sys/stdint.h> /* uintptr_t */
89#include <sys/rwlock.h>
90#include <sys/smr.h>
91
92#include <net/if.h>
93#include <net/if_dl.h>
94#include <net/if_types.h>
95#include <net/route.h>
96#include <net/netisr.h>
97
98#include "vlan.h"
99#if NVLAN1 > 0
100#include <net/if_vlan_var.h>
101#endif
102
103#include <netinet/in.h>
104#include <netinet/if_ether.h>
105#include <netinet/igmp.h>
106#ifdef MROUTING1
107#include <netinet/ip_mroute.h>
108#endif
109#include <netinet/tcp.h>
110#include <netinet/tcp_timer.h>
111#include <netinet/tcp_var.h>
112
113#ifdef INET61
114#include <netinet6/in6_var.h>
115#include <netinet6/in6_ifattach.h>
116#include <netinet6/nd6.h>
117#include <netinet/ip6.h>
118#include <netinet6/ip6_var.h>
119#endif
120
121#ifdef MPLS1
122#include <netmpls/mpls.h>
123#endif
124
125#if NBPFILTER1 > 0
126#include <net/bpf.h>
127#endif
128
129#if NBRIDGE1 > 0
130#include <net/if_bridge.h>
131#endif
132
133#if NCARP1 > 0
134#include <netinet/ip_carp.h>
135#endif
136
137#if NPF1 > 0
138#include <net/pfvar.h>
139#endif
140
141#include <sys/device.h>
142
143void if_attachsetup(struct ifnet *);
144void if_attach_common(struct ifnet *);
145void if_remove(struct ifnet *);
146int if_createrdomain(int, struct ifnet *);
147int if_setrdomain(struct ifnet *, int);
148void if_slowtimo(void *);
149
150void if_detached_qstart(struct ifqueue *);
151int if_detached_ioctl(struct ifnet *, u_long, caddr_t);
152
153int ifioctl_get(u_long, caddr_t);
154int ifconf(caddr_t);
155static int
156 if_sffpage_check(const caddr_t);
157
158int if_getgroup(caddr_t, struct ifnet *);
159int if_getgroupmembers(caddr_t);
160int if_getgroupattribs(caddr_t);
161int if_setgroupattribs(caddr_t);
162int if_getgrouplist(caddr_t);
163
164void if_linkstate(struct ifnet *);
165void if_linkstate_task(void *);
166
167int if_clone_list(struct if_clonereq *);
168struct if_clone *if_clone_lookup(const char *, int *);
169
170int if_group_egress_build(void);
171
172void if_watchdog_task(void *);
173
174void if_netisr(void *);
175
176#ifdef DDB1
177void ifa_print_all(void);
178#endif
179
180void if_qstart_compat(struct ifqueue *);
181
182/*
183 * interface index map
184 *
185 * the kernel maintains a mapping of interface indexes to struct ifnet
186 * pointers.
187 *
188 * the map is an array of struct ifnet pointers prefixed by an if_map
189 * structure. the if_map structure stores the length of its array.
190 *
191 * as interfaces are attached to the system, the map is grown on demand
192 * up to USHRT_MAX entries.
193 *
194 * interface index 0 is reserved and represents no interface. this
195 * supports the use of the interface index as the scope for IPv6 link
196 * local addresses, where scope 0 means no scope has been specified.
197 * it also supports the use of interface index as the unique identifier
198 * for network interfaces in SNMP applications as per RFC2863. therefore
199 * if_get(0) returns NULL.
200 */
201
202struct ifnet *if_ref(struct ifnet *);
203
204/*
205 * struct if_idxmap
206 *
207 * infrastructure to manage updates and accesses to the current if_map.
208 *
209 * interface index 0 is special and represents "no interface", so we
210 * use the 0th slot in map to store the length of the array.
211 */
212
213struct if_idxmap {
214 unsigned int serial;
215 unsigned int count;
216 struct ifnet **map; /* SMR protected */
217 struct rwlock lock;
218 unsigned char *usedidx; /* bitmap of indices in use */
219};
220
221struct if_idxmap_dtor {
222 struct smr_entry smr;
223 struct ifnet **map;
224};
225
226void if_idxmap_init(unsigned int);
227void if_idxmap_free(void *);
228void if_idxmap_alloc(struct ifnet *);
229void if_idxmap_insert(struct ifnet *);
230void if_idxmap_remove(struct ifnet *);
231
232TAILQ_HEAD(, ifg_group) ifg_head =
233 TAILQ_HEAD_INITIALIZER(ifg_head); /* [N] list of interface groups */
234
235LIST_HEAD(, if_clone) if_cloners =
236 LIST_HEAD_INITIALIZER(if_cloners); /* [I] list of clonable interfaces */
237int if_cloners_count; /* [I] number of clonable interfaces */
238
239struct rwlock if_cloners_lock = RWLOCK_INITIALIZER("clonelk"){ 0, "clonelk" };
240
241/* hooks should only be added, deleted, and run from a process context */
242struct mutex if_hooks_mtx = MUTEX_INITIALIZER(IPL_NONE);
243void if_hooks_run(struct task_list *);
244
245int ifq_congestion;
246
247int netisr;
248
249struct softnet {
250 char sn_name[16];
251 struct taskq *sn_taskq;
252};
253
254#define NET_TASKQ 4
255struct softnet softnets[NET_TASKQ];
256
257struct task if_input_task_locked = TASK_INITIALIZER(if_netisr, NULL){{ ((void *)0), ((void *)0) }, (if_netisr), (((void *)0)), 0 };
258
259/*
260 * Serialize socket operations to ensure no new sleeping points
261 * are introduced in IP output paths.
262 */
263struct rwlock netlock = RWLOCK_INITIALIZER("netlock"){ 0, "netlock" };
264
265/*
266 * Network interface utility routines.
267 */
268void
269ifinit(void)
270{
271 unsigned int i;
272
273 /*
274 * most machines boot with 4 or 5 interfaces, so size the initial map
275 * to accommodate this
276 */
277 if_idxmap_init(8); /* 8 is a nice power of 2 for malloc */
278
279 for (i = 0; i < NET_TASKQ4; i++) {
280 struct softnet *sn = &softnets[i];
281 snprintf(sn->sn_name, sizeof(sn->sn_name), "softnet%u", i);
282 sn->sn_taskq = taskq_create(sn->sn_name, 1, IPL_NET0x4,
283 TASKQ_MPSAFE(1 << 0));
284 if (sn->sn_taskq == NULL((void *)0))
285 panic("unable to create network taskq %d", i);
286 }
287}
288
289static struct if_idxmap if_idxmap;
290
291/*
292 * XXXSMP: For `ifnetlist' modification both kernel and net locks
293 * should be taken. For read-only access only one lock of them required.
294 */
295struct ifnet_head ifnetlist = TAILQ_HEAD_INITIALIZER(ifnetlist){ ((void *)0), &(ifnetlist).tqh_first };
296
297static inline unsigned int
298if_idxmap_limit(struct ifnet **if_map)
299{
300 return ((uintptr_t)if_map[0]);
301}
302
303static inline size_t
304if_idxmap_usedidx_size(unsigned int limit)
305{
306 return (max(howmany(limit, NBBY)(((limit) + ((8) - 1)) / (8)), sizeof(struct if_idxmap_dtor)));
307}
308
309void
310if_idxmap_init(unsigned int limit)
311{
312 struct ifnet **if_map;
313
314 rw_init(&if_idxmap.lock, "idxmaplk");
315 if_idxmap.serial = 1; /* skip ifidx 0 */
316
317 if_map = mallocarray(limit, sizeof(*if_map), M_IFADDR9,
318 M_WAITOK0x0001 | M_ZERO0x0008);
319
320 if_map[0] = (struct ifnet *)(uintptr_t)limit;
321
322 if_idxmap.usedidx = malloc(if_idxmap_usedidx_size(limit),
323 M_IFADDR9, M_WAITOK0x0001 | M_ZERO0x0008);
324 setbit(if_idxmap.usedidx, 0); /* blacklist ifidx 0 */
325
326 /* this is called early so there's nothing to race with */
327 SMR_PTR_SET_LOCKED(&if_idxmap.map, if_map);
328}
329
330void
331if_idxmap_alloc(struct ifnet *ifp)
332{
333 struct ifnet **if_map;
334 unsigned int limit;
335 unsigned int index, i;
336
337 refcnt_init(&ifp->if_refcnt);
338
339 rw_enter_write(&if_idxmap.lock);
340
341 if (++if_idxmap.count >= USHRT_MAX0xffff)
342 panic("too many interfaces");
343
344 if_map = SMR_PTR_GET_LOCKED(&if_idxmap.map)(*(&if_idxmap.map));
345 limit = if_idxmap_limit(if_map);
346
347 index = if_idxmap.serial++ & USHRT_MAX0xffff;
348
349 if (index >= limit) {
350 struct if_idxmap_dtor *dtor;
351 struct ifnet **oif_map;
352 unsigned int olimit;
353 unsigned char *nusedidx;
354
355 oif_map = if_map;
356 olimit = limit;
357
358 limit = olimit * 2;
359 if_map = mallocarray(limit, sizeof(*if_map), M_IFADDR9,
360 M_WAITOK0x0001 | M_ZERO0x0008);
361 if_map[0] = (struct ifnet *)(uintptr_t)limit;
362
363 for (i = 1; i < olimit; i++) {
364 struct ifnet *oifp = SMR_PTR_GET_LOCKED(&oif_map[i])(*(&oif_map[i]));
365 if (oifp == NULL((void *)0))
366 continue;
367
368 /*
369 * nif_map isn't visible yet, so don't need
370 * SMR_PTR_SET_LOCKED and its membar.
371 */
372 if_map[i] = if_ref(oifp);
373 }
374
375 nusedidx = malloc(if_idxmap_usedidx_size(limit),
376 M_IFADDR9, M_WAITOK0x0001 | M_ZERO0x0008);
377 memcpy(nusedidx, if_idxmap.usedidx, howmany(olimit, NBBY));
378
379 /* use the old usedidx bitmap as an smr_entry for the if_map */
380 dtor = (struct if_idxmap_dtor *)if_idxmap.usedidx;
381 if_idxmap.usedidx = nusedidx;
382
383 SMR_PTR_SET_LOCKED(&if_idxmap.map, if_map);
384
385 dtor->map = oif_map;
386 smr_init(&dtor->smr);
387 smr_call(&dtor->smr, if_idxmap_free, dtor)smr_call_impl(&dtor->smr, if_idxmap_free, dtor, 0);
388 }
389
390 /* pick the next free index */
391 for (i = 0; i < USHRT_MAX0xffff; i++) {
392 if (index != 0 && isclr(if_idxmap.usedidx, index))
393 break;
394
395 index = if_idxmap.serial++ & USHRT_MAX0xffff;
396 }
397 KASSERT(index != 0 && index < limit);
398 KASSERT(isclr(if_idxmap.usedidx, index));
399
400 setbit(if_idxmap.usedidx, index);
401 ifp->if_index = index;
402
403 rw_exit_write(&if_idxmap.lock);
404}
405
406void
407if_idxmap_free(void *arg)
408{
409 struct if_idxmap_dtor *dtor = arg;
410 struct ifnet **oif_map = dtor->map;
411 unsigned int olimit = if_idxmap_limit(oif_map);
412 unsigned int i;
413
414 for (i = 1; i < olimit; i++)
415 if_put(oif_map[i]);
416
417 free(oif_map, M_IFADDR9, olimit * sizeof(*oif_map));
418 free(dtor, M_IFADDR9, if_idxmap_usedidx_size(olimit));
419}
420
421void
422if_idxmap_insert(struct ifnet *ifp)
423{
424 struct ifnet **if_map;
425 unsigned int index = ifp->if_index;
426
427 rw_enter_write(&if_idxmap.lock);
428
429 if_map = SMR_PTR_GET_LOCKED(&if_idxmap.map)(*(&if_idxmap.map));
430
431 KASSERTMSG(index != 0 && index < if_idxmap_limit(if_map),
432 "%s(%p) index %u vs limit %u", ifp->if_xname, ifp, index,
433 if_idxmap_limit(if_map));
434 KASSERT(SMR_PTR_GET_LOCKED(&if_map[index]) == NULL);
435 KASSERT(isset(if_idxmap.usedidx, index));
436
437 /* commit */
438 SMR_PTR_SET_LOCKED(&if_map[index], if_ref(ifp));
439
440 rw_exit_write(&if_idxmap.lock);
441}
442
443void
444if_idxmap_remove(struct ifnet *ifp)
445{
446 struct ifnet **if_map;
447 unsigned int index = ifp->if_index;
448
449 rw_enter_write(&if_idxmap.lock);
450
451 if_map = SMR_PTR_GET_LOCKED(&if_idxmap.map)(*(&if_idxmap.map));
452
453 KASSERT(index != 0 && index < if_idxmap_limit(if_map));
454 KASSERT(SMR_PTR_GET_LOCKED(&if_map[index]) == ifp);
455 KASSERT(isset(if_idxmap.usedidx, index));
456
457 SMR_PTR_SET_LOCKED(&if_map[index], NULL);
458
459 if_idxmap.count--;
460 clrbit(if_idxmap.usedidx, index);
461 /* end of if_idxmap modifications */
462
463 rw_exit_write(&if_idxmap.lock);
464
465 smr_barrier()smr_barrier_impl(0);
466 if_put(ifp);
467}
468
469/*
470 * Attach an interface to the
471 * list of "active" interfaces.
472 */
473void
474if_attachsetup(struct ifnet *ifp)
475{
476 unsigned long ifidx;
477
478 NET_ASSERT_LOCKED();
479
480 if_addgroup(ifp, IFG_ALL"all");
481
482#ifdef INET61
483 nd6_ifattach(ifp);
484#endif
485
486#if NPF1 > 0
487 pfi_attach_ifnet(ifp);
488#endif
489
490 timeout_set(&ifp->if_slowtimo, if_slowtimo, ifp);
491 if_slowtimo(ifp);
492
493 if_idxmap_insert(ifp);
494 KASSERT(if_get(0) == NULL);
495
496 ifidx = ifp->if_index;
497
498 task_set(&ifp->if_watchdogtask, if_watchdog_task, (void *)ifidx);
499 task_set(&ifp->if_linkstatetask, if_linkstate_task, (void *)ifidx);
500
501 /* Announce the interface. */
502 rtm_ifannounce(ifp, IFAN_ARRIVAL0);
503}
504
505/*
506 * Allocate the link level name for the specified interface. This
507 * is an attachment helper. It must be called after ifp->if_addrlen
508 * is initialized, which may not be the case when if_attach() is
509 * called.
510 */
511void
512if_alloc_sadl(struct ifnet *ifp)
513{
514 unsigned int socksize;
515 int namelen, masklen;
516 struct sockaddr_dl *sdl;
517
518 /*
519 * If the interface already has a link name, release it
520 * now. This is useful for interfaces that can change
521 * link types, and thus switch link names often.
522 */
523 if_free_sadl(ifp);
524
525 namelen = strlen(ifp->if_xname);
526 masklen = offsetof(struct sockaddr_dl, sdl_data[0])__builtin_offsetof(struct sockaddr_dl, sdl_data[0]) + namelen;
527 socksize = masklen + ifp->if_addrlenif_data.ifi_addrlen;
528#define ROUNDUP(a)(1 + (((a) - 1) | (sizeof(long) - 1))) (1 + (((a) - 1) | (sizeof(long) - 1)))
529 if (socksize < sizeof(*sdl))
530 socksize = sizeof(*sdl);
531 socksize = ROUNDUP(socksize)(1 + (((socksize) - 1) | (sizeof(long) - 1)));
532 sdl = malloc(socksize, M_IFADDR9, M_WAITOK0x0001|M_ZERO0x0008);
533 sdl->sdl_len = socksize;
534 sdl->sdl_family = AF_LINK18;
535 bcopy(ifp->if_xname, sdl->sdl_data, namelen);
536 sdl->sdl_nlen = namelen;
537 sdl->sdl_alen = ifp->if_addrlenif_data.ifi_addrlen;
538 sdl->sdl_index = ifp->if_index;
539 sdl->sdl_type = ifp->if_typeif_data.ifi_type;
540 ifp->if_sadl = sdl;
541}
542
543/*
544 * Free the link level name for the specified interface. This is
545 * a detach helper. This is called from if_detach() or from
546 * link layer type specific detach functions.
547 */
548void
549if_free_sadl(struct ifnet *ifp)
550{
551 if (ifp->if_sadl == NULL((void *)0))
552 return;
553
554 free(ifp->if_sadl, M_IFADDR9, ifp->if_sadl->sdl_len);
555 ifp->if_sadl = NULL((void *)0);
556}
557
558void
559if_attachhead(struct ifnet *ifp)
560{
561 if_attach_common(ifp);
562 NET_LOCK()do { rw_enter_write(&netlock); } while (0);
563 TAILQ_INSERT_HEAD(&ifnetlist, ifp, if_list);
564 if_attachsetup(ifp);
565 NET_UNLOCK()do { rw_exit_write(&netlock); } while (0);
566}
567
568void
569if_attach(struct ifnet *ifp)
570{
571 if_attach_common(ifp);
572 NET_LOCK()do { rw_enter_write(&netlock); } while (0);
573 TAILQ_INSERT_TAIL(&ifnetlist, ifp, if_list);
574 if_attachsetup(ifp);
575 NET_UNLOCK()do { rw_exit_write(&netlock); } while (0);
576}
577
578void
579if_attach_queues(struct ifnet *ifp, unsigned int nqs)
580{
581 struct ifqueue **map;
582 struct ifqueue *ifq;
583 int i;
584
585 KASSERT(ifp->if_ifqs == ifp->if_snd.ifq_ifqs);
586 KASSERT(nqs != 0);
587
588 map = mallocarray(sizeof(*map), nqs, M_DEVBUF2, M_WAITOK0x0001);
589
590 ifp->if_snd.ifq_softc_ifq_ptr._ifq_softc = NULL((void *)0);
591 map[0] = &ifp->if_snd;
592
593 for (i = 1; i < nqs; i++) {
594 ifq = malloc(sizeof(*ifq), M_DEVBUF2, M_WAITOK0x0001|M_ZERO0x0008);
595 ifq_init_maxlen(ifq, ifp->if_snd.ifq_maxlen);
596 ifq_init(ifq, ifp, i);
597 map[i] = ifq;
598 }
599
600 ifp->if_ifqs = map;
601 ifp->if_nifqs = nqs;
602}
603
604void
605if_attach_iqueues(struct ifnet *ifp, unsigned int niqs)
606{
607 struct ifiqueue **map;
608 struct ifiqueue *ifiq;
609 unsigned int i;
610
611 KASSERT(niqs != 0);
612
613 map = mallocarray(niqs, sizeof(*map), M_DEVBUF2, M_WAITOK0x0001);
614
615 ifp->if_rcv.ifiq_softc_ifiq_ptr._ifiq_softc = NULL((void *)0);
616 map[0] = &ifp->if_rcv;
617
618 for (i = 1; i < niqs; i++) {
619 ifiq = malloc(sizeof(*ifiq), M_DEVBUF2, M_WAITOK0x0001|M_ZERO0x0008);
620 ifiq_init(ifiq, ifp, i);
621 map[i] = ifiq;
622 }
623
624 ifp->if_iqs = map;
625 ifp->if_niqs = niqs;
626}
627
628void
629if_attach_common(struct ifnet *ifp)
630{
631 KASSERT(ifp->if_ioctl != NULL);
632
633 TAILQ_INIT(&ifp->if_addrlist);
634 TAILQ_INIT(&ifp->if_maddrlist);
635 TAILQ_INIT(&ifp->if_groups);
636
637 if (!ISSET(ifp->if_xflags, IFXF_MPSAFE)) {
638 KASSERTMSG(ifp->if_qstart == NULL,
639 "%s: if_qstart set without MPSAFE set", ifp->if_xname);
640 ifp->if_qstart = if_qstart_compat;
641 } else {
642 KASSERTMSG(ifp->if_start == NULL,
643 "%s: if_start set with MPSAFE set", ifp->if_xname);
644 KASSERTMSG(ifp->if_qstart != NULL,
645 "%s: if_qstart not set with MPSAFE set", ifp->if_xname);
647
648 if_idxmap_alloc(ifp);
649
650 ifq_init(&ifp->if_snd, ifp, 0);
651
652 ifp->if_snd.ifq_ifqs_ifq_ptr._ifq_ifqs[0] = &ifp->if_snd;
653 ifp->if_ifqs = ifp->if_snd.ifq_ifqs_ifq_ptr._ifq_ifqs;
654 ifp->if_nifqs = 1;
655 if (ifp->if_txmit == 0)
656 ifp->if_txmit = IF_TXMIT_DEFAULT16;
657
658 ifiq_init(&ifp->if_rcv, ifp, 0);
659
660 ifp->if_rcv.ifiq_ifiqs_ifiq_ptr._ifiq_ifiqs[0] = &ifp->if_rcv;
661 ifp->if_iqs = ifp->if_rcv.ifiq_ifiqs_ifiq_ptr._ifiq_ifiqs;
662 ifp->if_niqs = 1;
663
664 TAILQ_INIT(&ifp->if_addrhooks);
665 TAILQ_INIT(&ifp->if_linkstatehooks);
666 TAILQ_INIT(&ifp->if_detachhooks);
667
668 if (ifp->if_rtrequest == NULL((void *)0))
669 ifp->if_rtrequest = if_rtrequest_dummy;
670 if (ifp->if_enqueue == NULL((void *)0))
671 ifp->if_enqueue = if_enqueue_ifq;
672#if NBPFILTER1 > 0
673 if (ifp->if_bpf_mtap == NULL((void *)0))
674 ifp->if_bpf_mtap = bpf_mtap_ether;
675#endif
676 ifp->if_llprio = IFQ_DEFPRIO3;
677}
678
679void
680if_attach_ifq(struct ifnet *ifp, const struct ifq_ops *newops, void *args)
681{
682 /*
683 * only switch the ifq_ops on the first ifq on an interface.
684 *
685 * the only ifq_ops we provide priq and hfsc, and hfsc only
686 * works on a single ifq. because the code uses the ifq_ops
687 * on the first ifq (if_snd) to select a queue for an mbuf,
688 * by switching only the first one we change both the algorithm
689 * and force the routing of all new packets to it.
690 */
691 ifq_attach(&ifp->if_snd, newops, args);
692}
693
694void
695if_start(struct ifnet *ifp)
696{
697 KASSERT(ifp->if_qstart == if_qstart_compat);
698 if_qstart_compat(&ifp->if_snd);
699}
700void
701if_qstart_compat(struct ifqueue *ifq)
702{
703 struct ifnet *ifp = ifq->ifq_if;
704 int s;
705
706 /*
707 * the stack assumes that an interface can have multiple
708 * transmit rings, but a lot of drivers are still written
709 * so that interfaces and send rings have a 1:1 mapping.
710 * this provides compatibility between the stack and the older
711 * drivers by translating from the only queue they have
712 * (ifp->if_snd) back to the interface and calling if_start.
713 */
714
715 KERNEL_LOCK()_kernel_lock();
716 s = splnet()splraise(0x4);
717 (*ifp->if_start)(ifp);
718 splx(s)spllower(s);
719 KERNEL_UNLOCK()_kernel_unlock();
720}
721
722int
723if_enqueue(struct ifnet *ifp, struct mbuf *m)
724{
725 CLR(m->m_pkthdr.csum_flags, M_TIMESTAMP)((m->M_dat.MH.MH_pkthdr.csum_flags) &= ~(0x2000));
726
727#if NPF1 > 0
728 if (m->m_pkthdrM_dat.MH.MH_pkthdr.pf.delay > 0)
729 return (pf_delay_pkt(m, ifp->if_index));
730#endif
731
732#if NBRIDGE1 > 0
733 if (ifp->if_bridgeidx && (m->m_flagsm_hdr.mh_flags & M_PROTO10x0010) == 0) {
734 int error;
735
736 error = bridge_enqueue(ifp, m);
737 return (error);
738 }
739#endif
740
741#if NPF1 > 0
742 pf_pkt_addr_changed(m);
743#endif /* NPF > 0 */
744
745 return ((*ifp->if_enqueue)(ifp, m));
746}
747
748int
749if_enqueue_ifq(struct ifnet *ifp, struct mbuf *m)
750{
751 struct ifqueue *ifq = &ifp->if_snd;
752 int error;
753
754 if (ifp->if_nifqs > 1) {
755 unsigned int idx;
756
757 /*
758 * use the operations on the first ifq to pick which of
759 * the array gets this mbuf.
760 */
761
762 idx = ifq_idx(&ifp->if_snd, ifp->if_nifqs, m);
763 ifq = ifp->if_ifqs[idx];
764 }
765
766 error = ifq_enqueue(ifq, m);
767 if (error)
768 return (error);
769
770 ifq_start(ifq);
771
772 return (0);
773}
774
775void
776if_input(struct ifnet *ifp, struct mbuf_list *ml)
777{
778 ifiq_input(&ifp->if_rcv, ml);
779}
780
781int
782if_input_local(struct ifnet *ifp, struct mbuf *m, sa_family_t af)
783{
784 int keepflags, keepcksum;
785 uint16_t keepmss;
786
787#if NBPFILTER1 > 0
788 /*
789 * Only send packets to bpf if they are destined to local
790 * addresses.
791 *
792 * if_input_local() is also called for SIMPLEX interfaces to
793 * duplicate packets for local use. But don't dup them to bpf.
794 */
795 if (ifp->if_flags & IFF_LOOPBACK0x8) {
796 caddr_t if_bpf = ifp->if_bpf;
797
798 if (if_bpf)
799 bpf_mtap_af(if_bpf, af, m, BPF_DIRECTION_OUT(1 << 1));
800 }
801#endif
802 keepflags = m->m_flagsm_hdr.mh_flags & (M_BCAST0x0100|M_MCAST0x0200);
803 /*
804 * Preserve outgoing checksum flags, in case the packet is
805 * forwarded to another interface. Then the checksum, which
806 * is now incorrect, will be calculated before sending.
807 */
808 keepcksum = m->m_pkthdrM_dat.MH.MH_pkthdr.csum_flags & (M_IPV4_CSUM_OUT0x0001 |
809 M_TCP_CSUM_OUT0x0002 | M_UDP_CSUM_OUT0x0004 | M_ICMP_CSUM_OUT0x0200 |
810 M_TCP_TSO0x8000);
811 keepmss = m->m_pkthdrM_dat.MH.MH_pkthdr.ph_mss;
812 m_resethdr(m);
813 m->m_flagsm_hdr.mh_flags |= M_LOOP0x0040 | keepflags;
814 m->m_pkthdrM_dat.MH.MH_pkthdr.csum_flags = keepcksum;
815 m->m_pkthdrM_dat.MH.MH_pkthdr.ph_mss = keepmss;
816 m->m_pkthdrM_dat.MH.MH_pkthdr.ph_ifidx = ifp->if_index;
817 m->m_pkthdrM_dat.MH.MH_pkthdr.ph_rtableid = ifp->if_rdomainif_data.ifi_rdomain;
818
819 if (ISSET(keepcksum, M_TCP_TSO)((keepcksum) & (0x8000)) && m->m_pkthdrM_dat.MH.MH_pkthdr.len > ifp->if_mtuif_data.ifi_mtu) {
820 if (ifp->if_mtuif_data.ifi_mtu > 0 &&
821 ((af == AF_INET2 &&
822 ISSET(ifp->if_capabilities, IFCAP_TSOv4)((ifp->if_data.ifi_capabilities) & (0x00001000))) ||
823 (af == AF_INET624 &&
824 ISSET(ifp->if_capabilities, IFCAP_TSOv6)((ifp->if_data.ifi_capabilities) & (0x00002000))))) {
825 tcpstat_inc(tcps_inswlro);
826 tcpstat_add(tcps_inpktlro,
827 (m->m_pkthdrM_dat.MH.MH_pkthdr.len + ifp->if_mtuif_data.ifi_mtu - 1) / ifp->if_mtuif_data.ifi_mtu);
828 } else {
829 tcpstat_inc(tcps_inbadlro);
830 m_freem(m);
831 return (EPROTONOSUPPORT43);
832 }
833 }
834
835 if (ISSET(keepcksum, M_TCP_CSUM_OUT)((keepcksum) & (0x0002)))
836 m->m_pkthdrM_dat.MH.MH_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK0x0020;
837 if (ISSET(keepcksum, M_UDP_CSUM_OUT)((keepcksum) & (0x0004)))
838 m->m_pkthdrM_dat.MH.MH_pkthdr.csum_flags |= M_UDP_CSUM_IN_OK0x0080;
839 if (ISSET(keepcksum, M_ICMP_CSUM_OUT)((keepcksum) & (0x0200)))
840 m->m_pkthdrM_dat.MH.MH_pkthdr.csum_flags |= M_ICMP_CSUM_IN_OK0x0400;
841
842 /* do not count multicast loopback and simplex interfaces */
843 if (ISSET(ifp->if_flags, IFF_LOOPBACK)((ifp->if_flags) & (0x8))) {
844 counters_pkt(ifp->if_counters, ifc_opackets, ifc_obytes,
845 m->m_pkthdrM_dat.MH.MH_pkthdr.len);
846 }
847
848 switch (af) {
849 case AF_INET2:
850 if (ISSET(keepcksum, M_IPV4_CSUM_OUT)((keepcksum) & (0x0001)))
851 m->m_pkthdrM_dat.MH.MH_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK0x0008;
852 ipv4_input(ifp, m);
853 break;
854#ifdef INET61
855 case AF_INET624:
856 ipv6_input(ifp, m);
857 break;
858#endif /* INET6 */
859#ifdef MPLS1
860 case AF_MPLS33:
861 mpls_input(ifp, m);
862 break;
863#endif /* MPLS */
864 default:
865 printf("%s: can't handle af%d\n", ifp->if_xname, af);
866 m_freem(m);
867 return (EAFNOSUPPORT47);
868 }
869
870 return (0);
871}
872
873int
874if_output_ml(struct ifnet *ifp, struct mbuf_list *ml,
875 struct sockaddr *dst, struct rtentry *rt)
876{
877 struct mbuf *m;
878 int error = 0;
879
880 while ((m = ml_dequeue(ml)) != NULL((void *)0)) {
881 error = ifp->if_output(ifp, m, dst, rt);
882 if (error)
883 break;
884 }
885 if (error)
886 ml_purge(ml);
887
888 return error;
889}
890
891int
892if_output_tso(struct ifnet *ifp, struct mbuf **mp, struct sockaddr *dst,
893 struct rtentry *rt, u_int mtu)
894{
895 uint32_t ifcap;
896 int error;
897
898 switch (dst->sa_family) {
899 case AF_INET2:
900 ifcap = IFCAP_TSOv40x00001000;
901 break;
902#ifdef INET61
903 case AF_INET624:
904 ifcap = IFCAP_TSOv60x00002000;
905 break;
906#endif
907 default:
908 unhandled_af(dst->sa_family);
909 }
910
911 /*
912 * Try to send with TSO first. When forwarding LRO may set
913 * maximum segment size in mbuf header. Chop TCP segment
914 * even if it would fit interface MTU to preserve maximum
915 * path MTU.
916 */
917 error = tcp_if_output_tso(ifp, mp, dst, rt, ifcap, mtu);
918 if (error || *mp == NULL((void *)0))
919 return error;
920
921 if ((*mp)->m_pkthdrM_dat.MH.MH_pkthdr.len <= mtu) {
922 switch (dst->sa_family) {
923 case AF_INET2:
924 in_hdr_cksum_out(*mp, ifp);
925 in_proto_cksum_out(*mp, ifp);
926 break;
927#ifdef INET61
928 case AF_INET624:
929 in6_proto_cksum_out(*mp, ifp);
930 break;
931#endif
932 }
933 error = ifp->if_output(ifp, *mp, dst, rt);
934 *mp = NULL((void *)0);
935 return error;
936 }
937
938 /* mp still contains mbuf that has to be fragmented or dropped. */
939 return 0;
940}
941
942int
943if_output_mq(struct ifnet *ifp, struct mbuf_queue *mq, unsigned int *total,
944 struct sockaddr *dst, struct rtentry *rt)
945{
946 struct mbuf_list ml;
947 unsigned int len;
948 int error;
949
950 mq_delist(mq, &ml);
951 len = ml_len(&ml)((&ml)->ml_len);
952 error = if_output_ml(ifp, &ml, dst, rt);
953
954 /* XXXSMP we also discard if other CPU enqueues */
955 if (mq_len(mq) > 0) {
956 /* mbuf is back in queue. Discard. */
957 atomic_sub_int(total, len + mq_purge(mq))_atomic_sub_int(total, len + mq_purge(mq));
958 } else
959 atomic_sub_int(total, len)_atomic_sub_int(total, len);
960
961 return error;
962}
963
964int
965if_output_local(struct ifnet *ifp, struct mbuf *m, sa_family_t af)
966{
967 struct ifiqueue *ifiq;
968 unsigned int flow = 0;
969
970 m->m_pkthdrM_dat.MH.MH_pkthdr.ph_family = af;
971 m->m_pkthdrM_dat.MH.MH_pkthdr.ph_ifidx = ifp->if_index;
972 m->m_pkthdrM_dat.MH.MH_pkthdr.ph_rtableid = ifp->if_rdomainif_data.ifi_rdomain;
973
974 if (ISSET(m->m_pkthdr.csum_flags, M_FLOWID)((m->M_dat.MH.MH_pkthdr.csum_flags) & (0x4000)))
975 flow = m->m_pkthdrM_dat.MH.MH_pkthdr.ph_flowid;
976
977 ifiq = ifp->if_iqs[flow % ifp->if_niqs];
978
979 return (ifiq_enqueue(ifiq, m) == 0 ? 0 : ENOBUFS55);
980}
981
982void
983if_input_process(struct ifnet *ifp, struct mbuf_list *ml)
984{
985 struct mbuf *m;
986
987 if (ml_empty(ml)((ml)->ml_len == 0))
988 return;
989
990 if (!ISSET(ifp->if_xflags, IFXF_CLONED)((ifp->if_xflags) & (0x2)))
991 enqueue_randomness(ml_len(ml)((ml)->ml_len) ^ (uintptr_t)MBUF_LIST_FIRST(ml)((ml)->ml_head));
992
993 /*
994 * We grab the shared netlock for packet processing in the softnet
995 * threads. Packets can regrab the exclusive lock via queues.
996 * ioctl, sysctl, and socket syscall may use shared lock if access is
997 * read only or MP safe. Usually they hold the exclusive net lock.
998 */
999
1000 NET_LOCK_SHARED()do { rw_enter_read(&netlock); } while (0);
1001 while ((m = ml_dequeue(ml)) != NULL((void *)0))
1002 (*ifp->if_input)(ifp, m);
1003 NET_UNLOCK_SHARED()do { rw_exit_read(&netlock); } while (0);
1004}
1005
1006void
1007if_vinput(struct ifnet *ifp, struct mbuf *m)
1008{
1009#if NBPFILTER1 > 0
1010 caddr_t if_bpf;
1011#endif
1012
1013 m->m_pkthdrM_dat.MH.MH_pkthdr.ph_ifidx = ifp->if_index;
1014 m->m_pkthdrM_dat.MH.MH_pkthdr.ph_rtableid = ifp->if_rdomainif_data.ifi_rdomain;
1015
1016 counters_pkt(ifp->if_counters,
1017 ifc_ipackets, ifc_ibytes, m->m_pkthdrM_dat.MH.MH_pkthdr.len);
1018
1019#if NPF1 > 0
1020 pf_pkt_addr_changed(m);
1021#endif
1022
1023#if NBPFILTER1 > 0
1024 if_bpf = ifp->if_bpf;
1025 if (if_bpf) {
1026 if ((*ifp->if_bpf_mtap)(if_bpf, m, BPF_DIRECTION_IN(1 << 0))) {
1027 m_freem(m);
1028 return;
1029 }
1030 }
1031#endif
1032
1033 if (__predict_true(!ISSET(ifp->if_xflags, IFXF_MONITOR)))
1034 (*ifp->if_input)(ifp, m);
1035 else
1036 m_freem(m);
1037}
1038
1039void
1040if_netisr(void *unused)
1041{
1042 int n, t = 0;
1043
1044 NET_LOCK()do { rw_enter_write(&netlock); } while (0);
1045
1046 while ((n = netisr) != 0) {
1047 /* Like sched_pause() but with a rwlock dance. */
1048 if (curcpu()->ci_schedstate.spc_schedflags & SPCF_SHOULDYIELD) {
1049 NET_UNLOCK()do { rw_exit_write(&netlock); } while (0);
1050 yield();
1051 NET_LOCK()do { rw_enter_write(&netlock); } while (0);
1052 }
1053
1054 atomic_clearbits_intx86_atomic_clearbits_u32(&netisr, n);
1055
1056#if NETHER1 > 0
1057 if (n & (1 << NETISR_ARP18))
1058 arpintr();
1059#endif
1060 if (n & (1 << NETISR_IP2))
1061 ipintr();
1062#ifdef INET61
1063 if (n & (1 << NETISR_IPV624))
1064 ip6intr();
1065#endif
1066#if NPPP1 > 0
1067 if (n & (1 << NETISR_PPP28)) {
1068 KERNEL_LOCK()_kernel_lock();
1069 pppintr();
1070 KERNEL_UNLOCK()_kernel_unlock();
1071 }
1072#endif
1073#if NBRIDGE1 > 0
1074 if (n & (1 << NETISR_BRIDGE29))
1075 bridgeintr();
1076#endif
1077#ifdef PIPEX1
1078 if (n & (1 << NETISR_PIPEX27))
1079 pipexintr();
1080#endif
1081#if NPPPOE1 > 0
1082 if (n & (1 << NETISR_PPPOE30)) {
1083 KERNEL_LOCK()_kernel_lock();
1084 pppoeintr();
1085 KERNEL_UNLOCK()_kernel_unlock();
1086 }
1087#endif
1088 t |= n;
1089 }
1090
1091 NET_UNLOCK()do { rw_exit_write(&netlock); } while (0);
1092}
1093
1094void
1095if_hooks_run(struct task_list *hooks)
1096{
1097 struct task *t, *nt;
1098 struct task cursor = { .t_func = NULL((void *)0) };
1099 void (*func)(void *);
1100 void *arg;
1101
1102 mtx_enter(&if_hooks_mtx);
1103 for (t = TAILQ_FIRST(hooks)((hooks)->tqh_first); t != NULL((void *)0); t = nt) {
1104 if (t->t_func == NULL((void *)0)) { /* skip cursors */
1105 nt = TAILQ_NEXT(t, t_entry)((t)->t_entry.tqe_next);
1106 continue;
1107 }
1108 func = t->t_func;
1109 arg = t->t_arg;
1110
1111 TAILQ_INSERT_AFTER(hooks, t, &cursor, t_entry);
1112 mtx_leave(&if_hooks_mtx);
1113
1114 (*func)(arg);
1115
1116 mtx_enter(&if_hooks_mtx);
1117 nt = TAILQ_NEXT(&cursor, t_entry)((&cursor)->t_entry.tqe_next); /* avoid _Q_INVALIDATE */
1118 TAILQ_REMOVE(hooks, &cursor, t_entry);
1119 }
1120 mtx_leave(&if_hooks_mtx);
1121}
1122
1123void
1124if_remove(struct ifnet *ifp)
1125{
1126 /* Remove the interface from the list of all interfaces. */
1127 NET_LOCK()do { rw_enter_write(&netlock); } while (0);
1128 TAILQ_REMOVE(&ifnetlist, ifp, if_list);
1129 NET_UNLOCK()do { rw_exit_write(&netlock); } while (0);
1130
1131 /* Remove the interface from the interface index map. */
1132 if_idxmap_remove(ifp);
1133
1134 /* Sleep until the last reference is released. */
1135 refcnt_finalize(&ifp->if_refcnt, "ifrm");
1136}
1137
1138void
1139if_deactivate(struct ifnet *ifp)
1140{
1141 /*
1142 * Call detach hooks from head to tail. To make sure detach
1143 * hooks are executed in the reverse order they were added, all
1144 * the hooks have to be added to the head!
1145 */
1146
1147 NET_LOCK()do { rw_enter_write(&netlock); } while (0);
1148 if_hooks_run(&ifp->if_detachhooks);
1149 NET_UNLOCK()do { rw_exit_write(&netlock); } while (0);
1150}
1151
1152void
1153if_detachhook_add(struct ifnet *ifp, struct task *t)
1154{
1155 mtx_enter(&if_hooks_mtx);
1156 TAILQ_INSERT_HEAD(&ifp->if_detachhooks, t, t_entry);
1157 mtx_leave(&if_hooks_mtx);
1158}
1159
1160void
1161if_detachhook_del(struct ifnet *ifp, struct task *t)
1162{
1163 mtx_enter(&if_hooks_mtx);
1164 TAILQ_REMOVE(&ifp->if_detachhooks, t, t_entry);
1165 mtx_leave(&if_hooks_mtx);
1166}
1167
1168/*
1169 * Detach an interface from everything in the kernel. Also deallocate
1170 * private resources.
1171 */
1172void
1173if_detach(struct ifnet *ifp)
1174{
1175 struct ifaddr *ifa;
1176 struct ifg_list *ifg;
1177 int i, s;
1178
1179 /* Undo pseudo-driver changes. */
1180 if_deactivate(ifp);
1181
1182 /* Other CPUs must not have a reference before we start destroying. */
1183 if_remove(ifp);
1184
1185 ifp->if_qstart = if_detached_qstart;
1186
1187 /* Wait until the start routines finished. */
1188 ifq_barrier(&ifp->if_snd);
1189 ifq_clr_oactive(&ifp->if_snd);
1190
1191#if NBPFILTER1 > 0
1192 bpfdetach(ifp);
1193#endif
1194
1195 NET_LOCK()do { rw_enter_write(&netlock); } while (0);
1196 s = splnet()splraise(0x4);
1197 ifp->if_ioctl = if_detached_ioctl;
1198 ifp->if_watchdog = NULL((void *)0);
1199
1200 /* Remove the watchdog timeout & task */
1201 timeout_del(&ifp->if_slowtimo);
1202 task_del(net_tq(ifp->if_index), &ifp->if_watchdogtask);
1203
1204 /* Remove the link state task */
1205 task_del(net_tq(ifp->if_index), &ifp->if_linkstatetask);
1206
1207 rti_delete(ifp);
1208#if NETHER1 > 0 && defined(NFSCLIENT1)
1209 if (ifp->if_index == revarp_ifidx)
1210 revarp_ifidx = 0;
1211#endif
1212#ifdef MROUTING1
1213 vif_delete(ifp);
1214#endif
1215 in_ifdetach(ifp);
1216#ifdef INET61
1217 in6_ifdetach(ifp);
1218#endif
1219#if NPF1 > 0
1220 pfi_detach_ifnet(ifp);
1221#endif
1222
1223 while ((ifg = TAILQ_FIRST(&ifp->if_groups)((&ifp->if_groups)->tqh_first)) != NULL((void *)0))
1224 if_delgroup(ifp, ifg->ifgl_group->ifg_group);
1225
1226 if_free_sadl(ifp);
1227
1228 /* We should not have any address left at this point. */
1229 if (!TAILQ_EMPTY(&ifp->if_addrlist)(((&ifp->if_addrlist)->tqh_first) == ((void *)0))) {
1230#ifdef DIAGNOSTIC1
1231 printf("%s: address list non empty\n", ifp->if_xname);
1232#endif
1233 while ((ifa = TAILQ_FIRST(&ifp->if_addrlist)((&ifp->if_addrlist)->tqh_first)) != NULL((void *)0)) {
1234 ifa_del(ifp, ifa);
1235 ifa->ifa_ifp = NULL((void *)0);
1236 ifafree(ifa);
1237 }
1238 }
1239 splx(s)spllower(s);
1240 NET_UNLOCK()do { rw_exit_write(&netlock); } while (0);
1241
1242 KASSERT(TAILQ_EMPTY(&ifp->if_addrhooks));
1243 KASSERT(TAILQ_EMPTY(&ifp->if_linkstatehooks));
1244 KASSERT(TAILQ_EMPTY(&ifp->if_detachhooks));
1245
1246#ifdef INET61
1247 nd6_ifdetach(ifp);
1248#endif
1249
1250 /* Announce that the interface is gone. */
1251 rtm_ifannounce(ifp, IFAN_DEPARTURE1);
1252
1253 if (ifp->if_counters != NULL((void *)0))
1254 if_counters_free(ifp);
1255
1256 for (i = 0; i < ifp->if_nifqs; i++)
1257 ifq_destroy(ifp->if_ifqs[i]);
1258 if (ifp->if_ifqs != ifp->if_snd.ifq_ifqs_ifq_ptr._ifq_ifqs) {
1259 for (i = 1; i < ifp->if_nifqs; i++) {
1260 free(ifp->if_ifqs[i], M_DEVBUF2,
1261 sizeof(struct ifqueue));
1262 }
1263 free(ifp->if_ifqs, M_DEVBUF2,
1264 sizeof(struct ifqueue *) * ifp->if_nifqs);
1265 }
1266
1267 for (i = 0; i < ifp->if_niqs; i++)
1268 ifiq_destroy(ifp->if_iqs[i]);
1269 if (ifp->if_iqs != ifp->if_rcv.ifiq_ifiqs_ifiq_ptr._ifiq_ifiqs) {
1270 for (i = 1; i < ifp->if_niqs; i++) {
1271 free(ifp->if_iqs[i], M_DEVBUF2,
1272 sizeof(struct ifiqueue));
1273 }
1274 free(ifp->if_iqs, M_DEVBUF2,
1275 sizeof(struct ifiqueue *) * ifp->if_niqs);
1276 }
1277}
1278
1279/*
1280 * Returns true if ``ifp0'' is connected to the interface with index ``ifidx''.
1281 */
1282int
1283if_isconnected(const struct ifnet *ifp0, unsigned int ifidx)
1284{
1285 struct ifnet *ifp;
1286 int connected = 0;
1287
1288 ifp = if_get(ifidx);
1289 if (ifp == NULL((void *)0))
1290 return (0);
1291
1292 if (ifp0->if_index == ifp->if_index)
1293 connected = 1;
1294
1295#if NBRIDGE1 > 0
1296 if (ifp0->if_bridgeidx != 0 && ifp0->if_bridgeidx == ifp->if_bridgeidx)
1297 connected = 1;
1298#endif
1299#if NCARP1 > 0
1300 if ((ifp0->if_typeif_data.ifi_type == IFT_CARP0xf7 &&
1301 ifp0->if_carpdevidxif_carp_ptr.carp_idx == ifp->if_index) ||
1302 (ifp->if_typeif_data.ifi_type == IFT_CARP0xf7 && ifp->if_carpdevidxif_carp_ptr.carp_idx == ifp0->if_index))
1303 connected = 1;
1304#endif
1305
1306 if_put(ifp);
1307 return (connected);
1308}
1309
1310/*
1311 * Create a clone network interface.
1312 */
1313int
1314if_clone_create(const char *name, int rdomain)
1315{
1316 struct if_clone *ifc;
1317 struct ifnet *ifp;
1318 int unit, ret;
1319
1320 ifc = if_clone_lookup(name, &unit);
1321 if (ifc == NULL((void *)0))
1322 return (EINVAL22);
1323
1324 rw_enter_write(&if_cloners_lock);
1325
1326 if ((ifp = if_unit(name)) != NULL((void *)0)) {
1327 ret = EEXIST17;
1328 goto unlock;
1329 }
1330
1331 ret = (*ifc->ifc_create)(ifc, unit);
1332
1333 if (ret != 0 || (ifp = if_unit(name)) == NULL((void *)0))
1334 goto unlock;
1335
1336 NET_LOCK()do { rw_enter_write(&netlock); } while (0);
1337 if_addgroup(ifp, ifc->ifc_name);
1338 if (rdomain != 0)
1339 if_setrdomain(ifp, rdomain);
1340 NET_UNLOCK()do { rw_exit_write(&netlock); } while (0);
1341unlock:
1342 rw_exit_write(&if_cloners_lock);
1343 if_put(ifp);
1344
1345 return (ret);
1346}
1347
1348/*
1349 * Destroy a clone network interface.
1350 */
1351int
1352if_clone_destroy(const char *name)
1353{
1354 struct if_clone *ifc;
1355 struct ifnet *ifp;
1356 int ret;
1357
1358 ifc = if_clone_lookup(name, NULL((void *)0));
1359 if (ifc == NULL((void *)0))
1360 return (EINVAL22);
1361
1362 if (ifc->ifc_destroy == NULL((void *)0))
1363 return (EOPNOTSUPP45);
1364
1365 rw_enter_write(&if_cloners_lock);
1366
1367 TAILQ_FOREACH(ifp, &ifnetlist, if_list) {
1368 if (strcmp(ifp->if_xname, name) == 0)
1369 break;
1370 }
1371 if (ifp == NULL((void *)0)) {
1372 rw_exit_write(&if_cloners_lock);
1373 return (ENXIO6);
1374 }
1375
1376 NET_LOCK()do { rw_enter_write(&netlock); } while (0);
1377 if (ifp->if_flags & IFF_UP0x1) {
1378 int s;
1379 s = splnet()splraise(0x4);
1380 if_down(ifp);
1381 splx(s)spllower(s);
1382 }
1383 NET_UNLOCK()do { rw_exit_write(&netlock); } while (0);
1384 ret = (*ifc->ifc_destroy)(ifp);
1385
1386 rw_exit_write(&if_cloners_lock);
1387
1388 return (ret);
1389}
1390
1391/*
1392 * Look up a network interface cloner.
1393 */
1394struct if_clone *
1395if_clone_lookup(const char *name, int *unitp)
1396{
1397 struct if_clone *ifc;
1398 const char *cp;
1399 int unit;
1400
1401 /* separate interface name from unit */
1402 for (cp = name;
1403 cp - name < IFNAMSIZ16 && *cp && (*cp < '0' || *cp > '9');
1404 cp++)
1405 continue;
1406
1407 if (cp == name || cp - name == IFNAMSIZ16 || !*cp)
1408 return (NULL((void *)0)); /* No name or unit number */
1409
1410 if (cp - name < IFNAMSIZ16-1 && *cp == '0' && cp[1] != '\0')
1411 return (NULL((void *)0)); /* unit number 0 padded */
1412
1413 LIST_FOREACH(ifc, &if_cloners, ifc_list) {
1414 if (strlen(ifc->ifc_name) == cp - name &&
1415 !strncmp(name, ifc->ifc_name, cp - name))
1416 break;
1417 }
1418
1419 if (ifc == NULL((void *)0))
1420 return (NULL((void *)0));
1421
1422 unit = 0;
1423 while (cp - name < IFNAMSIZ16 && *cp) {
1424 if (*cp < '0' || *cp > '9' ||
1425 unit > (INT_MAX0x7fffffff - (*cp - '0')) / 10) {
1426 /* Bogus unit number. */
1427 return (NULL((void *)0));
1428 }
1429 unit = (unit * 10) + (*cp++ - '0');
1430 }
1431
1432 if (unitp != NULL((void *)0))
1433 *unitp = unit;
1434 return (ifc);
1435}
1436
1437/*
1438 * Register a network interface cloner.
1439 */
1440void
1441if_clone_attach(struct if_clone *ifc)
1442{
1443 /*
1444 * we are called at kernel boot by main(), when pseudo devices are
1445 * being attached. The main() is the only guy which may alter the
1446 * if_cloners. While system is running and main() is done with
1447 * initialization, the if_cloners becomes immutable.
1448 */
1449 KASSERT(pdevinit_done == 0);
1450 LIST_INSERT_HEAD(&if_cloners, ifc, ifc_list);
1451 if_cloners_count++;
1452}
1453
1454/*
1455 * Provide list of interface cloners to userspace.
1456 */
1457int
1458if_clone_list(struct if_clonereq *ifcr)
1459{
1460 char outbuf[IFNAMSIZ16], *dst;
1461 struct if_clone *ifc;
1462 int count, error = 0;
1463
1464 if ((dst = ifcr->ifcr_buffer) == NULL((void *)0)) {
1465 /* Just asking how many there are. */
1466 ifcr->ifcr_total = if_cloners_count;
1467 return (0);
1468 }
1469
1470 if (ifcr->ifcr_count < 0)
1471 return (EINVAL22);
1472
1473 ifcr->ifcr_total = if_cloners_count;
1474 count = MIN(if_cloners_count, ifcr->ifcr_count);
1475
1476 LIST_FOREACH(ifc, &if_cloners, ifc_list) {
1477 if (count == 0)
1478 break;
1479 bzero(outbuf, sizeof outbuf)__builtin_bzero((outbuf), (sizeof outbuf));
1480 strlcpy(outbuf, ifc->ifc_name, IFNAMSIZ16);
1481 error = copyout(outbuf, dst, IFNAMSIZ16);
1482 if (error)
1483 break;
1484 count--;
1485 dst += IFNAMSIZ16;
1486 }
1487
1488 return (error);
1489}
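Editorial aside: if_clone_list() supports the usual two-pass pattern: a first SIOCIFGCLONERS call with a NULL buffer only reports the total, and a second call copies out one IFNAMSIZ-sized name per cloner. A hedged userland sketch (includes and error handling omitted; s is an assumed open socket; not part of if.c):

	struct if_clonereq ifcr;

	memset(&ifcr, 0, sizeof(ifcr));
	ioctl(s, SIOCIFGCLONERS, &ifcr);	/* ifcr_buffer == NULL: count only */
	ifcr.ifcr_count = ifcr.ifcr_total;
	ifcr.ifcr_buffer = calloc(ifcr.ifcr_count, IFNAMSIZ);
	ioctl(s, SIOCIFGCLONERS, &ifcr);	/* names follow, IFNAMSIZ bytes apart */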
1490
1491/*
1492 * set queue congestion marker
1493 */
1494void
1495if_congestion(void)
1496{
1497 extern int ticks;
1498
1499 ifq_congestion = ticks;
1500}
1501
1502int
1503if_congested(void)
1504{
1505 extern int ticks;
1506 int diff;
1507
1508 diff = ticks - ifq_congestion;
1509 if (diff < 0) {
1510 ifq_congestion = ticks - hz;
1511 return (0);
1512 }
1513
1514 return (diff <= (hz / 100));
1515}
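Editorial aside: if_congestion() stamps the current tick count and if_congested() reports congestion only while that stamp is at most hz/100 ticks old (about 10 ms at hz = 100), resetting the stamp if the tick counter appears to have wrapped. A hypothetical in-kernel caller (not part of if.c):

	/* e.g. in an input path, prefer dropping while queues are congested */
	if (if_congested()) {
		m_freem(m);	/* m: assumed mbuf in scope */
		return;
	}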
1516
1517#define equal(a1, a2)(bcmp((caddr_t)(a1), (caddr_t)(a2), (a1)->sa_len) == 0) \
1518 (bcmp((caddr_t)(a1), (caddr_t)(a2), \
1519 (a1)->sa_len) == 0)
1520
1521/*
1522 * Locate an interface based on a complete address.
1523 */
1524struct ifaddr *
1525ifa_ifwithaddr(const struct sockaddr *addr, u_int rtableid)
1526{
1527 struct ifnet *ifp;
1528 struct ifaddr *ifa;
1529 u_int rdomain;
1530
1531 NET_ASSERT_LOCKED()do { int _s = rw_status(&netlock); if ((splassert_ctl >
0) && (_s != 0x0001UL && _s != 0x0002UL)) splassert_fail
(0x0002UL, _s, __func__); } while (0)
;
1532
1533 rdomain = rtable_l2(rtableid);
1534 TAILQ_FOREACH(ifp, &ifnetlist, if_list)for((ifp) = ((&ifnetlist)->tqh_first); (ifp) != ((void
*)0); (ifp) = ((ifp)->if_list.tqe_next))
{
1535 if (ifp->if_rdomainif_data.ifi_rdomain != rdomain)
1536 continue;
1537
1538 TAILQ_FOREACH(ifa, &ifp->if_addrlist, ifa_list)for((ifa) = ((&ifp->if_addrlist)->tqh_first); (ifa)
!= ((void *)0); (ifa) = ((ifa)->ifa_list.tqe_next))
{
1539 if (ifa->ifa_addr->sa_family != addr->sa_family)
1540 continue;
1541
1542 if (equal(addr, ifa->ifa_addr)(bcmp((caddr_t)(addr), (caddr_t)(ifa->ifa_addr), (addr)->
sa_len) == 0)
) {
1543 return (ifa);
1544 }
1545 }
1546 }
1547 return (NULL((void *)0));
1548}
1549
1550/*
1551 * Locate the point to point interface with a given destination address.
1552 */
1553struct ifaddr *
1554ifa_ifwithdstaddr(const struct sockaddr *addr, u_int rdomain)
1555{
1556 struct ifnet *ifp;
1557 struct ifaddr *ifa;
1558
1559 NET_ASSERT_LOCKED()do { int _s = rw_status(&netlock); if ((splassert_ctl >
0) && (_s != 0x0001UL && _s != 0x0002UL)) splassert_fail
(0x0002UL, _s, __func__); } while (0)
;
1560
1561 rdomain = rtable_l2(rdomain);
1562 TAILQ_FOREACH(ifp, &ifnetlist, if_list)for((ifp) = ((&ifnetlist)->tqh_first); (ifp) != ((void
*)0); (ifp) = ((ifp)->if_list.tqe_next))
{
1563 if (ifp->if_rdomainif_data.ifi_rdomain != rdomain)
1564 continue;
1565 if (ifp->if_flags & IFF_POINTOPOINT0x10) {
1566 TAILQ_FOREACH(ifa, &ifp->if_addrlist, ifa_list)for((ifa) = ((&ifp->if_addrlist)->tqh_first); (ifa)
!= ((void *)0); (ifa) = ((ifa)->ifa_list.tqe_next))
{
1567 if (ifa->ifa_addr->sa_family !=
1568 addr->sa_family || ifa->ifa_dstaddr == NULL((void *)0))
1569 continue;
1570 if (equal(addr, ifa->ifa_dstaddr)(bcmp((caddr_t)(addr), (caddr_t)(ifa->ifa_dstaddr), (addr)
->sa_len) == 0)
) {
1571 return (ifa);
1572 }
1573 }
1574 }
1575 }
1576 return (NULL((void *)0));
1577}
1578
1579/*
1580 * Find an interface address specific to an interface best matching
1581 * a given address.
1582 */
1583struct ifaddr *
1584ifaof_ifpforaddr(const struct sockaddr *addr, struct ifnet *ifp)
1585{
1586 struct ifaddr *ifa;
1587 const char *cp, *cp2, *cp3;
1588 char *cplim;
1589 struct ifaddr *ifa_maybe = NULL((void *)0);
1590 u_int af = addr->sa_family;
1591
1592 if (af >= AF_MAX36)
1593 return (NULL((void *)0));
1594 TAILQ_FOREACH(ifa, &ifp->if_addrlist, ifa_list)for((ifa) = ((&ifp->if_addrlist)->tqh_first); (ifa)
!= ((void *)0); (ifa) = ((ifa)->ifa_list.tqe_next))
{
1595 if (ifa->ifa_addr->sa_family != af)
1596 continue;
1597 if (ifa_maybe == NULL((void *)0))
1598 ifa_maybe = ifa;
1599 if (ifa->ifa_netmask == 0 || ifp->if_flags & IFF_POINTOPOINT0x10) {
1600 if (equal(addr, ifa->ifa_addr)(bcmp((caddr_t)(addr), (caddr_t)(ifa->ifa_addr), (addr)->
sa_len) == 0)
||
1601 (ifa->ifa_dstaddr && equal(addr, ifa->ifa_dstaddr)(bcmp((caddr_t)(addr), (caddr_t)(ifa->ifa_dstaddr), (addr)
->sa_len) == 0)
))
1602 return (ifa);
1603 continue;
1604 }
1605 cp = addr->sa_data;
1606 cp2 = ifa->ifa_addr->sa_data;
1607 cp3 = ifa->ifa_netmask->sa_data;
1608 cplim = ifa->ifa_netmask->sa_len + (char *)ifa->ifa_netmask;
1609 for (; cp3 < cplim; cp3++)
1610 if ((*cp++ ^ *cp2++) & *cp3)
1611 break;
1612 if (cp3 == cplim)
1613 return (ifa);
1614 }
1615 return (ifa_maybe);
1616}
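Editorial aside: the byte loop above XORs the candidate address with the interface address and masks the result, so an address matches when it lies inside the configured prefix. A worked example under an assumed IPv4 configuration (not part of if.c): with ifa_addr 192.0.2.1 and ifa_netmask 255.255.255.0, the address 192.0.2.77 matches, since the first three masked bytes XOR to zero and the last byte is ignored by the 0x00 mask byte, while 192.0.3.77 fails on the third byte.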
1617
1618void
1619if_rtrequest_dummy(struct ifnet *ifp, int req, struct rtentry *rt)
1620{
1621}
1622
1623/*
1624 * Default action when installing a local route on a point-to-point
1625 * interface.
1626 */
1627void
1628p2p_rtrequest(struct ifnet *ifp, int req, struct rtentry *rt)
1629{
1630 struct ifnet *lo0ifp;
1631 struct ifaddr *ifa, *lo0ifa;
1632
1633 switch (req) {
1634 case RTM_ADD0x1:
1635 if (!ISSET(rt->rt_flags, RTF_LOCAL)((rt->rt_flags) & (0x200000)))
1636 break;
1637
1638 TAILQ_FOREACH(ifa, &ifp->if_addrlist, ifa_list)for((ifa) = ((&ifp->if_addrlist)->tqh_first); (ifa)
!= ((void *)0); (ifa) = ((ifa)->ifa_list.tqe_next))
{
1639 if (memcmp(rt_key(rt), ifa->ifa_addr,__builtin_memcmp((((rt)->rt_dest)), (ifa->ifa_addr), ((
(rt)->rt_dest)->sa_len))
1640 rt_key(rt)->sa_len)__builtin_memcmp((((rt)->rt_dest)), (ifa->ifa_addr), ((
(rt)->rt_dest)->sa_len))
== 0)
1641 break;
1642 }
1643
1644 if (ifa == NULL((void *)0))
1645 break;
1646
1647 KASSERT(ifa == rt->rt_ifa)((ifa == rt->rt_ifa) ? (void)0 : __assert("diagnostic ", "/usr/src/sys/net/if.c"
, 1647, "ifa == rt->rt_ifa"))
;
1648
1649 lo0ifp = if_get(rtable_loindex(ifp->if_rdomainif_data.ifi_rdomain));
1650 KASSERT(lo0ifp != NULL)((lo0ifp != ((void *)0)) ? (void)0 : __assert("diagnostic ", "/usr/src/sys/net/if.c"
, 1650, "lo0ifp != NULL"))
;
1651 TAILQ_FOREACH(lo0ifa, &lo0ifp->if_addrlist, ifa_list)for((lo0ifa) = ((&lo0ifp->if_addrlist)->tqh_first);
(lo0ifa) != ((void *)0); (lo0ifa) = ((lo0ifa)->ifa_list.tqe_next
))
{
1652 if (lo0ifa->ifa_addr->sa_family ==
1653 ifa->ifa_addr->sa_family)
1654 break;
1655 }
1656 if_put(lo0ifp);
1657
1658 if (lo0ifa == NULL((void *)0))
1659 break;
1660
1661 rt->rt_flags &= ~RTF_LLINFO0x400;
1662 break;
1663 case RTM_DELETE0x2:
1664 case RTM_RESOLVE0xb:
1665 default:
1666 break;
1667 }
1668}
1669
1670int
1671p2p_bpf_mtap(caddr_t if_bpf, const struct mbuf *m, u_int dir)
1672{
1673#if NBPFILTER1 > 0
1674 return (bpf_mtap_af(if_bpf, m->m_pkthdrM_dat.MH.MH_pkthdr.ph_family, m, dir));
1675#else
1676 return (0);
1677#endif
1678}
1679
1680void
1681p2p_input(struct ifnet *ifp, struct mbuf *m)
1682{
1683 void (*input)(struct ifnet *, struct mbuf *);
1684
1685 switch (m->m_pkthdrM_dat.MH.MH_pkthdr.ph_family) {
1686 case AF_INET2:
1687 input = ipv4_input;
1688 break;
1689#ifdef INET61
1690 case AF_INET624:
1691 input = ipv6_input;
1692 break;
1693#endif
1694#ifdef MPLS1
1695 case AF_MPLS33:
1696 input = mpls_input;
1697 break;
1698#endif
1699 default:
1700 m_freem(m);
1701 return;
1702 }
1703
1704 (*input)(ifp, m);
1705}
1706
1707/*
1708 * Bring down all interfaces
1709 */
1710void
1711if_downall(void)
1712{
1713 struct ifreq ifrq; /* XXX only partly built */
1714 struct ifnet *ifp;
1715
1716 NET_LOCK()do { rw_enter_write(&netlock); } while (0);
1717 TAILQ_FOREACH(ifp, &ifnetlist, if_list)for((ifp) = ((&ifnetlist)->tqh_first); (ifp) != ((void
*)0); (ifp) = ((ifp)->if_list.tqe_next))
{
1718 if ((ifp->if_flags & IFF_UP0x1) == 0)
1719 continue;
1720 if_down(ifp);
1721 ifrq.ifr_flagsifr_ifru.ifru_flags = ifp->if_flags;
1722 (*ifp->if_ioctl)(ifp, SIOCSIFFLAGS((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((16)))
, (caddr_t)&ifrq);
1723 }
1724 NET_UNLOCK()do { rw_exit_write(&netlock); } while (0);
1725}
1726
1727/*
1728 * Mark an interface down and notify protocols of
1729 * the transition.
1730 */
1731void
1732if_down(struct ifnet *ifp)
1733{
1734 NET_ASSERT_LOCKED()do { int _s = rw_status(&netlock); if ((splassert_ctl >
0) && (_s != 0x0001UL && _s != 0x0002UL)) splassert_fail
(0x0002UL, _s, __func__); } while (0)
;
1735
1736 ifp->if_flags &= ~IFF_UP0x1;
1737 getmicrotime(&ifp->if_lastchangeif_data.ifi_lastchange);
1738 ifq_purge(&ifp->if_snd);
1739
1740 if_linkstate(ifp);
1741}
1742
1743/*
1744 * Mark an interface up and notify protocols of
1745 * the transition.
1746 */
1747void
1748if_up(struct ifnet *ifp)
1749{
1750 NET_ASSERT_LOCKED()do { int _s = rw_status(&netlock); if ((splassert_ctl >
0) && (_s != 0x0001UL && _s != 0x0002UL)) splassert_fail
(0x0002UL, _s, __func__); } while (0)
;
1751
1752 ifp->if_flags |= IFF_UP0x1;
1753 getmicrotime(&ifp->if_lastchangeif_data.ifi_lastchange);
1754
1755#ifdef INET61
1756 /* Userland expects the kernel to set ::1 on default lo(4). */
1757 if (ifp->if_index == rtable_loindex(ifp->if_rdomainif_data.ifi_rdomain))
1758 in6_ifattach(ifp);
1759#endif
1760
1761 if_linkstate(ifp);
1762}
1763
1764/*
1765 * Notify userland, the routing table and the hook owners of
1766 * a link-state transition.
1767 */
1768void
1769if_linkstate_task(void *xifidx)
1770{
1771 unsigned int ifidx = (unsigned long)xifidx;
1772 struct ifnet *ifp;
1773
1774 NET_LOCK()do { rw_enter_write(&netlock); } while (0);
1775 KERNEL_LOCK()_kernel_lock();
1776
1777 ifp = if_get(ifidx);
1778 if (ifp != NULL((void *)0))
1779 if_linkstate(ifp);
1780 if_put(ifp);
1781
1782 KERNEL_UNLOCK()_kernel_unlock();
1783 NET_UNLOCK()do { rw_exit_write(&netlock); } while (0);
1784}
1785
1786void
1787if_linkstate(struct ifnet *ifp)
1788{
1789 NET_ASSERT_LOCKED()do { int _s = rw_status(&netlock); if ((splassert_ctl >
0) && (_s != 0x0001UL && _s != 0x0002UL)) splassert_fail
(0x0002UL, _s, __func__); } while (0)
;
1790
1791 rtm_ifchg(ifp);
1792 rt_if_track(ifp);
1793
1794 if_hooks_run(&ifp->if_linkstatehooks);
1795}
1796
1797void
1798if_linkstatehook_add(struct ifnet *ifp, struct task *t)
1799{
1800 mtx_enter(&if_hooks_mtx);
1801 TAILQ_INSERT_HEAD(&ifp->if_linkstatehooks, t, t_entry)do { if (((t)->t_entry.tqe_next = (&ifp->if_linkstatehooks
)->tqh_first) != ((void *)0)) (&ifp->if_linkstatehooks
)->tqh_first->t_entry.tqe_prev = &(t)->t_entry.tqe_next
; else (&ifp->if_linkstatehooks)->tqh_last = &(
t)->t_entry.tqe_next; (&ifp->if_linkstatehooks)->
tqh_first = (t); (t)->t_entry.tqe_prev = &(&ifp->
if_linkstatehooks)->tqh_first; } while (0)
;
1802 mtx_leave(&if_hooks_mtx);
1803}
1804
1805void
1806if_linkstatehook_del(struct ifnet *ifp, struct task *t)
1807{
1808 mtx_enter(&if_hooks_mtx);
1809 TAILQ_REMOVE(&ifp->if_linkstatehooks, t, t_entry)do { if (((t)->t_entry.tqe_next) != ((void *)0)) (t)->t_entry
.tqe_next->t_entry.tqe_prev = (t)->t_entry.tqe_prev; else
(&ifp->if_linkstatehooks)->tqh_last = (t)->t_entry
.tqe_prev; *(t)->t_entry.tqe_prev = (t)->t_entry.tqe_next
; ((t)->t_entry.tqe_prev) = ((void *)-1); ((t)->t_entry
.tqe_next) = ((void *)-1); } while (0)
;
1810 mtx_leave(&if_hooks_mtx);
1811}
1812
1813/*
1814 * Schedule a link state change task.
1815 */
1816void
1817if_link_state_change(struct ifnet *ifp)
1818{
1819 task_add(net_tq(ifp->if_index), &ifp->if_linkstatetask);
1820}
1821
1822/*
1823 * Handle the interface watchdog timer. Called from
1824 * softclock, we decrement the timer (if set) and schedule
1825 * the interface's watchdog task on expiration.
1826 */
1827void
1828if_slowtimo(void *arg)
1829{
1830 struct ifnet *ifp = arg;
1831 int s = splnet()splraise(0x4);
1832
1833 if (ifp->if_watchdog) {
1834 if (ifp->if_timer > 0 && --ifp->if_timer == 0)
1835 task_add(net_tq(ifp->if_index), &ifp->if_watchdogtask);
1836 timeout_add_sec(&ifp->if_slowtimo, IFNET_SLOWTIMO1);
1837 }
1838 splx(s)spllower(s);
1839}
1840
1841void
1842if_watchdog_task(void *xifidx)
1843{
1844 unsigned int ifidx = (unsigned long)xifidx;
1845 struct ifnet *ifp;
1846 int s;
1847
1848 ifp = if_get(ifidx);
1849 if (ifp == NULL((void *)0))
1850 return;
1851
1852 KERNEL_LOCK()_kernel_lock();
1853 s = splnet()splraise(0x4);
1854 if (ifp->if_watchdog)
1855 (*ifp->if_watchdog)(ifp);
1856 splx(s)spllower(s);
1857 KERNEL_UNLOCK()_kernel_unlock();
1858
1859 if_put(ifp);
1860}
1861
1862/*
1863 * Map interface name to interface structure pointer.
1864 */
1865struct ifnet *
1866if_unit(const char *name)
1867{
1868 struct ifnet *ifp;
1869
1870 KERNEL_ASSERT_LOCKED()((_kernel_lock_held()) ? (void)0 : __assert("diagnostic ", "/usr/src/sys/net/if.c"
, 1870, "_kernel_lock_held()"))
;
1871
1872 TAILQ_FOREACH(ifp, &ifnetlist, if_list)for((ifp) = ((&ifnetlist)->tqh_first); (ifp) != ((void
*)0); (ifp) = ((ifp)->if_list.tqe_next))
{
1873 if (strcmp(ifp->if_xname, name) == 0) {
1874 if_ref(ifp);
1875 return (ifp);
1876 }
1877 }
1878
1879 return (NULL((void *)0));
1880}
1881
1882/*
1883 * Map interface index to interface structure pointer.
1884 */
1885struct ifnet *
1886if_get(unsigned int index)
1887{
1888 struct ifnet **if_map;
1889 struct ifnet *ifp = NULL((void *)0);
1890
1891 if (index == 0)
1892 return (NULL((void *)0));
1893
1894 smr_read_enter();
1895 if_map = SMR_PTR_GET(&if_idxmap.map)({ typeof(*&if_idxmap.map) __tmp = *(volatile typeof(*&
if_idxmap.map) *)&(*&if_idxmap.map); membar_datadep_consumer
(); __tmp; })
;
1896 if (index < if_idxmap_limit(if_map)) {
1897 ifp = SMR_PTR_GET(&if_map[index])({ typeof(*&if_map[index]) __tmp = *(volatile typeof(*&
if_map[index]) *)&(*&if_map[index]); membar_datadep_consumer
(); __tmp; })
;
1898 if (ifp != NULL((void *)0)) {
1899 KASSERT(ifp->if_index == index)((ifp->if_index == index) ? (void)0 : __assert("diagnostic "
, "/usr/src/sys/net/if.c", 1899, "ifp->if_index == index")
)
;
1900 if_ref(ifp);
1901 }
1902 }
1903 smr_read_leave();
1904
1905 return (ifp);
1906}
1907
1908struct ifnet *
1909if_ref(struct ifnet *ifp)
1910{
1911 refcnt_take(&ifp->if_refcnt);
1912
1913 return (ifp);
1914}
1915
1916void
1917if_put(struct ifnet *ifp)
1918{
1919 if (ifp == NULL((void *)0))
1920 return;
1921
1922 refcnt_rele_wake(&ifp->if_refcnt);
1923}
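Editorial aside: if_get() resolves the index under an SMR read section and takes a reference before returning, so every successful if_get() must be balanced by if_put(); if_put(NULL) is a no-op, which lets callers release unconditionally. A minimal caller sketch (ifidx is an assumed interface index; not part of if.c):

	struct ifnet *ifp;

	ifp = if_get(ifidx);
	if (ifp != NULL) {
		/* ifp remains valid while the reference is held */
	}
	if_put(ifp);	/* safe even when ifp == NULL */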
1924
1925int
1926if_setlladdr(struct ifnet *ifp, const uint8_t *lladdr)
1927{
1928 if (ifp->if_sadl == NULL((void *)0))
1929 return (EINVAL22);
1930
1931 memcpy(((struct arpcom *)ifp)->ac_enaddr, lladdr, ETHER_ADDR_LEN)__builtin_memcpy((((struct arpcom *)ifp)->ac_enaddr), (lladdr
), (6))
;
1932 memcpy(LLADDR(ifp->if_sadl), lladdr, ETHER_ADDR_LEN)__builtin_memcpy((((caddr_t)((ifp->if_sadl)->sdl_data +
(ifp->if_sadl)->sdl_nlen))), (lladdr), (6))
;
1933
1934 return (0);
1935}
1936
1937int
1938if_createrdomain(int rdomain, struct ifnet *ifp)
1939{
1940 int error;
1941 struct ifnet *loifp;
1942 char loifname[IFNAMSIZ16];
1943 unsigned int unit = rdomain;
1944
1945 if ((error = rtable_add(rdomain)) != 0)
1946 return (error);
1947 if (!rtable_empty(rdomain))
1948 return (EEXIST17);
1949
1950	/* Create the rdomain, including its loopback interface with unit == rdomain */
1951 snprintf(loifname, sizeof(loifname), "lo%u", unit);
1952 error = if_clone_create(loifname, 0);
1953 if ((loifp = if_unit(loifname)) == NULL((void *)0))
1954 return (ENXIO6);
1955 if (error && (ifp != loifp || error != EEXIST17)) {
1956 if_put(loifp);
1957 return (error);
1958 }
1959
1960 rtable_l2set(rdomain, rdomain, loifp->if_index);
1961 loifp->if_rdomainif_data.ifi_rdomain = rdomain;
1962 if_put(loifp);
1963
1964 return (0);
1965}
1966
1967int
1968if_setrdomain(struct ifnet *ifp, int rdomain)
1969{
1970 struct ifreq ifr;
1971 int error, up = 0, s;
1972
1973 if (rdomain < 0 || rdomain > RT_TABLEID_MAX255)
1974 return (EINVAL22);
1975
1976 if (rdomain != ifp->if_rdomainif_data.ifi_rdomain &&
1977 (ifp->if_flags & IFF_LOOPBACK0x8) &&
1978 (ifp->if_index == rtable_loindex(ifp->if_rdomainif_data.ifi_rdomain)))
1979 return (EPERM1);
1980
1981 if (!rtable_exists(rdomain))
1982 return (ESRCH3);
1983
1984 /* make sure that the routing table is a real rdomain */
1985 if (rdomain != rtable_l2(rdomain))
1986 return (EINVAL22);
1987
1988 if (rdomain != ifp->if_rdomainif_data.ifi_rdomain) {
1989 s = splnet()splraise(0x4);
1990 /*
1991 * We are tearing down the world.
1992 * Take down the IF so:
1993 * 1. everything that cares gets a message
1994 * 2. the automagic IPv6 bits are recreated
1995 */
1996 if (ifp->if_flags & IFF_UP0x1) {
1997 up = 1;
1998 if_down(ifp);
1999 }
2000 rti_delete(ifp);
2001#ifdef MROUTING1
2002 vif_delete(ifp);
2003#endif
2004 in_ifdetach(ifp);
2005#ifdef INET61
2006 in6_ifdetach(ifp);
2007#endif
2008 splx(s)spllower(s);
2009 }
2010
2011 /* Let devices like enc(4) or mpe(4) know about the change */
2012 ifr.ifr_rdomainidifr_ifru.ifru_metric = rdomain;
2013 if ((error = (*ifp->if_ioctl)(ifp, SIOCSIFRDOMAIN((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((159)))
,
2014 (caddr_t)&ifr)) != ENOTTY25)
2015 return (error);
2016 error = 0;
Value stored to 'error' is never read
2017
2018 /* Add interface to the specified rdomain */
2019 ifp->if_rdomainif_data.ifi_rdomain = rdomain;
2020
2021 /* If we took down the IF, bring it back */
2022 if (up) {
2023 s = splnet()splraise(0x4);
2024 if_up(ifp);
2025 splx(s)spllower(s);
2026 }
2027
2028 return (0);
2029}
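Editorial note on the analyzer warning above: when the driver's ioctl handler returns anything other than ENOTTY, if_setrdomain() returns that result immediately; only on ENOTTY does execution fall through to the error = 0; store at line 2016, and error is never read again before the function's unconditional return (0), so the store is dead. A minimal sketch of how the dead store could be dropped without changing behaviour (an editorial illustration, not a committed fix):

	/* Let devices like enc(4) or mpe(4) know about the change */
	ifr.ifr_rdomainid = rdomain;
	error = (*ifp->if_ioctl)(ifp, SIOCSIFRDOMAIN, (caddr_t)&ifr);
	if (error != ENOTTY)
		return (error);	/* driver handled it (or genuinely failed) */
	/* ENOTTY: driver has no handler, continue with the generic path */

	/* Add interface to the specified rdomain */
	ifp->if_rdomain = rdomain;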
2030
2031/*
2032 * Interface ioctls.
2033 */
2034int
2035ifioctl(struct socket *so, u_long cmd, caddr_t data, struct proc *p)
2036{
2037 struct ifnet *ifp;
2038 struct ifreq *ifr = (struct ifreq *)data;
2039 struct ifgroupreq *ifgr = (struct ifgroupreq *)data;
2040 struct if_afreq *ifar = (struct if_afreq *)data;
2041 char ifdescrbuf[IFDESCRSIZE64];
2042 char ifrtlabelbuf[RTLABEL_LEN32];
2043 int s, error = 0, oif_xflags;
2044 size_t bytesdone;
2045 unsigned short oif_flags;
2046
2047 switch (cmd) {
2048 case SIOCIFCREATE((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((122)))
:
2049 if ((error = suser(p)) != 0)
2050 return (error);
2051 KERNEL_LOCK()_kernel_lock();
2052 error = if_clone_create(ifr->ifr_name, 0);
2053 KERNEL_UNLOCK()_kernel_unlock();
2054 return (error);
2055 case SIOCIFDESTROY((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((121)))
:
2056 if ((error = suser(p)) != 0)
2057 return (error);
2058 KERNEL_LOCK()_kernel_lock();
2059 error = if_clone_destroy(ifr->ifr_name);
2060 KERNEL_UNLOCK()_kernel_unlock();
2061 return (error);
2062 case SIOCSIFGATTR((unsigned long)0x80000000 | ((sizeof(struct ifgroupreq) &
0x1fff) << 16) | ((('i')) << 8) | ((140)))
:
2063 if ((error = suser(p)) != 0)
2064 return (error);
2065 KERNEL_LOCK()_kernel_lock();
2066 NET_LOCK()do { rw_enter_write(&netlock); } while (0);
2067 error = if_setgroupattribs(data);
2068 NET_UNLOCK()do { rw_exit_write(&netlock); } while (0);
2069 KERNEL_UNLOCK()_kernel_unlock();
2070 return (error);
2071 case SIOCGIFCONF(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifconf) & 0x1fff) << 16) | ((('i')) <<
8) | ((36)))
:
2072 case SIOCIFGCLONERS(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct if_clonereq) & 0x1fff) << 16) | ((('i')) <<
8) | ((120)))
:
2073 case SIOCGIFGMEMB(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifgroupreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((138)))
:
2074 case SIOCGIFGATTR(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifgroupreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((139)))
:
2075 case SIOCGIFGLIST(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifgroupreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((141)))
:
2076 case SIOCGIFFLAGS(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((17)))
:
2077 case SIOCGIFXFLAGS(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((158)))
:
2078 case SIOCGIFMETRIC(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((23)))
:
2079 case SIOCGIFMTU(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((126)))
:
2080 case SIOCGIFHARDMTU(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((165)))
:
2081 case SIOCGIFDATA(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((27)))
:
2082 case SIOCGIFDESCR(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((129)))
:
2083 case SIOCGIFRTLABEL(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((131)))
:
2084 case SIOCGIFPRIORITY(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((156)))
:
2085 case SIOCGIFRDOMAIN(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((160)))
:
2086 case SIOCGIFGROUP(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifgroupreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((136)))
:
2087 case SIOCGIFLLPRIO(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((182)))
:
2088 error = ifioctl_get(cmd, data);
2089 return (error);
2090 }
2091
2092 KERNEL_LOCK()_kernel_lock();
2093
2094 ifp = if_unit(ifr->ifr_name);
2095 if (ifp == NULL((void *)0)) {
2096 KERNEL_UNLOCK()_kernel_unlock();
2097 return (ENXIO6);
2098 }
2099 oif_flags = ifp->if_flags;
2100 oif_xflags = ifp->if_xflags;
2101
2102 switch (cmd) {
2103 case SIOCIFAFATTACH((unsigned long)0x80000000 | ((sizeof(struct if_afreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((171)))
:
2104 case SIOCIFAFDETACH((unsigned long)0x80000000 | ((sizeof(struct if_afreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((172)))
:
2105 if ((error = suser(p)) != 0)
2106 break;
2107 NET_LOCK()do { rw_enter_write(&netlock); } while (0);
2108 switch (ifar->ifar_af) {
2109 case AF_INET2:
2110 /* attach is a noop for AF_INET */
2111 if (cmd == SIOCIFAFDETACH((unsigned long)0x80000000 | ((sizeof(struct if_afreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((172)))
)
2112 in_ifdetach(ifp);
2113 break;
2114#ifdef INET61
2115 case AF_INET624:
2116 if (cmd == SIOCIFAFATTACH((unsigned long)0x80000000 | ((sizeof(struct if_afreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((171)))
)
2117 error = in6_ifattach(ifp);
2118 else
2119 in6_ifdetach(ifp);
2120 break;
2121#endif /* INET6 */
2122 default:
2123 error = EAFNOSUPPORT47;
2124 }
2125 NET_UNLOCK()do { rw_exit_write(&netlock); } while (0);
2126 break;
2127
2128 case SIOCSIFXFLAGS((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((157)))
:
2129 if ((error = suser(p)) != 0)
2130 break;
2131
2132 NET_LOCK()do { rw_enter_write(&netlock); } while (0);
2133#ifdef INET61
2134 if ((ISSET(ifr->ifr_flags, IFXF_AUTOCONF6)((ifr->ifr_ifru.ifru_flags) & (0x20)) ||
2135 ISSET(ifr->ifr_flags, IFXF_AUTOCONF6TEMP)((ifr->ifr_ifru.ifru_flags) & (0x4))) &&
2136 !ISSET(ifp->if_xflags, IFXF_AUTOCONF6)((ifp->if_xflags) & (0x20)) &&
2137 !ISSET(ifp->if_xflags, IFXF_AUTOCONF6TEMP)((ifp->if_xflags) & (0x4))) {
2138 error = in6_ifattach(ifp);
2139 if (error != 0) {
2140 NET_UNLOCK()do { rw_exit_write(&netlock); } while (0);
2141 break;
2142 }
2143 }
2144
2145 if (ISSET(ifr->ifr_flags, IFXF_INET6_NOSOII)((ifr->ifr_ifru.ifru_flags) & (0x40)) &&
2146 !ISSET(ifp->if_xflags, IFXF_INET6_NOSOII)((ifp->if_xflags) & (0x40)))
2147 ifp->if_xflags |= IFXF_INET6_NOSOII0x40;
2148
2149 if (!ISSET(ifr->ifr_flags, IFXF_INET6_NOSOII)((ifr->ifr_ifru.ifru_flags) & (0x40)) &&
2150 ISSET(ifp->if_xflags, IFXF_INET6_NOSOII)((ifp->if_xflags) & (0x40)))
2151 ifp->if_xflags &= ~IFXF_INET6_NOSOII0x40;
2152
2153#endif /* INET6 */
2154
2155#ifdef MPLS1
2156 if (ISSET(ifr->ifr_flags, IFXF_MPLS)((ifr->ifr_ifru.ifru_flags) & (0x8)) &&
2157 !ISSET(ifp->if_xflags, IFXF_MPLS)((ifp->if_xflags) & (0x8))) {
2158 s = splnet()splraise(0x4);
2159 ifp->if_xflags |= IFXF_MPLS0x8;
2160 ifp->if_ll_output = ifp->if_output;
2161 ifp->if_output = mpls_output;
2162 splx(s)spllower(s);
2163 }
2164 if (ISSET(ifp->if_xflags, IFXF_MPLS)((ifp->if_xflags) & (0x8)) &&
2165 !ISSET(ifr->ifr_flags, IFXF_MPLS)((ifr->ifr_ifru.ifru_flags) & (0x8))) {
2166 s = splnet()splraise(0x4);
2167 ifp->if_xflags &= ~IFXF_MPLS0x8;
2168 ifp->if_output = ifp->if_ll_output;
2169 ifp->if_ll_output = NULL((void *)0);
2170 splx(s)spllower(s);
2171 }
2172#endif /* MPLS */
2173
2174#ifndef SMALL_KERNEL
2175 if (ifp->if_capabilitiesif_data.ifi_capabilities & IFCAP_WOL0x00008000) {
2176 if (ISSET(ifr->ifr_flags, IFXF_WOL)((ifr->ifr_ifru.ifru_flags) & (0x10)) &&
2177 !ISSET(ifp->if_xflags, IFXF_WOL)((ifp->if_xflags) & (0x10))) {
2178 s = splnet()splraise(0x4);
2179 ifp->if_xflags |= IFXF_WOL0x10;
2180 error = ifp->if_wol(ifp, 1);
2181 splx(s)spllower(s);
2182 }
2183 if (ISSET(ifp->if_xflags, IFXF_WOL)((ifp->if_xflags) & (0x10)) &&
2184 !ISSET(ifr->ifr_flags, IFXF_WOL)((ifr->ifr_ifru.ifru_flags) & (0x10))) {
2185 s = splnet()splraise(0x4);
2186 ifp->if_xflags &= ~IFXF_WOL0x10;
2187 error = ifp->if_wol(ifp, 0);
2188 splx(s)spllower(s);
2189 }
2190 } else if (ISSET(ifr->ifr_flags, IFXF_WOL)((ifr->ifr_ifru.ifru_flags) & (0x10))) {
2191 ifr->ifr_flagsifr_ifru.ifru_flags &= ~IFXF_WOL0x10;
2192 error = ENOTSUP91;
2193 }
2194#endif
2195 if (ISSET(ifr->ifr_flags, IFXF_LRO)((ifr->ifr_ifru.ifru_flags) & (0x200)) !=
2196 ISSET(ifp->if_xflags, IFXF_LRO)((ifp->if_xflags) & (0x200)))
2197 error = ifsetlro(ifp, ISSET(ifr->ifr_flags, IFXF_LRO)((ifr->ifr_ifru.ifru_flags) & (0x200)));
2198
2199 if (error == 0)
2200 ifp->if_xflags = (ifp->if_xflags & IFXF_CANTCHANGE(0x1|0x2)) |
2201 (ifr->ifr_flagsifr_ifru.ifru_flags & ~IFXF_CANTCHANGE(0x1|0x2));
2202
2203 if (!ISSET(ifp->if_flags, IFF_UP)((ifp->if_flags) & (0x1)) &&
2204 ((!ISSET(oif_xflags, IFXF_AUTOCONF4)((oif_xflags) & (0x80)) &&
2205 ISSET(ifp->if_xflags, IFXF_AUTOCONF4)((ifp->if_xflags) & (0x80))) ||
2206 (!ISSET(oif_xflags, IFXF_AUTOCONF6)((oif_xflags) & (0x20)) &&
2207 ISSET(ifp->if_xflags, IFXF_AUTOCONF6)((ifp->if_xflags) & (0x20))) ||
2208 (!ISSET(oif_xflags, IFXF_AUTOCONF6TEMP)((oif_xflags) & (0x4)) &&
2209 ISSET(ifp->if_xflags, IFXF_AUTOCONF6TEMP)((ifp->if_xflags) & (0x4))))) {
2210 ifr->ifr_flagsifr_ifru.ifru_flags = ifp->if_flags | IFF_UP0x1;
2211 goto forceup;
2212 }
2213
2214 NET_UNLOCK()do { rw_exit_write(&netlock); } while (0);
2215 break;
2216
2217 case SIOCSIFFLAGS((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((16)))
:
2218 if ((error = suser(p)) != 0)
2219 break;
2220
2221 NET_LOCK()do { rw_enter_write(&netlock); } while (0);
2222forceup:
2223 ifp->if_flags = (ifp->if_flags & IFF_CANTCHANGE(0x2|0x10|0x40|0x400| 0x800|0x8000|0x200)) |
2224 (ifr->ifr_flagsifr_ifru.ifru_flags & ~IFF_CANTCHANGE(0x2|0x10|0x40|0x400| 0x800|0x8000|0x200));
2225 error = (*ifp->if_ioctl)(ifp, SIOCSIFFLAGS((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((16)))
, data);
2226 if (error != 0) {
2227 ifp->if_flags = oif_flags;
2228 if (cmd == SIOCSIFXFLAGS((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((157)))
)
2229 ifp->if_xflags = oif_xflags;
2230 } else if (ISSET(oif_flags ^ ifp->if_flags, IFF_UP)((oif_flags ^ ifp->if_flags) & (0x1))) {
2231 s = splnet()splraise(0x4);
2232 if (ISSET(ifp->if_flags, IFF_UP)((ifp->if_flags) & (0x1)))
2233 if_up(ifp);
2234 else
2235 if_down(ifp);
2236 splx(s)spllower(s);
2237 }
2238 NET_UNLOCK()do { rw_exit_write(&netlock); } while (0);
2239 break;
2240
2241 case SIOCSIFMETRIC((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((24)))
:
2242 if ((error = suser(p)) != 0)
2243 break;
2244 NET_LOCK()do { rw_enter_write(&netlock); } while (0);
2245 ifp->if_metricif_data.ifi_metric = ifr->ifr_metricifr_ifru.ifru_metric;
2246 NET_UNLOCK()do { rw_exit_write(&netlock); } while (0);
2247 break;
2248
2249 case SIOCSIFMTU((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((127)))
:
2250 if ((error = suser(p)) != 0)
2251 break;
2252 NET_LOCK()do { rw_enter_write(&netlock); } while (0);
2253 error = (*ifp->if_ioctl)(ifp, cmd, data);
2254 NET_UNLOCK()do { rw_exit_write(&netlock); } while (0);
2255 if (error == 0)
2256 rtm_ifchg(ifp);
2257 break;
2258
2259 case SIOCSIFDESCR((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((128)))
:
2260 if ((error = suser(p)) != 0)
2261 break;
2262 error = copyinstr(ifr->ifr_dataifr_ifru.ifru_data, ifdescrbuf,
2263 IFDESCRSIZE64, &bytesdone);
2264 if (error == 0) {
2265 (void)memset(ifp->if_description, 0, IFDESCRSIZE)__builtin_memset((ifp->if_description), (0), (64));
2266 strlcpy(ifp->if_description, ifdescrbuf, IFDESCRSIZE64);
2267 }
2268 break;
2269
2270 case SIOCSIFRTLABEL((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((130)))
:
2271 if ((error = suser(p)) != 0)
2272 break;
2273 error = copyinstr(ifr->ifr_dataifr_ifru.ifru_data, ifrtlabelbuf,
2274 RTLABEL_LEN32, &bytesdone);
2275 if (error == 0) {
2276 rtlabel_unref(ifp->if_rtlabelid);
2277 ifp->if_rtlabelid = rtlabel_name2id(ifrtlabelbuf);
2278 }
2279 break;
2280
2281 case SIOCSIFPRIORITY((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((155)))
:
2282 if ((error = suser(p)) != 0)
2283 break;
2284 if (ifr->ifr_metricifr_ifru.ifru_metric < 0 || ifr->ifr_metricifr_ifru.ifru_metric > 15) {
2285 error = EINVAL22;
2286 break;
2287 }
2288 ifp->if_priority = ifr->ifr_metricifr_ifru.ifru_metric;
2289 break;
2290
2291 case SIOCSIFRDOMAIN((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((159)))
:
2292 if ((error = suser(p)) != 0)
2293 break;
2294 error = if_createrdomain(ifr->ifr_rdomainidifr_ifru.ifru_metric, ifp);
2295 if (!error || error == EEXIST17) {
2296 NET_LOCK()do { rw_enter_write(&netlock); } while (0);
2297 error = if_setrdomain(ifp, ifr->ifr_rdomainidifr_ifru.ifru_metric);
2298 NET_UNLOCK()do { rw_exit_write(&netlock); } while (0);
2299 }
2300 break;
2301
2302 case SIOCAIFGROUP((unsigned long)0x80000000 | ((sizeof(struct ifgroupreq) &
0x1fff) << 16) | ((('i')) << 8) | ((135)))
:
2303 if ((error = suser(p)))
2304 break;
2305 NET_LOCK()do { rw_enter_write(&netlock); } while (0);
2306 error = if_addgroup(ifp, ifgr->ifgr_groupifgr_ifgru.ifgru_group);
2307 if (error == 0) {
2308 error = (*ifp->if_ioctl)(ifp, cmd, data);
2309 if (error == ENOTTY25)
2310 error = 0;
2311 }
2312 NET_UNLOCK()do { rw_exit_write(&netlock); } while (0);
2313 break;
2314
2315 case SIOCDIFGROUP((unsigned long)0x80000000 | ((sizeof(struct ifgroupreq) &
0x1fff) << 16) | ((('i')) << 8) | ((137)))
:
2316 if ((error = suser(p)))
2317 break;
2318 NET_LOCK()do { rw_enter_write(&netlock); } while (0);
2319 error = (*ifp->if_ioctl)(ifp, cmd, data);
2320 if (error == ENOTTY25)
2321 error = 0;
2322 if (error == 0)
2323 error = if_delgroup(ifp, ifgr->ifgr_groupifgr_ifgru.ifgru_group);
2324 NET_UNLOCK()do { rw_exit_write(&netlock); } while (0);
2325 break;
2326
2327 case SIOCSIFLLADDR((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((31)))
:
2328 if ((error = suser(p)))
2329 break;
2330 if ((ifp->if_sadl == NULL((void *)0)) ||
2331 (ifr->ifr_addrifr_ifru.ifru_addr.sa_len != ETHER_ADDR_LEN6) ||
2332 (ETHER_IS_MULTICAST(ifr->ifr_addr.sa_data)(*(ifr->ifr_ifru.ifru_addr.sa_data) & 0x01))) {
2333 error = EINVAL22;
2334 break;
2335 }
2336 NET_LOCK()do { rw_enter_write(&netlock); } while (0);
2337 switch (ifp->if_typeif_data.ifi_type) {
2338 case IFT_ETHER0x06:
2339 case IFT_CARP0xf7:
2340 case IFT_XETHER0x1a:
2341 case IFT_ISO880250x09:
2342 error = (*ifp->if_ioctl)(ifp, cmd, data);
2343 if (error == ENOTTY25)
2344 error = 0;
2345 if (error == 0)
2346 error = if_setlladdr(ifp,
2347 ifr->ifr_addrifr_ifru.ifru_addr.sa_data);
2348 break;
2349 default:
2350 error = ENODEV19;
2351 }
2352
2353 if (error == 0)
2354 ifnewlladdr(ifp);
2355 NET_UNLOCK()do { rw_exit_write(&netlock); } while (0);
2356 if (error == 0)
2357 rtm_ifchg(ifp);
2358 break;
2359
2360 case SIOCSIFLLPRIO((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((181)))
:
2361 if ((error = suser(p)))
2362 break;
2363 if (ifr->ifr_llprioifr_ifru.ifru_metric < IFQ_MINPRIO0 ||
2364 ifr->ifr_llprioifr_ifru.ifru_metric > IFQ_MAXPRIO8 - 1) {
2365 error = EINVAL22;
2366 break;
2367 }
2368 NET_LOCK()do { rw_enter_write(&netlock); } while (0);
2369 ifp->if_llprio = ifr->ifr_llprioifr_ifru.ifru_metric;
2370 NET_UNLOCK()do { rw_exit_write(&netlock); } while (0);
2371 break;
2372
2373 case SIOCGIFSFFPAGE(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct if_sffpage) & 0x1fff) << 16) | ((('i')) <<
8) | ((57)))
:
2374 error = suser(p);
2375 if (error != 0)
2376 break;
2377
2378 error = if_sffpage_check(data);
2379 if (error != 0)
2380 break;
2381
2382 /* don't take NET_LOCK because i2c reads take a long time */
2383 error = ((*ifp->if_ioctl)(ifp, cmd, data));
2384 break;
2385
2386 case SIOCSIFMEDIA(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((55)))
:
2387 if ((error = suser(p)) != 0)
2388 break;
2389 /* FALLTHROUGH */
2390 case SIOCGIFMEDIA(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifmediareq) & 0x1fff) << 16) | ((('i')) <<
8) | ((56)))
:
2391 /* net lock is not needed */
2392 error = ((*ifp->if_ioctl)(ifp, cmd, data));
2393 break;
2394
2395 case SIOCSETKALIVE((unsigned long)0x80000000 | ((sizeof(struct ifkalivereq) &
0x1fff) << 16) | ((('i')) << 8) | ((163)))
:
2396 case SIOCDIFPHYADDR((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((73)))
:
2397 case SIOCSLIFPHYADDR((unsigned long)0x80000000 | ((sizeof(struct if_laddrreq) &
0x1fff) << 16) | ((('i')) << 8) | ((74)))
:
2398 case SIOCSLIFPHYRTABLE((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((161)))
:
2399 case SIOCSLIFPHYTTL((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((168)))
:
2400 case SIOCSLIFPHYDF((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((193)))
:
2401 case SIOCSLIFPHYECN((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((199)))
:
2402 case SIOCADDMULTI((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((49)))
:
2403 case SIOCDELMULTI((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((50)))
:
2404 case SIOCSVNETID((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((166)))
:
2405 case SIOCDVNETID((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((175)))
:
2406 case SIOCSVNETFLOWID((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((195)))
:
2407 case SIOCSTXHPRIO((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((197)))
:
2408 case SIOCSRXHPRIO((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((219)))
:
2409 case SIOCSIFPAIR((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((176)))
:
2410 case SIOCSIFPARENT((unsigned long)0x80000000 | ((sizeof(struct if_parent) &
0x1fff) << 16) | ((('i')) << 8) | ((178)))
:
2411 case SIOCDIFPARENT((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((180)))
:
2412 case SIOCSETMPWCFG((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((173)))
:
2413 case SIOCSETLABEL((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((153)))
:
2414 case SIOCDELLABEL((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((151)))
:
2415 case SIOCSPWE3CTRLWORD((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((220)))
:
2416 case SIOCSPWE3FAT((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((221)))
:
2417 case SIOCSPWE3NEIGHBOR((unsigned long)0x80000000 | ((sizeof(struct if_laddrreq) &
0x1fff) << 16) | ((('i')) << 8) | ((222)))
:
2418 case SIOCDPWE3NEIGHBOR((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((222)))
:
2419#if NBRIDGE1 > 0
2420 case SIOCBRDGADD((unsigned long)0x80000000 | ((sizeof(struct ifbreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((60)))
:
2421 case SIOCBRDGDEL((unsigned long)0x80000000 | ((sizeof(struct ifbreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((61)))
:
2422 case SIOCBRDGSIFFLGS((unsigned long)0x80000000 | ((sizeof(struct ifbreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((63)))
:
2423 case SIOCBRDGSCACHE((unsigned long)0x80000000 | ((sizeof(struct ifbrparam) &
0x1fff) << 16) | ((('i')) << 8) | ((64)))
:
2424 case SIOCBRDGADDS((unsigned long)0x80000000 | ((sizeof(struct ifbreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((65)))
:
2425 case SIOCBRDGDELS((unsigned long)0x80000000 | ((sizeof(struct ifbreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((66)))
:
2426 case SIOCBRDGSADDR(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifbareq) & 0x1fff) << 16) | ((('i')) <<
8) | ((68)))
:
2427 case SIOCBRDGSTO((unsigned long)0x80000000 | ((sizeof(struct ifbrparam) &
0x1fff) << 16) | ((('i')) << 8) | ((69)))
:
2428 case SIOCBRDGDADDR((unsigned long)0x80000000 | ((sizeof(struct ifbareq) & 0x1fff
) << 16) | ((('i')) << 8) | ((71)))
:
2429 case SIOCBRDGFLUSH((unsigned long)0x80000000 | ((sizeof(struct ifbreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((72)))
:
2430 case SIOCBRDGADDL((unsigned long)0x80000000 | ((sizeof(struct ifbreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((73)))
:
2431 case SIOCBRDGSIFPROT((unsigned long)0x80000000 | ((sizeof(struct ifbreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((74)))
:
2432 case SIOCBRDGARL((unsigned long)0x80000000 | ((sizeof(struct ifbrlreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((77)))
:
2433 case SIOCBRDGFRL((unsigned long)0x80000000 | ((sizeof(struct ifbrlreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((78)))
:
2434 case SIOCBRDGSPRI((unsigned long)0x80000000 | ((sizeof(struct ifbrparam) &
0x1fff) << 16) | ((('i')) << 8) | ((80)))
:
2435 case SIOCBRDGSHT((unsigned long)0x80000000 | ((sizeof(struct ifbrparam) &
0x1fff) << 16) | ((('i')) << 8) | ((81)))
:
2436 case SIOCBRDGSFD((unsigned long)0x80000000 | ((sizeof(struct ifbrparam) &
0x1fff) << 16) | ((('i')) << 8) | ((82)))
:
2437 case SIOCBRDGSMA((unsigned long)0x80000000 | ((sizeof(struct ifbrparam) &
0x1fff) << 16) | ((('i')) << 8) | ((83)))
:
2438 case SIOCBRDGSIFPRIO((unsigned long)0x80000000 | ((sizeof(struct ifbreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((84)))
:
2439 case SIOCBRDGSIFCOST((unsigned long)0x80000000 | ((sizeof(struct ifbreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((85)))
:
2440 case SIOCBRDGSTXHC((unsigned long)0x80000000 | ((sizeof(struct ifbrparam) &
0x1fff) << 16) | ((('i')) << 8) | ((89)))
:
2441 case SIOCBRDGSPROTO((unsigned long)0x80000000 | ((sizeof(struct ifbrparam) &
0x1fff) << 16) | ((('i')) << 8) | ((90)))
:
2442#endif
2443 if ((error = suser(p)) != 0)
2444 break;
2445 /* FALLTHROUGH */
2446 default:
2447 error = pru_control(so, cmd, data, ifp);
2448 if (error != EOPNOTSUPP45)
2449 break;
2450 switch (cmd) {
2451 case SIOCAIFADDR((unsigned long)0x80000000 | ((sizeof(struct ifaliasreq) &
0x1fff) << 16) | ((('i')) << 8) | ((26)))
:
2452 case SIOCDIFADDR((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((25)))
:
2453 case SIOCSIFADDR((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((12)))
:
2454 case SIOCSIFNETMASK((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((22)))
:
2455 case SIOCSIFDSTADDR((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((14)))
:
2456 case SIOCSIFBRDADDR((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((19)))
:
2457#ifdef INET61
2458 case SIOCAIFADDR_IN6((unsigned long)0x80000000 | ((sizeof(struct in6_aliasreq) &
0x1fff) << 16) | ((('i')) << 8) | ((26)))
:
2459 case SIOCDIFADDR_IN6((unsigned long)0x80000000 | ((sizeof(struct in6_ifreq) &
0x1fff) << 16) | ((('i')) << 8) | ((25)))
:
2460#endif
2461 error = suser(p);
2462 break;
2463 default:
2464 error = 0;
2465 break;
2466 }
2467 if (error)
2468 break;
2469 NET_LOCK()do { rw_enter_write(&netlock); } while (0);
2470 error = ((*ifp->if_ioctl)(ifp, cmd, data));
2471 NET_UNLOCK()do { rw_exit_write(&netlock); } while (0);
2472 break;
2473 }
2474
2475 if (oif_flags != ifp->if_flags || oif_xflags != ifp->if_xflags) {
2476 /* if_up() and if_down() already sent an update, skip here */
2477 if (((oif_flags ^ ifp->if_flags) & IFF_UP0x1) == 0)
2478 rtm_ifchg(ifp);
2479 }
2480
2481 if (((oif_flags ^ ifp->if_flags) & IFF_UP0x1) != 0)
2482 getmicrotime(&ifp->if_lastchangeif_data.ifi_lastchange);
2483
2484 KERNEL_UNLOCK()_kernel_unlock();
2485
2486 if_put(ifp);
2487
2488 return (error);
2489}
2490
2491int
2492ifioctl_get(u_long cmd, caddr_t data)
2493{
2494 struct ifnet *ifp;
2495 struct ifreq *ifr = (struct ifreq *)data;
2496 char ifdescrbuf[IFDESCRSIZE64];
2497 char ifrtlabelbuf[RTLABEL_LEN32];
2498 int error = 0;
2499 size_t bytesdone;
2500
2501 switch(cmd) {
2502 case SIOCGIFCONF(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifconf) & 0x1fff) << 16) | ((('i')) <<
8) | ((36)))
:
2503 NET_LOCK_SHARED()do { rw_enter_read(&netlock); } while (0);
2504 error = ifconf(data);
2505 NET_UNLOCK_SHARED()do { rw_exit_read(&netlock); } while (0);
2506 return (error);
2507 case SIOCIFGCLONERS(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct if_clonereq) & 0x1fff) << 16) | ((('i')) <<
8) | ((120)))
:
2508 error = if_clone_list((struct if_clonereq *)data);
2509 return (error);
2510 case SIOCGIFGMEMB(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifgroupreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((138)))
:
2511 NET_LOCK_SHARED()do { rw_enter_read(&netlock); } while (0);
2512 error = if_getgroupmembers(data);
2513 NET_UNLOCK_SHARED()do { rw_exit_read(&netlock); } while (0);
2514 return (error);
2515 case SIOCGIFGATTR(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifgroupreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((139)))
:
2516 NET_LOCK_SHARED()do { rw_enter_read(&netlock); } while (0);
2517 error = if_getgroupattribs(data);
2518 NET_UNLOCK_SHARED()do { rw_exit_read(&netlock); } while (0);
2519 return (error);
2520 case SIOCGIFGLIST(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifgroupreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((141)))
:
2521 NET_LOCK_SHARED()do { rw_enter_read(&netlock); } while (0);
2522 error = if_getgrouplist(data);
2523 NET_UNLOCK_SHARED()do { rw_exit_read(&netlock); } while (0);
2524 return (error);
2525 }
2526
2527 KERNEL_LOCK()_kernel_lock();
2528
2529 ifp = if_unit(ifr->ifr_name);
2530 if (ifp == NULL((void *)0)) {
2531 KERNEL_UNLOCK()_kernel_unlock();
2532 return (ENXIO6);
2533 }
2534
2535 NET_LOCK_SHARED()do { rw_enter_read(&netlock); } while (0);
2536
2537 switch(cmd) {
2538 case SIOCGIFFLAGS(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((17)))
:
2539 ifr->ifr_flagsifr_ifru.ifru_flags = ifp->if_flags;
2540 if (ifq_is_oactive(&ifp->if_snd))
2541 ifr->ifr_flagsifr_ifru.ifru_flags |= IFF_OACTIVE0x400;
2542 break;
2543
2544 case SIOCGIFXFLAGS(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((158)))
:
2545 ifr->ifr_flagsifr_ifru.ifru_flags = ifp->if_xflags & ~(IFXF_MPSAFE0x1|IFXF_CLONED0x2);
2546 break;
2547
2548 case SIOCGIFMETRIC(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((23)))
:
2549 ifr->ifr_metricifr_ifru.ifru_metric = ifp->if_metricif_data.ifi_metric;
2550 break;
2551
2552 case SIOCGIFMTU(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((126)))
:
2553 ifr->ifr_mtuifr_ifru.ifru_metric = ifp->if_mtuif_data.ifi_mtu;
2554 break;
2555
2556 case SIOCGIFHARDMTU(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((165)))
:
2557 ifr->ifr_hardmtuifr_ifru.ifru_metric = ifp->if_hardmtu;
2558 break;
2559
2560 case SIOCGIFDATA(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((27)))
: {
2561 struct if_data ifdata;
2562 if_getdata(ifp, &ifdata);
2563 error = copyout(&ifdata, ifr->ifr_dataifr_ifru.ifru_data, sizeof(ifdata));
2564 break;
2565 }
2566
2567 case SIOCGIFDESCR(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((129)))
:
2568 strlcpy(ifdescrbuf, ifp->if_description, IFDESCRSIZE64);
2569 error = copyoutstr(ifdescrbuf, ifr->ifr_dataifr_ifru.ifru_data, IFDESCRSIZE64,
2570 &bytesdone);
2571 break;
2572
2573 case SIOCGIFRTLABEL(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((131)))
:
2574 if (ifp->if_rtlabelid && rtlabel_id2name(ifp->if_rtlabelid,
2575 ifrtlabelbuf, RTLABEL_LEN32) != NULL((void *)0)) {
2576 error = copyoutstr(ifrtlabelbuf, ifr->ifr_dataifr_ifru.ifru_data,
2577 RTLABEL_LEN32, &bytesdone);
2578 } else
2579 error = ENOENT2;
2580 break;
2581
2582 case SIOCGIFPRIORITY(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((156)))
:
2583 ifr->ifr_metricifr_ifru.ifru_metric = ifp->if_priority;
2584 break;
2585
2586 case SIOCGIFRDOMAIN(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((160)))
:
2587 ifr->ifr_rdomainidifr_ifru.ifru_metric = ifp->if_rdomainif_data.ifi_rdomain;
2588 break;
2589
2590 case SIOCGIFGROUP(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifgroupreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((136)))
:
2591 error = if_getgroup(data, ifp);
2592 break;
2593
2594 case SIOCGIFLLPRIO(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((182)))
:
2595 ifr->ifr_llprioifr_ifru.ifru_metric = ifp->if_llprio;
2596 break;
2597
2598 default:
2599 panic("invalid ioctl %lu", cmd);
2600 }
2601
2602 NET_UNLOCK_SHARED()do { rw_exit_read(&netlock); } while (0);
2603
2604 KERNEL_UNLOCK()_kernel_unlock();
2605
2606 if_put(ifp);
2607
2608 return (error);
2609}
2610
2611static int
2612if_sffpage_check(const caddr_t data)
2613{
2614 const struct if_sffpage *sff = (const struct if_sffpage *)data;
2615
2616 switch (sff->sff_addr) {
2617 case IFSFF_ADDR_EEPROM0xa0:
2618 case IFSFF_ADDR_DDM0xa2:
2619 break;
2620 default:
2621 return (EINVAL22);
2622 }
2623
2624 return (0);
2625}
2626
2627int
2628if_txhprio_l2_check(int hdrprio)
2629{
2630 switch (hdrprio) {
2631 case IF_HDRPRIO_PACKET-1:
2632 return (0);
2633 default:
2634 if (hdrprio >= IF_HDRPRIO_MIN0 && hdrprio <= IF_HDRPRIO_MAX8 - 1)
2635 return (0);
2636 break;
2637 }
2638
2639 return (EINVAL22);
2640}
2641
2642int
2643if_txhprio_l3_check(int hdrprio)
2644{
2645 switch (hdrprio) {
2646 case IF_HDRPRIO_PACKET-1:
2647 case IF_HDRPRIO_PAYLOAD-2:
2648 return (0);
2649 default:
2650 if (hdrprio >= IF_HDRPRIO_MIN0 && hdrprio <= IF_HDRPRIO_MAX8 - 1)
2651 return (0);
2652 break;
2653 }
2654
2655 return (EINVAL22);
2656}
2657
2658int
2659if_rxhprio_l2_check(int hdrprio)
2660{
2661 switch (hdrprio) {
2662 case IF_HDRPRIO_PACKET-1:
2663 case IF_HDRPRIO_OUTER-3:
2664 return (0);
2665 default:
2666 if (hdrprio >= IF_HDRPRIO_MIN0 && hdrprio <= IF_HDRPRIO_MAX8 - 1)
2667 return (0);
2668 break;
2669 }
2670
2671 return (EINVAL22);
2672}
2673
2674int
2675if_rxhprio_l3_check(int hdrprio)
2676{
2677 switch (hdrprio) {
2678 case IF_HDRPRIO_PACKET-1:
2679 case IF_HDRPRIO_PAYLOAD-2:
2680 case IF_HDRPRIO_OUTER-3:
2681 return (0);
2682 default:
2683 if (hdrprio >= IF_HDRPRIO_MIN0 && hdrprio <= IF_HDRPRIO_MAX8 - 1)
2684 return (0);
2685 break;
2686 }
2687
2688 return (EINVAL22);
2689}
2690
2691/*
2692 * Return the interface configuration of the
2693 * system. The list may be used in later
2694 * ioctls (above) to get other
2695 * information.
2696 */
2697int
2698ifconf(caddr_t data)
2699{
2700 struct ifconf *ifc = (struct ifconf *)data;
2701 struct ifnet *ifp;
2702 struct ifaddr *ifa;
2703 struct ifreq ifr, *ifrp;
2704 int space = ifc->ifc_len, error = 0;
2705
2706 /* If ifc->ifc_len is 0, fill it in with the needed size and return. */
2707 if (space == 0) {
2708 TAILQ_FOREACH(ifp, &ifnetlist, if_list)for((ifp) = ((&ifnetlist)->tqh_first); (ifp) != ((void
*)0); (ifp) = ((ifp)->if_list.tqe_next))
{
2709 struct sockaddr *sa;
2710
2711 if (TAILQ_EMPTY(&ifp->if_addrlist)(((&ifp->if_addrlist)->tqh_first) == ((void *)0)))
2712 space += sizeof (ifr);
2713 else
2714 TAILQ_FOREACH(ifa,for((ifa) = ((&ifp->if_addrlist)->tqh_first); (ifa)
!= ((void *)0); (ifa) = ((ifa)->ifa_list.tqe_next))
2715 &ifp->if_addrlist, ifa_list)for((ifa) = ((&ifp->if_addrlist)->tqh_first); (ifa)
!= ((void *)0); (ifa) = ((ifa)->ifa_list.tqe_next))
{
2716 sa = ifa->ifa_addr;
2717 if (sa->sa_len > sizeof(*sa))
2718 space += sa->sa_len -
2719 sizeof(*sa);
2720 space += sizeof(ifr);
2721 }
2722 }
2723 ifc->ifc_len = space;
2724 return (0);
2725 }
2726
2727 ifrp = ifc->ifc_reqifc_ifcu.ifcu_req;
2728 TAILQ_FOREACH(ifp, &ifnetlist, if_list)for((ifp) = ((&ifnetlist)->tqh_first); (ifp) != ((void
*)0); (ifp) = ((ifp)->if_list.tqe_next))
{
2729 if (space < sizeof(ifr))
2730 break;
2731 bcopy(ifp->if_xname, ifr.ifr_name, IFNAMSIZ16);
2732 if (TAILQ_EMPTY(&ifp->if_addrlist)(((&ifp->if_addrlist)->tqh_first) == ((void *)0))) {
2733 bzero((caddr_t)&ifr.ifr_addr, sizeof(ifr.ifr_addr))__builtin_bzero(((caddr_t)&ifr.ifr_ifru.ifru_addr), (sizeof
(ifr.ifr_ifru.ifru_addr)))
;
2734 error = copyout((caddr_t)&ifr, (caddr_t)ifrp,
2735 sizeof(ifr));
2736 if (error)
2737 break;
2738 space -= sizeof (ifr), ifrp++;
2739 } else
2740 TAILQ_FOREACH(ifa, &ifp->if_addrlist, ifa_list)for((ifa) = ((&ifp->if_addrlist)->tqh_first); (ifa)
!= ((void *)0); (ifa) = ((ifa)->ifa_list.tqe_next))
{
2741 struct sockaddr *sa = ifa->ifa_addr;
2742
2743 if (space < sizeof(ifr))
2744 break;
2745 if (sa->sa_len <= sizeof(*sa)) {
2746 ifr.ifr_addrifr_ifru.ifru_addr = *sa;
2747 error = copyout((caddr_t)&ifr,
2748 (caddr_t)ifrp, sizeof (ifr));
2749 ifrp++;
2750 } else {
2751 space -= sa->sa_len - sizeof(*sa);
2752 if (space < sizeof (ifr))
2753 break;
2754 error = copyout((caddr_t)&ifr,
2755 (caddr_t)ifrp,
2756 sizeof(ifr.ifr_name));
2757 if (error == 0)
2758 error = copyout((caddr_t)sa,
2759 (caddr_t)&ifrp->ifr_addrifr_ifru.ifru_addr,
2760 sa->sa_len);
2761 ifrp = (struct ifreq *)(sa->sa_len +
2762 (caddr_t)&ifrp->ifr_addrifr_ifru.ifru_addr);
2763 }
2764 if (error)
2765 break;
2766 space -= sizeof (ifr);
2767 }
2768 }
2769 ifc->ifc_len -= space;
2770 return (error);
2771}
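Editorial aside: ifconf() implements the classic SIOCGIFCONF sizing handshake: a request with ifc_len == 0 only reports the space required, and a second request with a buffer of that size copies out one struct ifreq per address, packing any sockaddr larger than the embedded one directly after ifr_name. A hedged userland sketch (includes and error handling omitted; s is an assumed open socket; not part of if.c):

	struct ifconf ifc;

	memset(&ifc, 0, sizeof(ifc));
	ioctl(s, SIOCGIFCONF, &ifc);	/* ifc_len == 0: size query */
	ifc.ifc_buf = malloc(ifc.ifc_len);
	ioctl(s, SIOCGIFCONF, &ifc);	/* fills buffer, updates ifc_len */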
2772
2773void
2774if_counters_alloc(struct ifnet *ifp)
2775{
2776 KASSERT(ifp->if_counters == NULL)((ifp->if_counters == ((void *)0)) ? (void)0 : __assert("diagnostic "
, "/usr/src/sys/net/if.c", 2776, "ifp->if_counters == NULL"
))
;
2777
2778 ifp->if_counters = counters_alloc(ifc_ncounters);
2779}
2780
2781void
2782if_counters_free(struct ifnet *ifp)
2783{
2784 KASSERT(ifp->if_counters != NULL)((ifp->if_counters != ((void *)0)) ? (void)0 : __assert("diagnostic "
, "/usr/src/sys/net/if.c", 2784, "ifp->if_counters != NULL"
))
;
2785
2786 counters_free(ifp->if_counters, ifc_ncounters);
2787 ifp->if_counters = NULL((void *)0);
2788}
2789
2790void
2791if_getdata(struct ifnet *ifp, struct if_data *data)
2792{
2793 unsigned int i;
2794
2795 *data = ifp->if_data;
2796
2797 if (ifp->if_counters != NULL((void *)0)) {
2798 uint64_t counters[ifc_ncounters];
2799
2800 counters_read(ifp->if_counters, counters, nitems(counters)(sizeof((counters)) / sizeof((counters)[0])),
2801 NULL((void *)0));
2802
2803 data->ifi_ipackets += counters[ifc_ipackets];
2804 data->ifi_ierrors += counters[ifc_ierrors];
2805 data->ifi_opackets += counters[ifc_opackets];
2806 data->ifi_oerrors += counters[ifc_oerrors];
2807 data->ifi_collisions += counters[ifc_collisions];
2808 data->ifi_ibytes += counters[ifc_ibytes];
2809 data->ifi_obytes += counters[ifc_obytes];
2810 data->ifi_imcasts += counters[ifc_imcasts];
2811 data->ifi_omcasts += counters[ifc_omcasts];
2812 data->ifi_iqdrops += counters[ifc_iqdrops];
2813 data->ifi_oqdrops += counters[ifc_oqdrops];
2814 data->ifi_noproto += counters[ifc_noproto];
2815 }
2816
2817 for (i = 0; i < ifp->if_nifqs; i++) {
2818 struct ifqueue *ifq = ifp->if_ifqs[i];
2819
2820 ifq_add_data(ifq, data);
2821 }
2822
2823 for (i = 0; i < ifp->if_niqs; i++) {
2824 struct ifiqueue *ifiq = ifp->if_iqs[i];
2825
2826 ifiq_add_data(ifiq, data);
2827 }
2828}
2829
2830/*
2831 * Dummy functions replaced in ifnet during detach (in case protocols decide
2832 * to fiddle with the interface during detach).
2833 */
2834void
2835if_detached_qstart(struct ifqueue *ifq)
2836{
2837 ifq_purge(ifq);
2838}
2839
2840int
2841if_detached_ioctl(struct ifnet *ifp, u_long a, caddr_t b)
2842{
2843 return ENODEV19;
2844}
2845
2846/*
2847 * Create interface group without members
2848 */
2849struct ifg_group *
2850if_creategroup(const char *groupname)
2851{
2852 struct ifg_group *ifg;
2853
2854 if ((ifg = malloc(sizeof(*ifg), M_IFGROUP10, M_NOWAIT0x0002)) == NULL((void *)0))
2855 return (NULL((void *)0));
2856
2857 strlcpy(ifg->ifg_group, groupname, sizeof(ifg->ifg_group));
2858 ifg->ifg_refcnt = 1;
2859 ifg->ifg_carp_demoted = 0;
2860 TAILQ_INIT(&ifg->ifg_members)do { (&ifg->ifg_members)->tqh_first = ((void *)0); (
&ifg->ifg_members)->tqh_last = &(&ifg->ifg_members
)->tqh_first; } while (0)
;
2861#if NPF1 > 0
2862 pfi_attach_ifgroup(ifg);
2863#endif
2864 TAILQ_INSERT_TAIL(&ifg_head, ifg, ifg_next)do { (ifg)->ifg_next.tqe_next = ((void *)0); (ifg)->ifg_next
.tqe_prev = (&ifg_head)->tqh_last; *(&ifg_head)->
tqh_last = (ifg); (&ifg_head)->tqh_last = &(ifg)->
ifg_next.tqe_next; } while (0)
;
2865
2866 return (ifg);
2867}
2868
2869/*
2870 * Add a group to an interface
2871 */
2872int
2873if_addgroup(struct ifnet *ifp, const char *groupname)
2874{
2875 struct ifg_list *ifgl;
2876 struct ifg_group *ifg = NULL((void *)0);
2877 struct ifg_member *ifgm;
2878 size_t namelen;
2879
2880 namelen = strlen(groupname);
2881 if (namelen == 0 || namelen >= IFNAMSIZ16 ||
2882 (groupname[namelen - 1] >= '0' && groupname[namelen - 1] <= '9'))
2883 return (EINVAL22);
2884
2885 TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next)for((ifgl) = ((&ifp->if_groups)->tqh_first); (ifgl)
!= ((void *)0); (ifgl) = ((ifgl)->ifgl_next.tqe_next))
2886 if (!strcmp(ifgl->ifgl_group->ifg_group, groupname))
2887 return (EEXIST17);
2888
2889 if ((ifgl = malloc(sizeof(*ifgl), M_IFGROUP10, M_NOWAIT0x0002)) == NULL((void *)0))
2890 return (ENOMEM12);
2891
2892 if ((ifgm = malloc(sizeof(*ifgm), M_IFGROUP10, M_NOWAIT0x0002)) == NULL((void *)0)) {
2893 free(ifgl, M_IFGROUP10, sizeof(*ifgl));
2894 return (ENOMEM12);
2895 }
2896
2897 TAILQ_FOREACH(ifg, &ifg_head, ifg_next)for((ifg) = ((&ifg_head)->tqh_first); (ifg) != ((void *
)0); (ifg) = ((ifg)->ifg_next.tqe_next))
2898 if (!strcmp(ifg->ifg_group, groupname))
2899 break;
2900
2901 if (ifg == NULL((void *)0)) {
2902 ifg = if_creategroup(groupname);
2903 if (ifg == NULL((void *)0)) {
2904 free(ifgl, M_IFGROUP10, sizeof(*ifgl));
2905 free(ifgm, M_IFGROUP10, sizeof(*ifgm));
2906 return (ENOMEM12);
2907 }
2908 } else
2909 ifg->ifg_refcnt++;
2910 KASSERT(ifg->ifg_refcnt != 0)((ifg->ifg_refcnt != 0) ? (void)0 : __assert("diagnostic "
, "/usr/src/sys/net/if.c", 2910, "ifg->ifg_refcnt != 0"))
;
2911
2912 ifgl->ifgl_group = ifg;
2913 ifgm->ifgm_ifp = ifp;
2914
2915 TAILQ_INSERT_TAIL(&ifg->ifg_members, ifgm, ifgm_next)do { (ifgm)->ifgm_next.tqe_next = ((void *)0); (ifgm)->
ifgm_next.tqe_prev = (&ifg->ifg_members)->tqh_last;
*(&ifg->ifg_members)->tqh_last = (ifgm); (&ifg
->ifg_members)->tqh_last = &(ifgm)->ifgm_next.tqe_next
; } while (0)
;
2916 TAILQ_INSERT_TAIL(&ifp->if_groups, ifgl, ifgl_next)do { (ifgl)->ifgl_next.tqe_next = ((void *)0); (ifgl)->
ifgl_next.tqe_prev = (&ifp->if_groups)->tqh_last; *
(&ifp->if_groups)->tqh_last = (ifgl); (&ifp->
if_groups)->tqh_last = &(ifgl)->ifgl_next.tqe_next;
} while (0)
;
2917
2918#if NPF1 > 0
2919 pfi_group_addmember(groupname);
2920#endif
2921
2922 return (0);
2923}
2924
2925/*
2926 * Remove a group from an interface
2927 */
2928int
2929if_delgroup(struct ifnet *ifp, const char *groupname)
2930{
2931 struct ifg_list *ifgl;
2932 struct ifg_member *ifgm;
2933
2934 TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next)
2935 if (!strcmp(ifgl->ifgl_group->ifg_group, groupname))
2936 break;
2937 if (ifgl == NULL)
2938 return (ENOENT);
2939
2940 TAILQ_REMOVE(&ifp->if_groups, ifgl, ifgl_next);
2941
2942 TAILQ_FOREACH(ifgm, &ifgl->ifgl_group->ifg_members, ifgm_next)
2943 if (ifgm->ifgm_ifp == ifp)
2944 break;
2945
2946 if (ifgm != NULL) {
2947 TAILQ_REMOVE(&ifgl->ifgl_group->ifg_members, ifgm, ifgm_next);
2948 free(ifgm, M_IFGROUP, sizeof(*ifgm));
2949 }
2950
2951#if NPF > 0
2952 pfi_group_delmember(groupname);
2953#endif
2954
2955 KASSERT(ifgl->ifgl_group->ifg_refcnt != 0);
2956 if (--ifgl->ifgl_group->ifg_refcnt == 0) {
2957 TAILQ_REMOVE(&ifg_head, ifgl->ifgl_group, ifg_next);
2958#if NPF > 0
2959 pfi_detach_ifgroup(ifgl->ifgl_group);
2960#endif
2961 free(ifgl->ifgl_group, M_IFGROUP, sizeof(*ifgl->ifgl_group));
2962 }
2963
2964 free(ifgl, M_IFGROUP, sizeof(*ifgl));
2965
2966 return (0);
2967}
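
The pair above is normally exercised through the SIOCAIFGROUP and SIOCDIFGROUP ioctls. Below is an editor's minimal userland sketch of that path (not part of if.c); it assumes the struct ifgroupreq layout from <net/if.h>, needs root, and uses placeholder interface and group names. Note that group names ending in a digit are rejected by if_addgroup() with EINVAL.

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <net/if.h>

#include <err.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
        struct ifgroupreq ifgr;
        int s;

        if ((s = socket(AF_INET, SOCK_DGRAM, 0)) == -1)
                err(1, "socket");

        memset(&ifgr, 0, sizeof(ifgr));
        strlcpy(ifgr.ifgr_name, "em0", sizeof(ifgr.ifgr_name));    /* placeholder interface */
        strlcpy(ifgr.ifgr_group, "test", sizeof(ifgr.ifgr_group)); /* must not end in a digit */

        /* join the group; if_addgroup() creates it on first use */
        if (ioctl(s, SIOCAIFGROUP, &ifgr) == -1)
                err(1, "SIOCAIFGROUP");

        /* leave it again; the group is freed with its last reference */
        if (ioctl(s, SIOCDIFGROUP, &ifgr) == -1)
                err(1, "SIOCDIFGROUP");

        close(s);
        return 0;
}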
2968
2969/*
2970 * Stores all groups from an interface in memory pointed
2971 * to by data
2972 */
2973int
2974if_getgroup(caddr_t data, struct ifnet *ifp)
2975{
2976 int len, error;
2977 struct ifg_list *ifgl;
2978 struct ifg_req ifgrq, *ifgp;
2979 struct ifgroupreq *ifgr = (struct ifgroupreq *)data;
2980
2981 if (ifgr->ifgr_len == 0) {
2982 TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next)
2983 ifgr->ifgr_len += sizeof(struct ifg_req);
2984 return (0);
2985 }
2986
2987 len = ifgr->ifgr_len;
2988 ifgp = ifgr->ifgr_groups;
2989 TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next) {
2990 if (len < sizeof(ifgrq))
2991 return (EINVAL);
2992 bzero(&ifgrq, sizeof ifgrq);
2993 strlcpy(ifgrq.ifgrq_group, ifgl->ifgl_group->ifg_group,
2994 sizeof(ifgrq.ifgrq_group));
2995 if ((error = copyout((caddr_t)&ifgrq, (caddr_t)ifgp,
2996 sizeof(struct ifg_req))))
2997 return (error);
2998 len -= sizeof(ifgrq);
2999 ifgp++;
3000 }
3001
3002 return (0);
3003}
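
if_getgroup() implements a two-pass length protocol: a request with ifgr_len == 0 only reports the space needed, and a second request copies the ifg_req array out. An editor's userland sketch of that protocol follows (not part of if.c); SIOCGIFGMEMB and SIOCGIFGLIST are assumed to follow the same pattern, and "em0" is a placeholder interface name.

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <net/if.h>

#include <err.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
        struct ifgroupreq ifgr;
        struct ifg_req *ifg;
        unsigned int i, cnt;
        int s;

        if ((s = socket(AF_INET, SOCK_DGRAM, 0)) == -1)
                err(1, "socket");

        memset(&ifgr, 0, sizeof(ifgr));
        strlcpy(ifgr.ifgr_name, "em0", sizeof(ifgr.ifgr_name));

        /* pass 1: ifgr_len == 0 asks only for the required size */
        if (ioctl(s, SIOCGIFGROUP, &ifgr) == -1)
                err(1, "SIOCGIFGROUP");

        if ((ifgr.ifgr_groups = calloc(1, ifgr.ifgr_len)) == NULL)
                err(1, "calloc");

        /* pass 2: the kernel fills the caller-supplied array */
        if (ioctl(s, SIOCGIFGROUP, &ifgr) == -1)
                err(1, "SIOCGIFGROUP");

        cnt = ifgr.ifgr_len / sizeof(struct ifg_req);
        for (i = 0, ifg = ifgr.ifgr_groups; i < cnt; i++, ifg++)
                printf("%s\n", ifg->ifgrq_group);

        free(ifgr.ifgr_groups);
        close(s);
        return 0;
}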
3004
3005/*
3006 * Stores all members of a group in memory pointed to by data
3007 */
3008int
3009if_getgroupmembers(caddr_t data)
3010{
3011 struct ifgroupreq *ifgr = (struct ifgroupreq *)data;
3012 struct ifg_group *ifg;
3013 struct ifg_member *ifgm;
3014 struct ifg_req ifgrq, *ifgp;
3015 int len, error;
3016
3017 TAILQ_FOREACH(ifg, &ifg_head, ifg_next)
3018 if (!strcmp(ifg->ifg_group, ifgr->ifgr_name))
3019 break;
3020 if (ifg == NULL)
3021 return (ENOENT);
3022
3023 if (ifgr->ifgr_len == 0) {
3024 TAILQ_FOREACH(ifgm, &ifg->ifg_members, ifgm_next)
3025 ifgr->ifgr_len += sizeof(ifgrq);
3026 return (0);
3027 }
3028
3029 len = ifgr->ifgr_len;
3030 ifgp = ifgr->ifgr_groups;
3031 TAILQ_FOREACH(ifgm, &ifg->ifg_members, ifgm_next) {
3032 if (len < sizeof(ifgrq))
3033 return (EINVAL);
3034 bzero(&ifgrq, sizeof ifgrq);
3035 strlcpy(ifgrq.ifgrq_member, ifgm->ifgm_ifp->if_xname,
3036 sizeof(ifgrq.ifgrq_member));
3037 if ((error = copyout((caddr_t)&ifgrq, (caddr_t)ifgp,
3038 sizeof(struct ifg_req))))
3039 return (error);
3040 len -= sizeof(ifgrq);
3041 ifgp++;
3042 }
3043
3044 return (0);
3045}
3046
3047int
3048if_getgroupattribs(caddr_t data)
3049{
3050 struct ifgroupreq *ifgr = (struct ifgroupreq *)data;
3051 struct ifg_group *ifg;
3052
3053 TAILQ_FOREACH(ifg, &ifg_head, ifg_next)
3054 if (!strcmp(ifg->ifg_group, ifgr->ifgr_name))
3055 break;
3056 if (ifg == NULL)
3057 return (ENOENT);
3058
3059 ifgr->ifgr_attrib.ifg_carp_demoted = ifg->ifg_carp_demoted;
3060
3061 return (0);
3062}
3063
3064int
3065if_setgroupattribs(caddr_t data)
3066{
3067 struct ifgroupreq *ifgr = (struct ifgroupreq *)data;
3068 struct ifg_group *ifg;
3069 struct ifg_member *ifgm;
3070 int demote;
3071
3072 TAILQ_FOREACH(ifg, &ifg_head, ifg_next)
3073 if (!strcmp(ifg->ifg_group, ifgr->ifgr_name))
3074 break;
3075 if (ifg == NULL)
3076 return (ENOENT);
3077
3078 demote = ifgr->ifgr_attrib.ifg_carp_demoted;
3079 if (demote + ifg->ifg_carp_demoted > 0xff ||
3080 demote + ifg->ifg_carp_demoted < 0)
3081 return (EINVAL);
3082
3083 ifg->ifg_carp_demoted += demote;
3084
3085 TAILQ_FOREACH(ifgm, &ifg->ifg_members, ifgm_next)
3086 ifgm->ifgm_ifp->if_ioctl(ifgm->ifgm_ifp, SIOCSIFGATTR, data);
3087
3088 return (0);
3089}
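
The only group attribute handled here is the carp demotion counter: if_setgroupattribs() adds the caller's delta and then forwards SIOCSIFGATTR to every member interface. An editor's sketch of driving this from userland follows (not part of if.c; it assumes the ifgr_attrib accessor from <net/if.h> and is roughly what the ifconfig carpdemote option does):

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <net/if.h>

#include <err.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
        struct ifgroupreq ifgr;
        int s;

        if ((s = socket(AF_INET, SOCK_DGRAM, 0)) == -1)
                err(1, "socket");

        memset(&ifgr, 0, sizeof(ifgr));
        strlcpy(ifgr.ifgr_name, "carp", sizeof(ifgr.ifgr_name));
        ifgr.ifgr_attrib.ifg_carp_demoted = 1; /* example delta; may be negative */

        if (ioctl(s, SIOCSIFGATTR, &ifgr) == -1)
                err(1, "SIOCSIFGATTR");

        close(s);
        return 0;
}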
3090
3091/*
3092 * Stores all groups in memory pointed to by data
3093 */
3094int
3095if_getgrouplist(caddr_t data)
3096{
3097 struct ifgroupreq *ifgr = (struct ifgroupreq *)data;
3098 struct ifg_group *ifg;
3099 struct ifg_req ifgrq, *ifgp;
3100 int len, error;
3101
3102 if (ifgr->ifgr_len == 0) {
3103 TAILQ_FOREACH(ifg, &ifg_head, ifg_next)
3104 ifgr->ifgr_len += sizeof(ifgrq);
3105 return (0);
3106 }
3107
3108 len = ifgr->ifgr_len;
3109 ifgp = ifgr->ifgr_groups;
3110 TAILQ_FOREACH(ifg, &ifg_head, ifg_next) {
3111 if (len < sizeof(ifgrq))
3112 return (EINVAL);
3113 bzero(&ifgrq, sizeof ifgrq);
3114 strlcpy(ifgrq.ifgrq_group, ifg->ifg_group,
3115 sizeof(ifgrq.ifgrq_group));
3116 if ((error = copyout((caddr_t)&ifgrq, (caddr_t)ifgp,
3117 sizeof(struct ifg_req))))
3118 return (error);
3119 len -= sizeof(ifgrq);
3120 ifgp++;
3121 }
3122
3123 return (0);
3124}
3125
3126void
3127if_group_routechange(const struct sockaddr *dst, const struct sockaddr *mask)
3128{
3129 switch (dst->sa_family) {
3130 case AF_INET:
3131 if (satosin_const(dst)->sin_addr.s_addr == INADDR_ANY &&
3132 mask && (mask->sa_len == 0 ||
3133 satosin_const(mask)->sin_addr.s_addr == INADDR_ANY))
3134 if_group_egress_build();
3135 break;
3136#ifdef INET6
3137 case AF_INET6:
3138 if (IN6_ARE_ADDR_EQUAL(&(satosin6_const(dst))->sin6_addr,
3139 &in6addr_any) && mask && (mask->sa_len == 0 ||
3140 IN6_ARE_ADDR_EQUAL(&(satosin6_const(mask))->sin6_addr,
3141 &in6addr_any)))
3142 if_group_egress_build();
3143 break;
3144#endif
3145 }
3146}
3147
3148int
3149if_group_egress_build(void)
3150{
3151 struct ifnet *ifp;
3152 struct ifg_group *ifg;
3153 struct ifg_member *ifgm, *next;
3154 struct sockaddr_in sa_in;
3155#ifdef INET6
3156 struct sockaddr_in6 sa_in6;
3157#endif
3158 struct rtentry *rt;
3159
3160 TAILQ_FOREACH(ifg, &ifg_head, ifg_next)
3161 if (!strcmp(ifg->ifg_group, IFG_EGRESS))
3162 break;
3163
3164 if (ifg != NULL)
3165 TAILQ_FOREACH_SAFE(ifgm, &ifg->ifg_members, ifgm_next, next)
3166 if_delgroup(ifgm->ifgm_ifp, IFG_EGRESS);
3167
3168 bzero(&sa_in, sizeof(sa_in));
3169 sa_in.sin_len = sizeof(sa_in);
3170 sa_in.sin_family = AF_INET;
3171 rt = rtable_lookup(0, sintosa(&sa_in), sintosa(&sa_in), NULL, RTP_ANY);
3172 while (rt != NULL) {
3173 ifp = if_get(rt->rt_ifidx);
3174 if (ifp != NULL) {
3175 if_addgroup(ifp, IFG_EGRESS);
3176 if_put(ifp);
3177 }
3178 rt = rtable_iterate(rt);
3179 }
3180
3181#ifdef INET6
3182 bcopy(&sa6_any, &sa_in6, sizeof(sa_in6));
3183 rt = rtable_lookup(0, sin6tosa(&sa_in6), sin6tosa(&sa_in6), NULL,
3184 RTP_ANY);
3185 while (rt != NULL) {
3186 ifp = if_get(rt->rt_ifidx);
3187 if (ifp != NULL) {
3188 if_addgroup(ifp, IFG_EGRESS);
3189 if_put(ifp);
3190 }
3191 rt = rtable_iterate(rt);
3192 }
3193#endif /* INET6 */
3194
3195 return (0);
3196}
3197
3198/*
3199 * Set/clear promiscuous mode on interface ifp based on the truth value
3200 * of pswitch. The calls are reference counted so that only the first
3201 * "on" request actually has an effect, as does the final "off" request.
3202 * Results are undefined if the "off" and "on" requests are not matched.
3203 */
3204int
3205ifpromisc(struct ifnet *ifp, int pswitch)
3206{
3207 struct ifreq ifr;
3208 unsigned short oif_flags;
3209 int oif_pcount, error;
3210
3211 NET_ASSERT_LOCKED(); /* modifying if_flags and if_pcount */
3212
3213 oif_flags = ifp->if_flags;
3214 oif_pcount = ifp->if_pcount;
3215 if (pswitch) {
3216 if (ifp->if_pcount++ != 0)
3217 return (0);
3218 ifp->if_flags |= IFF_PROMISC;
3219 } else {
3220 if (--ifp->if_pcount > 0)
3221 return (0);
3222 ifp->if_flags &= ~IFF_PROMISC;
3223 }
3224
3225 if ((ifp->if_flags & IFF_UP) == 0)
3226 return (0);
3227
3228 memset(&ifr, 0, sizeof(ifr));
3229 ifr.ifr_flags = ifp->if_flags;
3230 error = ((*ifp->if_ioctl)(ifp, SIOCSIFFLAGS, (caddr_t)&ifr));
3231 if (error) {
3232 ifp->if_flags = oif_flags;
3233 ifp->if_pcount = oif_pcount;
3234 }
3235
3236 return (error);
3237}
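
A kernel-side sketch of the reference counting described in the comment above (editor's illustration, not part of if.c; my_open()/my_close() are hypothetical consumers, and the caller is assumed to hold the net lock as NET_ASSERT_LOCKED() requires):

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/socket.h>
#include <net/if.h>
#include <net/if_var.h>

int
my_open(struct ifnet *ifp)
{
        int error;

        /* first "on" sets IFF_PROMISC and pushes SIOCSIFFLAGS to the driver */
        error = ifpromisc(ifp, 1);
        if (error)
                return (error);

        /* ... receive promiscuously ... */
        return (0);
}

void
my_close(struct ifnet *ifp)
{
        /* must pair with the earlier "on"; the last "off" clears IFF_PROMISC */
        ifpromisc(ifp, 0);
}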
3238
3239/* Set/clear LRO flag and restart interface if needed. */
3240int
3241ifsetlro(struct ifnet *ifp, int on)
3242{
3243 struct ifreq ifrq;
3244 int error = 0;
3245 int s = splnet();
3246 struct if_parent parent;
3247
3248 memset(&parent, 0, sizeof(parent));
3249 if ((*ifp->if_ioctl)(ifp, SIOCGIFPARENT, (caddr_t)&parent) != -1) {
3250 struct ifnet *ifp0 = if_unit(parent.ifp_parent);
3251
3252 if (ifp0 != NULL) {
3253 ifsetlro(ifp0, on);
3254 if_put(ifp0);
3255 }
3256 }
3257
3258 if (!ISSET(ifp->if_capabilities, IFCAP_LRO)) {
3259 error = ENOTSUP;
3260 goto out;
3261 }
3262
3263 NET_ASSERT_LOCKED(); /* for ioctl */
3264 KERNEL_ASSERT_LOCKED(); /* for if_flags */
3265
3266 if (on && !ISSET(ifp->if_xflags, IFXF_LRO)) {
3267 if (ifp->if_type == IFT_ETHER && ether_brport_isset(ifp)) {
3268 error = EBUSY;
3269 goto out;
3270 }
3271 SET(ifp->if_xflags, IFXF_LRO);
3272 } else if (!on && ISSET(ifp->if_xflags, IFXF_LRO))
3273 CLR(ifp->if_xflags, IFXF_LRO);
3274 else
3275 goto out;
3276
3277 /* restart interface */
3278 if (ISSET(ifp->if_flags, IFF_UP)) {
3279 /* go down for a moment... */
3280 CLR(ifp->if_flags, IFF_UP);
3281 ifrq.ifr_flags = ifp->if_flags;
3282 (*ifp->if_ioctl)(ifp, SIOCSIFFLAGS, (caddr_t)&ifrq);
3283
3284 /* ... and up again */
3285 SET(ifp->if_flags, IFF_UP);
3286 ifrq.ifr_flags = ifp->if_flags;
3287 (*ifp->if_ioctl)(ifp, SIOCSIFFLAGS, (caddr_t)&ifrq);
3288 }
3289 out:
3290 splx(s);
3291
3292 return error;
3293}
3294
3295void
3296ifa_add(struct ifnet *ifp, struct ifaddr *ifa)
3297{
3298 NET_ASSERT_LOCKED_EXCLUSIVE();
3299 TAILQ_INSERT_TAIL(&ifp->if_addrlist, ifa, ifa_list);
3300}
3301
3302void
3303ifa_del(struct ifnet *ifp, struct ifaddr *ifa)
3304{
3305 NET_ASSERT_LOCKED_EXCLUSIVE();
3306 TAILQ_REMOVE(&ifp->if_addrlist, ifa, ifa_list);
3307}
3308
3309void
3310ifa_update_broadaddr(struct ifnet *ifp, struct ifaddr *ifa, struct sockaddr *sa)
3311{
3312 if (ifa->ifa_broadaddr->sa_len != sa->sa_len)
3313 panic("ifa_update_broadaddr does not support dynamic length");
3314 bcopy(sa, ifa->ifa_broadaddr, sa->sa_len);
3315}
3316
3317#ifdef DDB
3318/* debug function, can be called from ddb> */
3319void
3320ifa_print_all(void)
3321{
3322 struct ifnet *ifp;
3323 struct ifaddr *ifa;
3324
3325 TAILQ_FOREACH(ifp, &ifnetlist, if_list) {
3326 TAILQ_FOREACH(ifa, &ifp->if_addrlist, ifa_list) {
3327 char addr[INET6_ADDRSTRLEN];
3328
3329 switch (ifa->ifa_addr->sa_family) {
3330 case AF_INET:
3331 printf("%s", inet_ntop(AF_INET,
3332 &satosin(ifa->ifa_addr)->sin_addr,
3333 addr, sizeof(addr)));
3334 break;
3335#ifdef INET6
3336 case AF_INET6:
3337 printf("%s", inet_ntop(AF_INET6,
3338 &(satosin6(ifa->ifa_addr))->sin6_addr,
3339 addr, sizeof(addr)));
3340 break;
3341#endif
3342 }
3343 printf(" on %s\n", ifp->if_xname);
3344 }
3345 }
3346}
3347#endif /* DDB */
3348
3349void
3350ifnewlladdr(struct ifnet *ifp)
3351{
3352#ifdef INET6
3353 struct ifaddr *ifa;
3354#endif
3355 struct ifreq ifrq;
3356 short up;
3357
3358 NET_ASSERT_LOCKED(); /* for ioctl and in6 */
3359 KERNEL_ASSERT_LOCKED(); /* for if_flags */
3360
3361 up = ifp->if_flags & IFF_UP;
3362
3363 if (up) {
3364 /* go down for a moment... */
3365 ifp->if_flags &= ~IFF_UP;
3366 ifrq.ifr_flags = ifp->if_flags;
3367 (*ifp->if_ioctl)(ifp, SIOCSIFFLAGS, (caddr_t)&ifrq);
3368 }
3369
3370 ifp->if_flags |= IFF_UP;
3371 ifrq.ifr_flags = ifp->if_flags;
3372 (*ifp->if_ioctl)(ifp, SIOCSIFFLAGS, (caddr_t)&ifrq);
3373
3374#ifdef INET6
3375 /*
3376 * Update the link-local address. Don't do it if we're
3377 * a router to avoid confusing hosts on the network.
3378 */
3379 if (!ip6_forwarding) {
3380 ifa = &in6ifa_ifpforlinklocal(ifp, 0)->ia_ifa;
3381 if (ifa) {
3382 in6_purgeaddr(ifa);
3383 if_hooks_run(&ifp->if_addrhooks);
3384 in6_ifattach(ifp);
3385 }
3386 }
3387#endif
3388 if (!up) {
3389 /* go back down */
3390 ifp->if_flags &= ~IFF_UP;
3391 ifrq.ifr_flags = ifp->if_flags;
3392 (*ifp->if_ioctl)(ifp, SIOCSIFFLAGS, (caddr_t)&ifrq);
3393 }
3394}
3395
3396void
3397if_addrhook_add(struct ifnet *ifp, struct task *t)
3398{
3399 mtx_enter(&if_hooks_mtx);
3400 TAILQ_INSERT_TAIL(&ifp->if_addrhooks, t, t_entry);
3401 mtx_leave(&if_hooks_mtx);
3402}
3403
3404void
3405if_addrhook_del(struct ifnet *ifp, struct task *t)
3406{
3407 mtx_enter(&if_hooks_mtx);
3408 TAILQ_REMOVE(&ifp->if_addrhooks, t, t_entry);
3409 mtx_leave(&if_hooks_mtx);
3410}
3411
3412void
3413if_addrhooks_run(struct ifnet *ifp)
3414{
3415 if_hooks_run(&ifp->if_addrhooks);
3416}
3417
3418void
3419if_rxr_init(struct if_rxring *rxr, u_int lwm, u_int hwm)
3420{
3421 extern int ticks;
3422
3423 memset(rxr, 0, sizeof(*rxr));
3424
3425 rxr->rxr_adjusted = ticks;
3426 rxr->rxr_cwm = rxr->rxr_lwm = lwm;
3427 rxr->rxr_hwm = hwm;
3428}
3429
3430static inline void
3431if_rxr_adjust_cwm(struct if_rxring *rxr)
3432{
3433 extern int ticks;
3434
3435 if (rxr->rxr_alive >= rxr->rxr_lwm)
3436 return;
3437 else if (rxr->rxr_cwm < rxr->rxr_hwm)
3438 rxr->rxr_cwm++;
3439
3440 rxr->rxr_adjusted = ticks;
3441}
3442
3443void
3444if_rxr_livelocked(struct if_rxring *rxr)
3445{
3446 extern int ticks;
3447
3448 if (ticks - rxr->rxr_adjusted >= 1) {
3449 if (rxr->rxr_cwm > rxr->rxr_lwm)
3450 rxr->rxr_cwm--;
3451
3452 rxr->rxr_adjusted = ticks;
3453 }
3454}
3455
3456u_int
3457if_rxr_get(struct if_rxring *rxr, u_int max)
3458{
3459 extern int ticks;
3460 u_int diff;
3461
3462 if (ticks - rxr->rxr_adjusted >= 1) {
3463 /* we're free to try for an adjustment */
3464 if_rxr_adjust_cwm(rxr);
3465 }
3466
3467 if (rxr->rxr_alive >= rxr->rxr_cwm)
3468 return (0);
3469
3470 diff = min(rxr->rxr_cwm - rxr->rxr_alive, max);
3471 rxr->rxr_alive += diff;
3472
3473 return (diff);
3474}
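
Drivers use this accounting when refilling their receive rings: if_rxr_get() grants at most "max" descriptors, bounded by the current watermark, and whatever cannot actually be posted is handed back. An editor's sketch of that pattern follows (not part of if.c); if_rxr_put() is assumed to be the companion helper from if_var.h, and MY_NSLOTS, struct my_softc and my_post_rx_buffer() are hypothetical.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/socket.h>
#include <net/if.h>
#include <net/if_var.h>

#define MY_NSLOTS       256             /* hypothetical ring size */

struct my_softc {                       /* hypothetical driver state */
        struct if_rxring        sc_rxring;
};

int     my_post_rx_buffer(struct my_softc *);   /* hypothetical */

int
my_rxfill(struct my_softc *sc)
{
        struct if_rxring *rxr = &sc->sc_rxring;
        u_int slots;
        int post = 0;

        /* claim up to MY_NSLOTS descriptors, limited by the current watermark */
        for (slots = if_rxr_get(rxr, MY_NSLOTS); slots > 0; slots--) {
                if (my_post_rx_buffer(sc) != 0)
                        break;          /* mbuf allocation failed */
                post++;
        }

        /* hand back the descriptors that were claimed but not posted */
        if_rxr_put(rxr, slots);

        return (post);
}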
3475
3476int
3477if_rxr_info_ioctl(struct if_rxrinfo *uifri, u_int t, struct if_rxring_info *e)
3478{
3479 struct if_rxrinfo kifri;
3480 int error;
3481 u_int n;
3482
3483 error = copyin(uifri, &kifri, sizeof(kifri));
3484 if (error)
3485 return (error);
3486
3487 n = min(t, kifri.ifri_total);
3488 kifri.ifri_total = t;
3489
3490 if (n > 0) {
3491 error = copyout(e, kifri.ifri_entries, sizeof(*e) * n);
3492 if (error)
3493 return (error);
3494 }
3495
3496 return (copyout(&kifri, uifri, sizeof(kifri)));
3497}
3498
3499int
3500if_rxr_ioctl(struct if_rxrinfo *ifri, const char *name, u_int size,
3501 struct if_rxring *rxr)
3502{
3503 struct if_rxring_info ifr;
3504
3505 memset(&ifr, 0, sizeof(ifr));
3506
3507 if (name != NULL)
3508 strlcpy(ifr.ifr_name, name, sizeof(ifr.ifr_name));
3509
3510 ifr.ifr_size = size;
3511 ifr.ifr_info = *rxr;
3512
3513 return (if_rxr_info_ioctl(ifri, 1, &ifr));
3514}
3515
3516/*
3517 * Network stack input queues.
3518 */
3519
3520void
3521niq_init(struct niqueue *niq, u_int maxlen, u_int isr)
3522{
3523 mq_init(&niq->ni_q, maxlen, IPL_NET);
3524 niq->ni_isr = isr;
3525}
3526
3527int
3528niq_enqueue(struct niqueue *niq, struct mbuf *m)
3529{
3530 int rv;
3531
3532 rv = mq_enqueue(&niq->ni_q, m);
3533 if (rv == 0)
3534 schednetisr(niq->ni_isr);
3535 else
3536 if_congestion();
3537
3538 return (rv);
3539}
3540
3541int
3542niq_enlist(struct niqueue *niq, struct mbuf_list *ml)
3543{
3544 int rv;
3545
3546 rv = mq_enlist(&niq->ni_q, ml);
3547 if (rv == 0)
3548 schednetisr(niq->ni_isr);
3549 else
3550 if_congestion();
3551
3552 return (rv);
3553}
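
A sketch of how a protocol attaches to these input queues (editor's illustration, not part of if.c; it assumes the niqueue declarations from if_var.h and uses NETISR_IP and a queue depth of 256 purely as examples):

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/socket.h>
#include <sys/mbuf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/netisr.h>

struct niqueue my_intrq;                /* hypothetical protocol input queue */

void
my_input_init(void)
{
        /* hold at most 256 packets; drained by the IP netisr */
        niq_init(&my_intrq, 256, NETISR_IP);
}

int
my_input(struct mbuf *m)
{
        /* non-zero means the queue was full and the packet was not queued */
        return (niq_enqueue(&my_intrq, m));
}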
3554
3555__dead void
3556unhandled_af(int af)
3557{
3558 panic("unhandled af %d", af);
3559}
3560
3561struct taskq *
3562net_tq(unsigned int ifindex)
3563{
3564 struct softnet *sn;
3565 static int nettaskqs;
3566
3567 if (nettaskqs == 0)
3568 nettaskqs = min(NET_TASKQ, ncpus);
3569
3570 sn = &softnets[ifindex % nettaskqs];
3571
3572 return (sn->sn_taskq);
3573}
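
Callers pass an interface index so that work for a given interface always lands on the same softnet thread. An editor's sketch of dispatching deferred work that way (not part of if.c; my_task and my_work() are hypothetical):

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/task.h>
#include <sys/socket.h>
#include <net/if.h>
#include <net/if_var.h>

struct task my_task;                    /* hypothetical deferred work */

void    my_work(void *);                /* hypothetical handler */

void
my_defer(struct ifnet *ifp)
{
        task_set(&my_task, my_work, ifp);
        /* ifindex % nettaskqs picks the same softnet taskq every time */
        task_add(net_tq(ifp->if_index), &my_task);
}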
3574
3575void
3576net_tq_barriers(const char *wmesg)
3577{
3578 struct task barriers[NET_TASKQ];
3579 struct refcnt r = REFCNT_INITIALIZER();
3580 int i;
3581
3582 for (i = 0; i < nitems(barriers); i++) {
3583 task_set(&barriers[i], (void (*)(void *))refcnt_rele_wake, &r);
3584 refcnt_take(&r);
3585 task_add(softnets[i].sn_taskq, &barriers[i]);
3586 }
3587
3588 refcnt_finalize(&r, wmesg);
3589}