| File: | net/if.c |
| Warning: | line 351, column 3: Value stored to 'if_map' is never read |
| 1 | /* $OpenBSD: if.c,v 1.647 2022/01/07 16:39:18 deraadt Exp $ */ |
| 2 | /* $NetBSD: if.c,v 1.35 1996/05/07 05:26:04 thorpej Exp $ */ |
| 3 | |
| 4 | /* |
| 5 | * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. |
| 6 | * All rights reserved. |
| 7 | * |
| 8 | * Redistribution and use in source and binary forms, with or without |
| 9 | * modification, are permitted provided that the following conditions |
| 10 | * are met: |
| 11 | * 1. Redistributions of source code must retain the above copyright |
| 12 | * notice, this list of conditions and the following disclaimer. |
| 13 | * 2. Redistributions in binary form must reproduce the above copyright |
| 14 | * notice, this list of conditions and the following disclaimer in the |
| 15 | * documentation and/or other materials provided with the distribution. |
| 16 | * 3. Neither the name of the project nor the names of its contributors |
| 17 | * may be used to endorse or promote products derived from this software |
| 18 | * without specific prior written permission. |
| 19 | * |
| 20 | * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND |
| 21 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
| 22 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
| 23 | * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE |
| 24 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
| 25 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
| 26 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
| 27 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
| 28 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
| 29 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
| 30 | * SUCH DAMAGE. |
| 31 | */ |
| 32 | |
| 33 | /* |
| 34 | * Copyright (c) 1980, 1986, 1993 |
| 35 | * The Regents of the University of California. All rights reserved. |
| 36 | * |
| 37 | * Redistribution and use in source and binary forms, with or without |
| 38 | * modification, are permitted provided that the following conditions |
| 39 | * are met: |
| 40 | * 1. Redistributions of source code must retain the above copyright |
| 41 | * notice, this list of conditions and the following disclaimer. |
| 42 | * 2. Redistributions in binary form must reproduce the above copyright |
| 43 | * notice, this list of conditions and the following disclaimer in the |
| 44 | * documentation and/or other materials provided with the distribution. |
| 45 | * 3. Neither the name of the University nor the names of its contributors |
| 46 | * may be used to endorse or promote products derived from this software |
| 47 | * without specific prior written permission. |
| 48 | * |
| 49 | * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND |
| 50 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
| 51 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
| 52 | * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE |
| 53 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
| 54 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
| 55 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
| 56 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
| 57 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
| 58 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
| 59 | * SUCH DAMAGE. |
| 60 | * |
| 61 | * @(#)if.c 8.3 (Berkeley) 1/4/94 |
| 62 | */ |
| 63 | |
| 64 | #include "bpfilter.h" |
| 65 | #include "bridge.h" |
| 66 | #include "carp.h" |
| 67 | #include "ether.h" |
| 68 | #include "pf.h" |
| 69 | #include "pfsync.h" |
| 70 | #include "ppp.h" |
| 71 | #include "if_wg.h" |
| 72 | |
| 73 | #include <sys/param.h> |
| 74 | #include <sys/systm.h> |
| 75 | #include <sys/mbuf.h> |
| 76 | #include <sys/socket.h> |
| 77 | #include <sys/socketvar.h> |
| 78 | #include <sys/timeout.h> |
| 79 | #include <sys/protosw.h> |
| 80 | #include <sys/kernel.h> |
| 81 | #include <sys/ioctl.h> |
| 82 | #include <sys/domain.h> |
| 83 | #include <sys/task.h> |
| 84 | #include <sys/atomic.h> |
| 85 | #include <sys/percpu.h> |
| 86 | #include <sys/proc.h> |
| 87 | #include <sys/stdint.h> /* uintptr_t */ |
| 88 | #include <sys/rwlock.h> |
| 89 | |
| 90 | #include <net/if.h> |
| 91 | #include <net/if_dl.h> |
| 92 | #include <net/if_types.h> |
| 93 | #include <net/route.h> |
| 94 | #include <net/netisr.h> |
| 95 | |
| 96 | #include <netinet/in.h> |
| 97 | #include <netinet/if_ether.h> |
| 98 | #include <netinet/igmp.h> |
| 99 | #ifdef MROUTING |
| 100 | #include <netinet/ip_mroute.h> |
| 101 | #endif |
| 102 | |
| 103 | #ifdef INET6 |
| 104 | #include <netinet6/in6_var.h> |
| 105 | #include <netinet6/in6_ifattach.h> |
| 106 | #include <netinet6/nd6.h> |
| 107 | #include <netinet/ip6.h> |
| 108 | #include <netinet6/ip6_var.h> |
| 109 | #endif |
| 110 | |
| 111 | #ifdef MPLS |
| 112 | #include <netmpls/mpls.h> |
| 113 | #endif |
| 114 | |
| 115 | #if NBPFILTER > 0 |
| 116 | #include <net/bpf.h> |
| 117 | #endif |
| 118 | |
| 119 | #if NBRIDGE > 0 |
| 120 | #include <net/if_bridge.h> |
| 121 | #endif |
| 122 | |
| 123 | #if NCARP > 0 |
| 124 | #include <netinet/ip_carp.h> |
| 125 | #endif |
| 126 | |
| 127 | #if NPF > 0 |
| 128 | #include <net/pfvar.h> |
| 129 | #endif |
| 130 | |
| 131 | #include <sys/device.h> |
| 132 | |
| 133 | void if_attachsetup(struct ifnet *); |
| 134 | void if_attachdomain(struct ifnet *); |
| 135 | void if_attach_common(struct ifnet *); |
| 136 | void if_remove(struct ifnet *); |
| 137 | int if_createrdomain(int, struct ifnet *); |
| 138 | int if_setrdomain(struct ifnet *, int); |
| 139 | void if_slowtimo(void *); |
| 140 | |
| 141 | void if_detached_qstart(struct ifqueue *); |
| 142 | int if_detached_ioctl(struct ifnet *, u_long, caddr_t); |
| 143 | |
| 144 | int ifioctl_get(u_long, caddr_t); |
| 145 | int ifconf(caddr_t); |
| 146 | static int |
| 147 | if_sffpage_check(const caddr_t); |
| 148 | |
| 149 | int if_getgroup(caddr_t, struct ifnet *); |
| 150 | int if_getgroupmembers(caddr_t); |
| 151 | int if_getgroupattribs(caddr_t); |
| 152 | int if_setgroupattribs(caddr_t); |
| 153 | int if_getgrouplist(caddr_t); |
| 154 | |
| 155 | void if_linkstate(struct ifnet *); |
| 156 | void if_linkstate_task(void *); |
| 157 | |
| 158 | int if_clone_list(struct if_clonereq *); |
| 159 | struct if_clone *if_clone_lookup(const char *, int *); |
| 160 | |
| 161 | int if_group_egress_build(void); |
| 162 | |
| 163 | void if_watchdog_task(void *); |
| 164 | |
| 165 | void if_netisr(void *); |
| 166 | |
| 167 | #ifdef DDB |
| 168 | void ifa_print_all(void); |
| 169 | #endif |
| 170 | |
| 171 | void if_qstart_compat(struct ifqueue *); |
| 172 | |
| 173 | /* |
| 174 | * interface index map |
| 175 | * |
| 176 | * the kernel maintains a mapping of interface indexes to struct ifnet |
| 177 | * pointers. |
| 178 | * |
| 179 | * the map is an array of struct ifnet pointers prefixed by an if_map |
| 180 | * structure. the if_map structure stores the length of its array. |
| 181 | * |
| 182 | * as interfaces are attached to the system, the map is grown on demand |
| 183 | * up to USHRT_MAX entries. |
| 184 | * |
| 185 | * interface index 0 is reserved and represents no interface. this |
| 186 | * supports the use of the interface index as the scope for IPv6 link |
| 187 | * local addresses, where scope 0 means no scope has been specified. |
| 188 | * it also supports the use of interface index as the unique identifier |
| 189 | * for network interfaces in SNMP applications as per RFC2863. therefore |
| 190 | * if_get(0) returns NULL. |
| 191 | */ |
| 192 | |
| 193 | void if_ifp_dtor(void *, void *); |
| 194 | void if_map_dtor(void *, void *); |
| 195 | struct ifnet *if_ref(struct ifnet *); |
| 196 | |
| 197 | /* |
| 198 | * struct if_map |
| 199 | * |
| 200 | * bounded array of ifnet srp pointers used to fetch references of live |
| 201 | * interfaces with if_get(). |
| 202 | */ |
| 203 | |
| 204 | struct if_map { |
| 205 | unsigned long limit; |
| 206 | /* followed by limit ifnet srp pointers */ |
| 207 | }; |
| 208 | |
| 209 | /* |
| 210 | * struct if_idxmap |
| 211 | * |
| 212 | * infrastructure to manage updates and accesses to the current if_map. |
| 213 | */ |
| 214 | |
| 215 | struct if_idxmap { |
| 216 | unsigned int serial; |
| 217 | unsigned int count; |
| 218 | struct srp map; |
| 219 | }; |
| 220 | |
| 221 | void if_idxmap_init(unsigned int); |
| 222 | void if_idxmap_insert(struct ifnet *); |
| 223 | void if_idxmap_remove(struct ifnet *); |
| 224 | |
| 225 | TAILQ_HEAD(, ifg_group) ifg_head = TAILQ_HEAD_INITIALIZER(ifg_head); |
| 226 | |
| 227 | LIST_HEAD(, if_clone) if_cloners = LIST_HEAD_INITIALIZER(if_cloners); |
| 228 | int if_cloners_count; |
| 229 | |
| 230 | struct rwlock if_cloners_lock = RWLOCK_INITIALIZER("clonelk"); |
| 231 | |
| 232 | /* hooks should only be added, deleted, and run from a process context */ |
| 233 | struct mutex if_hooks_mtx = MUTEX_INITIALIZER(IPL_NONE); |
| 234 | void if_hooks_run(struct task_list *); |
| 235 | |
| 236 | int ifq_congestion; |
| 237 | |
| 238 | int netisr; |
| 239 | |
| 240 | #define NET_TASKQ 1 |
| 241 | struct taskq *nettqmp[NET_TASKQ]; |
| 242 | |
| 243 | struct task if_input_task_locked = TASK_INITIALIZER(if_netisr, NULL); |
| 244 | |
| 245 | /* |
| 246 | * Serialize socket operations to ensure no new sleeping points |
| 247 | * are introduced in IP output paths. |
| 248 | */ |
| 249 | struct rwlock netlock = RWLOCK_INITIALIZER("netlock"); |
| 250 | |
| 251 | /* |
| 252 | * Network interface utility routines. |
| 253 | */ |
| 254 | void |
| 255 | ifinit(void) |
| 256 | { |
| 257 | unsigned int i; |
| 258 | |
| 259 | /* |
| 260 | * most machines boot with 4 or 5 interfaces, so size the initial map |
| 261 | * to accommodate this |
| 262 | */ |
| 263 | if_idxmap_init(8); |
| 264 | |
| 265 | for (i = 0; i < NET_TASKQ; i++) { |
| 266 | nettqmp[i] = taskq_create("softnet", 1, IPL_NET, TASKQ_MPSAFE); |
| 267 | if (nettqmp[i] == NULL) |
| 268 | panic("unable to create network taskq %d", i); |
| 269 | } |
| 270 | } |
| 271 | |
| 272 | static struct if_idxmap if_idxmap = { |
| 273 | 0, |
| 274 | 0, |
| 275 | SRP_INITIALIZER() |
| 276 | }; |
| 277 | |
| 278 | struct srp_gc if_ifp_gc = SRP_GC_INITIALIZER(if_ifp_dtor, NULL); |
| 279 | struct srp_gc if_map_gc = SRP_GC_INITIALIZER(if_map_dtor, NULL); |
| 280 | |
| 281 | struct ifnet_head ifnet = TAILQ_HEAD_INITIALIZER(ifnet); |
| 282 | |
| 283 | void |
| 284 | if_idxmap_init(unsigned int limit) |
| 285 | { |
| 286 | struct if_map *if_map; |
| 287 | struct srp *map; |
| 288 | unsigned int i; |
| 289 | |
| 290 | if_idxmap.serial = 1; /* skip ifidx 0 so it can return NULL */ |
| 291 | |
| 292 | if_map = malloc(sizeof(*if_map) + limit * sizeof(*map), |
| 293 | M_IFADDR, M_WAITOK); |
| 294 | |
| 295 | if_map->limit = limit; |
| 296 | map = (struct srp *)(if_map + 1); |
| 297 | for (i = 0; i < limit; i++) |
| 298 | srp_init(&map[i]); |
| 299 | |
| 300 | /* this is called early so there's nothing to race with */ |
| 301 | srp_update_locked(&if_map_gc, &if_idxmap.map, if_map); |
| 302 | } |
| 303 | |
| 304 | void |
| 305 | if_idxmap_insert(struct ifnet *ifp) |
| 306 | { |
| 307 | struct if_map *if_map; |
| 308 | struct srp *map; |
| 309 | unsigned int index, i; |
| 310 | |
| 311 | refcnt_init(&ifp->if_refcnt); |
| 312 | |
| 313 | /* the kernel lock guarantees serialised modifications to if_idxmap */ |
| 314 | KERNEL_ASSERT_LOCKED(); |
| 315 | |
| 316 | if (++if_idxmap.count > USHRT_MAX) |
| 317 | panic("too many interfaces"); |
| 318 | |
| 319 | if_map = srp_get_locked(&if_idxmap.map); |
| 320 | map = (struct srp *)(if_map + 1); |
| 321 | |
| 322 | index = if_idxmap.serial++ & USHRT_MAX; |
| 323 | |
| 324 | if (index >= if_map->limit) { |
| 325 | struct if_map *nif_map; |
| 326 | struct srp *nmap; |
| 327 | unsigned int nlimit; |
| 328 | struct ifnet *nifp; |
| 329 | |
| 330 | nlimit = if_map->limit * 2; |
| 331 | nif_map = malloc(sizeof(*nif_map) + nlimit * sizeof(*nmap), |
| 332 | M_IFADDR, M_WAITOK); |
| 333 | nmap = (struct srp *)(nif_map + 1); |
| 334 | |
| 335 | nif_map->limit = nlimit; |
| 336 | for (i = 0; i < if_map->limit; i++) { |
| 337 | srp_init(&nmap[i]); |
| 338 | nifp = srp_get_locked(&map[i]); |
| 339 | if (nifp != NULL) { |
| 340 | srp_update_locked(&if_ifp_gc, &nmap[i], |
| 341 | if_ref(nifp)); |
| 342 | } |
| 343 | } |
| 344 | |
| 345 | while (i < nlimit) { |
| 346 | srp_init(&nmap[i]); |
| 347 | i++; |
| 348 | } |
| 349 | |
| 350 | srp_update_locked(&if_map_gc, &if_idxmap.map, nif_map); |
| 351 | if_map = nif_map; |
Value stored to 'if_map' is never read | |
| 352 | map = nmap; |
| 353 | } |
| 354 | |
| 355 | /* pick the next free index */ |
| 356 | for (i = 0; i < USHRT_MAX; i++) { |
| 357 | if (index != 0 && srp_get_locked(&map[index]) == NULL) |
| 358 | break; |
| 359 | |
| 360 | index = if_idxmap.serial++ & USHRT_MAX; |
| 361 | } |
| 362 | |
| 363 | /* commit */ |
| 364 | ifp->if_index = index; |
| 365 | srp_update_locked(&if_ifp_gc, &map[index], if_ref(ifp)); |
| 366 | } |
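The analyzer note above flags the `if_map = nif_map;` store at line 351: after the grown map has been published with srp_update_locked(), the local `if_map` is never read again (the code only uses `map`, which is reassigned on the next line), so the store is dead. The following standalone sketch is not part of if.c; the `idxmap` names are invented for illustration. It models the same grow-by-doubling pattern in userland C, with a caller that does read the returned pointer.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* like struct if_map: a length header followed by the slot array */
struct idxmap {
	unsigned long limit;
	void *slot[];
};

static struct idxmap *
idxmap_alloc(unsigned long limit)
{
	struct idxmap *map;

	map = calloc(1, sizeof(*map) + limit * sizeof(map->slot[0]));
	if (map == NULL)
		abort();
	map->limit = limit;
	return map;
}

/* double the map, copy the live slots, release the old map */
static struct idxmap *
idxmap_grow(struct idxmap *old)
{
	struct idxmap *new = idxmap_alloc(old->limit * 2);

	memcpy(new->slot, old->slot, old->limit * sizeof(new->slot[0]));
	free(old);
	return new;
}

int
main(void)
{
	struct idxmap *map = idxmap_alloc(8);
	int dummy;

	map->slot[3] = &dummy;		/* pretend index 3 is in use */
	map = idxmap_grow(map);		/* the caller reads the new pointer */

	printf("limit %lu, slot 3 %s\n", map->limit,
	    map->slot[3] == &dummy ? "kept" : "lost");
	free(map);
	return 0;
}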
| 367 | |
| 368 | void |
| 369 | if_idxmap_remove(struct ifnet *ifp) |
| 370 | { |
| 371 | struct if_map *if_map; |
| 372 | struct srp *map; |
| 373 | unsigned int index; |
| 374 | |
| 375 | index = ifp->if_index; |
| 376 | |
| 377 | /* the kernel lock guarantees serialised modifications to if_idxmap */ |
| 378 | KERNEL_ASSERT_LOCKED(); |
| 379 | |
| 380 | if_map = srp_get_locked(&if_idxmap.map); |
| 381 | KASSERT(index < if_map->limit); |
| 382 | |
| 383 | map = (struct srp *)(if_map + 1); |
| 384 | KASSERT(ifp == (struct ifnet *)srp_get_locked(&map[index])); |
| 385 | |
| 386 | srp_update_locked(&if_ifp_gc, &map[index], NULL); |
| 387 | if_idxmap.count--; |
| 388 | /* end of if_idxmap modifications */ |
| 389 | } |
| 390 | |
| 391 | void |
| 392 | if_ifp_dtor(void *null, void *ifp) |
| 393 | { |
| 394 | if_put(ifp); |
| 395 | } |
| 396 | |
| 397 | void |
| 398 | if_map_dtor(void *null, void *m) |
| 399 | { |
| 400 | struct if_map *if_map = m; |
| 401 | struct srp *map = (struct srp *)(if_map + 1); |
| 402 | unsigned int i; |
| 403 | |
| 404 | /* |
| 405 | * dont need to serialize the use of update_locked since this is |
| 406 | * the last reference to this map. there's nothing to race against. |
| 407 | */ |
| 408 | for (i = 0; i < if_map->limit; i++) |
| 409 | srp_update_locked(&if_ifp_gc, &map[i], NULL); |
| 410 | |
| 411 | free(if_map, M_IFADDR, sizeof(*if_map) + if_map->limit * sizeof(*map)); |
| 412 | } |
| 413 | |
| 414 | /* |
| 415 | * Attach an interface to the |
| 416 | * list of "active" interfaces. |
| 417 | */ |
| 418 | void |
| 419 | if_attachsetup(struct ifnet *ifp) |
| 420 | { |
| 421 | unsigned long ifidx; |
| 422 | |
| 423 | NET_ASSERT_LOCKED(); |
| 424 | |
| 425 | if_addgroup(ifp, IFG_ALL); |
| 426 | |
| 427 | if_attachdomain(ifp); |
| 428 | #if NPF > 0 |
| 429 | pfi_attach_ifnet(ifp); |
| 430 | #endif |
| 431 | |
| 432 | timeout_set(&ifp->if_slowtimo, if_slowtimo, ifp); |
| 433 | if_slowtimo(ifp); |
| 434 | |
| 435 | if_idxmap_insert(ifp); |
| 436 | KASSERT(if_get(0) == NULL); |
| 437 | |
| 438 | ifidx = ifp->if_index; |
| 439 | |
| 440 | task_set(&ifp->if_watchdogtask, if_watchdog_task, (void *)ifidx); |
| 441 | task_set(&ifp->if_linkstatetask, if_linkstate_task, (void *)ifidx); |
| 442 | |
| 443 | /* Announce the interface. */ |
| 444 | rtm_ifannounce(ifp, IFAN_ARRIVAL); |
| 445 | } |
| 446 | |
| 447 | /* |
| 448 | * Allocate the link level name for the specified interface. This |
| 449 | * is an attachment helper. It must be called after ifp->if_addrlen |
| 450 | * is initialized, which may not be the case when if_attach() is |
| 451 | * called. |
| 452 | */ |
| 453 | void |
| 454 | if_alloc_sadl(struct ifnet *ifp) |
| 455 | { |
| 456 | unsigned int socksize; |
| 457 | int namelen, masklen; |
| 458 | struct sockaddr_dl *sdl; |
| 459 | |
| 460 | /* |
| 461 | * If the interface already has a link name, release it |
| 462 | * now. This is useful for interfaces that can change |
| 463 | * link types, and thus switch link names often. |
| 464 | */ |
| 465 | if_free_sadl(ifp); |
| 466 | |
| 467 | namelen = strlen(ifp->if_xname); |
| 468 | masklen = offsetof(struct sockaddr_dl, sdl_data[0]) + namelen; |
| 469 | socksize = masklen + ifp->if_addrlen; |
| 470 | #define ROUNDUP(a) (1 + (((a) - 1) | (sizeof(long) - 1))) |
| 471 | if (socksize < sizeof(*sdl)) |
| 472 | socksize = sizeof(*sdl); |
| 473 | socksize = ROUNDUP(socksize); |
| 474 | sdl = malloc(socksize, M_IFADDR, M_WAITOK|M_ZERO); |
| 475 | sdl->sdl_len = socksize; |
| 476 | sdl->sdl_family = AF_LINK; |
| 477 | bcopy(ifp->if_xname, sdl->sdl_data, namelen); |
| 478 | sdl->sdl_nlen = namelen; |
| 479 | sdl->sdl_alen = ifp->if_addrlen; |
| 480 | sdl->sdl_index = ifp->if_index; |
| 481 | sdl->sdl_type = ifp->if_type; |
| 482 | ifp->if_sadl = sdl; |
| 483 | } |
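if_alloc_sadl() sizes the sockaddr_dl so it can hold the interface name plus the link-layer address, enforces a minimum of sizeof(*sdl), and rounds the total up to a multiple of sizeof(long) with the ROUNDUP() bit trick. A small standalone sketch of that arithmetic follows; it is not part of if.c, and the 8-byte offset and 32-byte minimum are stand-in values, not the real offsetof/sizeof figures.

#include <stdio.h>

#define ROUNDUP(a)	(1 + (((a) - 1) | (sizeof(long) - 1)))

int
main(void)
{
	size_t namelen = 3;		/* e.g. "em0" */
	size_t addrlen = 6;		/* Ethernet MAC */
	size_t masklen = 8 + namelen;	/* stand-in for offsetof(struct sockaddr_dl, sdl_data[0]) */
	size_t socksize = masklen + addrlen;
	size_t minsize = 32;		/* stand-in for sizeof(struct sockaddr_dl) */

	if (socksize < minsize)
		socksize = minsize;
	socksize = ROUNDUP(socksize);	/* next multiple of sizeof(long) */

	printf("allocate %zu bytes\n", socksize);
	return 0;
}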
| 484 | |
| 485 | /* |
| 486 | * Free the link level name for the specified interface. This is |
| 487 | * a detach helper. This is called from if_detach() or from |
| 488 | * link layer type specific detach functions. |
| 489 | */ |
| 490 | void |
| 491 | if_free_sadl(struct ifnet *ifp) |
| 492 | { |
| 493 | if (ifp->if_sadl == NULL) |
| 494 | return; |
| 495 | |
| 496 | free(ifp->if_sadl, M_IFADDR, ifp->if_sadl->sdl_len); |
| 497 | ifp->if_sadl = NULL; |
| 498 | } |
| 499 | |
| 500 | void |
| 501 | if_attachdomain(struct ifnet *ifp) |
| 502 | { |
| 503 | const struct domain *dp; |
| 504 | int i, s; |
| 505 | |
| 506 | s = splnet()splraise(0x7); |
| 507 | |
| 508 | /* address family dependent data region */ |
| 509 | bzero(ifp->if_afdata, sizeof(ifp->if_afdata))__builtin_bzero((ifp->if_afdata), (sizeof(ifp->if_afdata ))); |
| 510 | for (i = 0; (dp = domains[i]) != NULL((void *)0); i++) { |
| 511 | if (dp->dom_ifattach) |
| 512 | ifp->if_afdata[dp->dom_family] = |
| 513 | (*dp->dom_ifattach)(ifp); |
| 514 | } |
| 515 | |
| 516 | splx(s)spllower(s); |
| 517 | } |
| 518 | |
| 519 | void |
| 520 | if_attachhead(struct ifnet *ifp) |
| 521 | { |
| 522 | if_attach_common(ifp); |
| 523 | NET_LOCK()do { rw_enter_write(&netlock); } while (0); |
| 524 | TAILQ_INSERT_HEAD(&ifnet, ifp, if_list)do { if (((ifp)->if_list.tqe_next = (&ifnet)->tqh_first ) != ((void *)0)) (&ifnet)->tqh_first->if_list.tqe_prev = &(ifp)->if_list.tqe_next; else (&ifnet)->tqh_last = &(ifp)->if_list.tqe_next; (&ifnet)->tqh_first = (ifp); (ifp)->if_list.tqe_prev = &(&ifnet)-> tqh_first; } while (0); |
| 525 | if_attachsetup(ifp); |
| 526 | NET_UNLOCK()do { rw_exit_write(&netlock); } while (0); |
| 527 | } |
| 528 | |
| 529 | void |
| 530 | if_attach(struct ifnet *ifp) |
| 531 | { |
| 532 | if_attach_common(ifp); |
| 533 | NET_LOCK()do { rw_enter_write(&netlock); } while (0); |
| 534 | TAILQ_INSERT_TAIL(&ifnet, ifp, if_list)do { (ifp)->if_list.tqe_next = ((void *)0); (ifp)->if_list .tqe_prev = (&ifnet)->tqh_last; *(&ifnet)->tqh_last = (ifp); (&ifnet)->tqh_last = &(ifp)->if_list. tqe_next; } while (0); |
| 535 | if_attachsetup(ifp); |
| 536 | NET_UNLOCK()do { rw_exit_write(&netlock); } while (0); |
| 537 | } |
| 538 | |
| 539 | void |
| 540 | if_attach_queues(struct ifnet *ifp, unsigned int nqs) |
| 541 | { |
| 542 | struct ifqueue **map; |
| 543 | struct ifqueue *ifq; |
| 544 | int i; |
| 545 | |
| 546 | KASSERT(ifp->if_ifqs == ifp->if_snd.ifq_ifqs)((ifp->if_ifqs == ifp->if_snd._ifq_ptr._ifq_ifqs) ? (void )0 : __assert("diagnostic ", "/usr/src/sys/net/if.c", 546, "ifp->if_ifqs == ifp->if_snd.ifq_ifqs" )); |
| 547 | KASSERT(nqs != 0)((nqs != 0) ? (void)0 : __assert("diagnostic ", "/usr/src/sys/net/if.c" , 547, "nqs != 0")); |
| 548 | |
| 549 | map = mallocarray(sizeof(*map), nqs, M_DEVBUF2, M_WAITOK0x0001); |
| 550 | |
| 551 | ifp->if_snd.ifq_softc_ifq_ptr._ifq_softc = NULL((void *)0); |
| 552 | map[0] = &ifp->if_snd; |
| 553 | |
| 554 | for (i = 1; i < nqs; i++) { |
| 555 | ifq = malloc(sizeof(*ifq), M_DEVBUF2, M_WAITOK0x0001|M_ZERO0x0008); |
| 556 | ifq_set_maxlen(ifq, ifp->if_snd.ifq_maxlen)((ifq)->ifq_maxlen = (ifp->if_snd.ifq_maxlen)); |
| 557 | ifq_init(ifq, ifp, i); |
| 558 | map[i] = ifq; |
| 559 | } |
| 560 | |
| 561 | ifp->if_ifqs = map; |
| 562 | ifp->if_nifqs = nqs; |
| 563 | } |
| 564 | |
| 565 | void |
| 566 | if_attach_iqueues(struct ifnet *ifp, unsigned int niqs) |
| 567 | { |
| 568 | struct ifiqueue **map; |
| 569 | struct ifiqueue *ifiq; |
| 570 | unsigned int i; |
| 571 | |
| 572 | KASSERT(niqs != 0)((niqs != 0) ? (void)0 : __assert("diagnostic ", "/usr/src/sys/net/if.c" , 572, "niqs != 0")); |
| 573 | |
| 574 | map = mallocarray(niqs, sizeof(*map), M_DEVBUF2, M_WAITOK0x0001); |
| 575 | |
| 576 | ifp->if_rcv.ifiq_softc_ifiq_ptr._ifiq_softc = NULL((void *)0); |
| 577 | map[0] = &ifp->if_rcv; |
| 578 | |
| 579 | for (i = 1; i < niqs; i++) { |
| 580 | ifiq = malloc(sizeof(*ifiq), M_DEVBUF2, M_WAITOK0x0001|M_ZERO0x0008); |
| 581 | ifiq_init(ifiq, ifp, i); |
| 582 | map[i] = ifiq; |
| 583 | } |
| 584 | |
| 585 | ifp->if_iqs = map; |
| 586 | ifp->if_niqs = niqs; |
| 587 | } |
| 588 | |
| 589 | void |
| 590 | if_attach_common(struct ifnet *ifp) |
| 591 | { |
| 592 | KASSERT(ifp->if_ioctl != NULL)((ifp->if_ioctl != ((void *)0)) ? (void)0 : __assert("diagnostic " , "/usr/src/sys/net/if.c", 592, "ifp->if_ioctl != NULL")); |
| 593 | |
| 594 | TAILQ_INIT(&ifp->if_addrlist)do { (&ifp->if_addrlist)->tqh_first = ((void *)0); ( &ifp->if_addrlist)->tqh_last = &(&ifp->if_addrlist )->tqh_first; } while (0); |
| 595 | TAILQ_INIT(&ifp->if_maddrlist)do { (&ifp->if_maddrlist)->tqh_first = ((void *)0); (&ifp->if_maddrlist)->tqh_last = &(&ifp-> if_maddrlist)->tqh_first; } while (0); |
| 596 | TAILQ_INIT(&ifp->if_groups)do { (&ifp->if_groups)->tqh_first = ((void *)0); (& ifp->if_groups)->tqh_last = &(&ifp->if_groups )->tqh_first; } while (0); |
| 597 | |
| 598 | if (!ISSET(ifp->if_xflags, IFXF_MPSAFE)((ifp->if_xflags) & (0x1))) { |
| 599 | KASSERTMSG(ifp->if_qstart == NULL,((ifp->if_qstart == ((void *)0)) ? (void)0 : panic("kernel %sassertion \"%s\" failed: file \"%s\", line %d" " " "%s: if_qstart set without MPSAFE set", "diagnostic ", "ifp->if_qstart == NULL" , "/usr/src/sys/net/if.c", 600, ifp->if_xname)) |
| 600 | "%s: if_qstart set without MPSAFE set", ifp->if_xname)((ifp->if_qstart == ((void *)0)) ? (void)0 : panic("kernel %sassertion \"%s\" failed: file \"%s\", line %d" " " "%s: if_qstart set without MPSAFE set", "diagnostic ", "ifp->if_qstart == NULL" , "/usr/src/sys/net/if.c", 600, ifp->if_xname)); |
| 601 | ifp->if_qstart = if_qstart_compat; |
| 602 | } else { |
| 603 | KASSERTMSG(ifp->if_start == NULL,((ifp->if_start == ((void *)0)) ? (void)0 : panic("kernel %sassertion \"%s\" failed: file \"%s\", line %d" " " "%s: if_start set with MPSAFE set", "diagnostic ", "ifp->if_start == NULL" , "/usr/src/sys/net/if.c", 604, ifp->if_xname)) |
| 604 | "%s: if_start set with MPSAFE set", ifp->if_xname)((ifp->if_start == ((void *)0)) ? (void)0 : panic("kernel %sassertion \"%s\" failed: file \"%s\", line %d" " " "%s: if_start set with MPSAFE set", "diagnostic ", "ifp->if_start == NULL" , "/usr/src/sys/net/if.c", 604, ifp->if_xname)); |
| 605 | KASSERTMSG(ifp->if_qstart != NULL,((ifp->if_qstart != ((void *)0)) ? (void)0 : panic("kernel %sassertion \"%s\" failed: file \"%s\", line %d" " " "%s: if_qstart not set with MPSAFE set", "diagnostic ", "ifp->if_qstart != NULL" , "/usr/src/sys/net/if.c", 606, ifp->if_xname)) |
| 606 | "%s: if_qstart not set with MPSAFE set", ifp->if_xname)((ifp->if_qstart != ((void *)0)) ? (void)0 : panic("kernel %sassertion \"%s\" failed: file \"%s\", line %d" " " "%s: if_qstart not set with MPSAFE set", "diagnostic ", "ifp->if_qstart != NULL" , "/usr/src/sys/net/if.c", 606, ifp->if_xname)); |
| 607 | } |
| 608 | |
| 609 | ifq_init(&ifp->if_snd, ifp, 0); |
| 610 | |
| 611 | ifp->if_snd.ifq_ifqs_ifq_ptr._ifq_ifqs[0] = &ifp->if_snd; |
| 612 | ifp->if_ifqs = ifp->if_snd.ifq_ifqs_ifq_ptr._ifq_ifqs; |
| 613 | ifp->if_nifqs = 1; |
| 614 | if (ifp->if_txmit == 0) |
| 615 | ifp->if_txmit = IF_TXMIT_DEFAULT16; |
| 616 | |
| 617 | ifiq_init(&ifp->if_rcv, ifp, 0); |
| 618 | |
| 619 | ifp->if_rcv.ifiq_ifiqs_ifiq_ptr._ifiq_ifiqs[0] = &ifp->if_rcv; |
| 620 | ifp->if_iqs = ifp->if_rcv.ifiq_ifiqs_ifiq_ptr._ifiq_ifiqs; |
| 621 | ifp->if_niqs = 1; |
| 622 | |
| 623 | TAILQ_INIT(&ifp->if_addrhooks)do { (&ifp->if_addrhooks)->tqh_first = ((void *)0); (&ifp->if_addrhooks)->tqh_last = &(&ifp-> if_addrhooks)->tqh_first; } while (0); |
| 624 | TAILQ_INIT(&ifp->if_linkstatehooks)do { (&ifp->if_linkstatehooks)->tqh_first = ((void * )0); (&ifp->if_linkstatehooks)->tqh_last = &(& ifp->if_linkstatehooks)->tqh_first; } while (0); |
| 625 | TAILQ_INIT(&ifp->if_detachhooks)do { (&ifp->if_detachhooks)->tqh_first = ((void *)0 ); (&ifp->if_detachhooks)->tqh_last = &(&ifp ->if_detachhooks)->tqh_first; } while (0); |
| 626 | |
| 627 | if (ifp->if_rtrequest == NULL((void *)0)) |
| 628 | ifp->if_rtrequest = if_rtrequest_dummy; |
| 629 | if (ifp->if_enqueue == NULL((void *)0)) |
| 630 | ifp->if_enqueue = if_enqueue_ifq; |
| 631 | #if NBPFILTER1 > 0 |
| 632 | if (ifp->if_bpf_mtap == NULL((void *)0)) |
| 633 | ifp->if_bpf_mtap = bpf_mtap_ether; |
| 634 | #endif |
| 635 | ifp->if_llprio = IFQ_DEFPRIO3; |
| 636 | } |
| 637 | |
| 638 | void |
| 639 | if_attach_ifq(struct ifnet *ifp, const struct ifq_ops *newops, void *args) |
| 640 | { |
| 641 | /* |
| 642 | * only switch the ifq_ops on the first ifq on an interface. |
| 643 | * |
| 644 | * the only ifq_ops we provide priq and hfsc, and hfsc only |
| 645 | * works on a single ifq. because the code uses the ifq_ops |
| 646 | * on the first ifq (if_snd) to select a queue for an mbuf, |
| 647 | * by switching only the first one we change both the algorithm |
| 648 | * and force the routing of all new packets to it. |
| 649 | */ |
| 650 | ifq_attach(&ifp->if_snd, newops, args); |
| 651 | } |
| 652 | |
| 653 | void |
| 654 | if_start(struct ifnet *ifp) |
| 655 | { |
| 656 | KASSERT(ifp->if_qstart == if_qstart_compat)((ifp->if_qstart == if_qstart_compat) ? (void)0 : __assert ("diagnostic ", "/usr/src/sys/net/if.c", 656, "ifp->if_qstart == if_qstart_compat" )); |
| 657 | if_qstart_compat(&ifp->if_snd); |
| 658 | } |
| 659 | void |
| 660 | if_qstart_compat(struct ifqueue *ifq) |
| 661 | { |
| 662 | struct ifnet *ifp = ifq->ifq_if; |
| 663 | int s; |
| 664 | |
| 665 | /* |
| 666 | * the stack assumes that an interface can have multiple |
| 667 | * transmit rings, but a lot of drivers are still written |
| 668 | * so that interfaces and send rings have a 1:1 mapping. |
| 669 | * this provides compatibility between the stack and the older |
| 670 | * drivers by translating from the only queue they have |
| 671 | * (ifp->if_snd) back to the interface and calling if_start. |
| 672 | */ |
| 673 | |
| 674 | KERNEL_LOCK()_kernel_lock(); |
| 675 | s = splnet()splraise(0x7); |
| 676 | (*ifp->if_start)(ifp); |
| 677 | splx(s)spllower(s); |
| 678 | KERNEL_UNLOCK()_kernel_unlock(); |
| 679 | } |
| 680 | |
| 681 | int |
| 682 | if_enqueue(struct ifnet *ifp, struct mbuf *m) |
| 683 | { |
| 684 | CLR(m->m_pkthdr.csum_flags, M_TIMESTAMP)((m->M_dat.MH.MH_pkthdr.csum_flags) &= ~(0x2000)); |
| 685 | |
| 686 | #if NPF1 > 0 |
| 687 | if (m->m_pkthdrM_dat.MH.MH_pkthdr.pf.delay > 0) |
| 688 | return (pf_delay_pkt(m, ifp->if_index)); |
| 689 | #endif |
| 690 | |
| 691 | #if NBRIDGE1 > 0 |
| 692 | if (ifp->if_bridgeidx && (m->m_flagsm_hdr.mh_flags & M_PROTO10x0010) == 0) { |
| 693 | int error; |
| 694 | |
| 695 | error = bridge_enqueue(ifp, m); |
| 696 | return (error); |
| 697 | } |
| 698 | #endif |
| 699 | |
| 700 | #if NPF1 > 0 |
| 701 | pf_pkt_addr_changed(m); |
| 702 | #endif /* NPF > 0 */ |
| 703 | |
| 704 | return ((*ifp->if_enqueue)(ifp, m)); |
| 705 | } |
| 706 | |
| 707 | int |
| 708 | if_enqueue_ifq(struct ifnet *ifp, struct mbuf *m) |
| 709 | { |
| 710 | struct ifqueue *ifq = &ifp->if_snd; |
| 711 | int error; |
| 712 | |
| 713 | if (ifp->if_nifqs > 1) { |
| 714 | unsigned int idx; |
| 715 | |
| 716 | /* |
| 717 | * use the operations on the first ifq to pick which of |
| 718 | * the array gets this mbuf. |
| 719 | */ |
| 720 | |
| 721 | idx = ifq_idx(&ifp->if_snd, ifp->if_nifqs, m); |
| 722 | ifq = ifp->if_ifqs[idx]; |
| 723 | } |
| 724 | |
| 725 | error = ifq_enqueue(ifq, m); |
| 726 | if (error) |
| 727 | return (error); |
| 728 | |
| 729 | ifq_start(ifq); |
| 730 | |
| 731 | return (0); |
| 732 | } |
| 733 | |
| 734 | void |
| 735 | if_input(struct ifnet *ifp, struct mbuf_list *ml) |
| 736 | { |
| 737 | ifiq_input(&ifp->if_rcv, ml); |
| 738 | } |
| 739 | |
| 740 | int |
| 741 | if_input_local(struct ifnet *ifp, struct mbuf *m, sa_family_t af) |
| 742 | { |
| 743 | int keepflags; |
| 744 | |
| 745 | #if NBPFILTER1 > 0 |
| 746 | /* |
| 747 | * Only send packets to bpf if they are destined to local |
| 748 | * addresses. |
| 749 | * |
| 750 | * if_input_local() is also called for SIMPLEX interfaces to |
| 751 | * duplicate packets for local use. But don't dup them to bpf. |
| 752 | */ |
| 753 | if (ifp->if_flags & IFF_LOOPBACK0x8) { |
| 754 | caddr_t if_bpf = ifp->if_bpf; |
| 755 | |
| 756 | if (if_bpf) |
| 757 | bpf_mtap_af(if_bpf, af, m, BPF_DIRECTION_OUT(1 << 1)); |
| 758 | } |
| 759 | #endif |
| 760 | keepflags = m->m_flagsm_hdr.mh_flags & (M_BCAST0x0100|M_MCAST0x0200); |
| 761 | m_resethdr(m); |
| 762 | m->m_flagsm_hdr.mh_flags |= M_LOOP0x0040 | keepflags; |
| 763 | m->m_pkthdrM_dat.MH.MH_pkthdr.ph_ifidx = ifp->if_index; |
| 764 | m->m_pkthdrM_dat.MH.MH_pkthdr.ph_rtableid = ifp->if_rdomainif_data.ifi_rdomain; |
| 765 | |
| 766 | ifp->if_opacketsif_data.ifi_opackets++; |
| 767 | ifp->if_obytesif_data.ifi_obytes += m->m_pkthdrM_dat.MH.MH_pkthdr.len; |
| 768 | |
| 769 | ifp->if_ipacketsif_data.ifi_ipackets++; |
| 770 | ifp->if_ibytesif_data.ifi_ibytes += m->m_pkthdrM_dat.MH.MH_pkthdr.len; |
| 771 | |
| 772 | switch (af) { |
| 773 | case AF_INET2: |
| 774 | ipv4_input(ifp, m); |
| 775 | break; |
| 776 | #ifdef INET61 |
| 777 | case AF_INET624: |
| 778 | ipv6_input(ifp, m); |
| 779 | break; |
| 780 | #endif /* INET6 */ |
| 781 | #ifdef MPLS1 |
| 782 | case AF_MPLS33: |
| 783 | mpls_input(ifp, m); |
| 784 | break; |
| 785 | #endif /* MPLS */ |
| 786 | default: |
| 787 | printf("%s: can't handle af%d\n", ifp->if_xname, af); |
| 788 | m_freem(m); |
| 789 | return (EAFNOSUPPORT47); |
| 790 | } |
| 791 | |
| 792 | return (0); |
| 793 | } |
| 794 | |
| 795 | int |
| 796 | if_output_local(struct ifnet *ifp, struct mbuf *m, sa_family_t af) |
| 797 | { |
| 798 | struct ifiqueue *ifiq; |
| 799 | unsigned int flow = 0; |
| 800 | |
| 801 | m->m_pkthdrM_dat.MH.MH_pkthdr.ph_family = af; |
| 802 | m->m_pkthdrM_dat.MH.MH_pkthdr.ph_ifidx = ifp->if_index; |
| 803 | m->m_pkthdrM_dat.MH.MH_pkthdr.ph_rtableid = ifp->if_rdomainif_data.ifi_rdomain; |
| 804 | |
| 805 | if (ISSET(m->m_pkthdr.csum_flags, M_FLOWID)((m->M_dat.MH.MH_pkthdr.csum_flags) & (0x4000))) |
| 806 | flow = m->m_pkthdrM_dat.MH.MH_pkthdr.ph_flowid; |
| 807 | |
| 808 | ifiq = ifp->if_iqs[flow % ifp->if_niqs]; |
| 809 | |
| 810 | return (ifiq_enqueue(ifiq, m) == 0 ? 0 : ENOBUFS55); |
| 811 | } |
| 812 | |
| 813 | void |
| 814 | if_input_process(struct ifnet *ifp, struct mbuf_list *ml) |
| 815 | { |
| 816 | struct mbuf *m; |
| 817 | |
| 818 | if (ml_empty(ml)((ml)->ml_len == 0)) |
| 819 | return; |
| 820 | |
| 821 | if (!ISSET(ifp->if_xflags, IFXF_CLONED)((ifp->if_xflags) & (0x2))) |
| 822 | enqueue_randomness(ml_len(ml)((ml)->ml_len) ^ (uintptr_t)MBUF_LIST_FIRST(ml)((ml)->ml_head)); |
| 823 | |
| 824 | /* |
| 825 | * We grab the NET_LOCK() before processing any packet to |
| 826 | * ensure there's no contention on the routing table lock. |
| 827 | * |
| 828 | * Without it we could race with a userland thread to insert |
| 829 | * a L2 entry in ip{6,}_output(). Such race would result in |
| 830 | * one of the threads sleeping *inside* the IP output path. |
| 831 | * |
| 832 | * Since we have a NET_LOCK() we also use it to serialize access |
| 833 | * to PF globals, pipex globals, unicast and multicast addresses |
| 834 | * lists and the socket layer. |
| 835 | */ |
| 836 | |
| 837 | /* |
| 838 | * XXXSMP IPsec data structures are not ready to be accessed |
| 839 | * by multiple network threads in parallel. In this case |
| 840 | * use an exclusive lock. |
| 841 | */ |
| 842 | NET_LOCK()do { rw_enter_write(&netlock); } while (0); |
| 843 | while ((m = ml_dequeue(ml)) != NULL((void *)0)) |
| 844 | (*ifp->if_input)(ifp, m); |
| 845 | NET_UNLOCK()do { rw_exit_write(&netlock); } while (0); |
| 846 | } |
| 847 | |
| 848 | void |
| 849 | if_vinput(struct ifnet *ifp, struct mbuf *m) |
| 850 | { |
| 851 | #if NBPFILTER1 > 0 |
| 852 | caddr_t if_bpf; |
| 853 | #endif |
| 854 | |
| 855 | m->m_pkthdrM_dat.MH.MH_pkthdr.ph_ifidx = ifp->if_index; |
| 856 | m->m_pkthdrM_dat.MH.MH_pkthdr.ph_rtableid = ifp->if_rdomainif_data.ifi_rdomain; |
| 857 | |
| 858 | counters_pkt(ifp->if_counters, |
| 859 | ifc_ipackets, ifc_ibytes, m->m_pkthdrM_dat.MH.MH_pkthdr.len); |
| 860 | |
| 861 | #if NPF1 > 0 |
| 862 | pf_pkt_addr_changed(m); |
| 863 | #endif |
| 864 | |
| 865 | #if NBPFILTER1 > 0 |
| 866 | if_bpf = ifp->if_bpf; |
| 867 | if (if_bpf) { |
| 868 | if ((*ifp->if_bpf_mtap)(if_bpf, m, BPF_DIRECTION_IN(1 << 0))) { |
| 869 | m_freem(m); |
| 870 | return; |
| 871 | } |
| 872 | } |
| 873 | #endif |
| 874 | |
| 875 | if (__predict_true(!ISSET(ifp->if_xflags, IFXF_MONITOR))__builtin_expect(((!((ifp->if_xflags) & (0x100))) != 0 ), 1)) |
| 876 | (*ifp->if_input)(ifp, m); |
| 877 | } |
| 878 | |
| 879 | void |
| 880 | if_netisr(void *unused) |
| 881 | { |
| 882 | int n, t = 0; |
| 883 | |
| 884 | NET_LOCK()do { rw_enter_write(&netlock); } while (0); |
| 885 | |
| 886 | while ((n = netisr) != 0) { |
| 887 | /* Like sched_pause() but with a rwlock dance. */ |
| 888 | if (curcpu()({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_schedstate.spc_schedflags & SPCF_SHOULDYIELD0x0002) { |
| 889 | NET_UNLOCK()do { rw_exit_write(&netlock); } while (0); |
| 890 | yield(); |
| 891 | NET_LOCK()do { rw_enter_write(&netlock); } while (0); |
| 892 | } |
| 893 | |
| 894 | atomic_clearbits_intx86_atomic_clearbits_u32(&netisr, n); |
| 895 | |
| 896 | #if NETHER1 > 0 |
| 897 | if (n & (1 << NETISR_ARP18)) { |
| 898 | KERNEL_LOCK()_kernel_lock(); |
| 899 | arpintr(); |
| 900 | KERNEL_UNLOCK()_kernel_unlock(); |
| 901 | } |
| 902 | #endif |
| 903 | #if NPPP1 > 0 |
| 904 | if (n & (1 << NETISR_PPP28)) { |
| 905 | KERNEL_LOCK()_kernel_lock(); |
| 906 | pppintr(); |
| 907 | KERNEL_UNLOCK()_kernel_unlock(); |
| 908 | } |
| 909 | #endif |
| 910 | #if NBRIDGE1 > 0 |
| 911 | if (n & (1 << NETISR_BRIDGE29)) |
| 912 | bridgeintr(); |
| 913 | #endif |
| 914 | t |= n; |
| 915 | } |
| 916 | |
| 917 | #if NPFSYNC1 > 0 |
| 918 | if (t & (1 << NETISR_PFSYNC5)) { |
| 919 | KERNEL_LOCK()_kernel_lock(); |
| 920 | pfsyncintr(); |
| 921 | KERNEL_UNLOCK()_kernel_unlock(); |
| 922 | } |
| 923 | #endif |
| 924 | |
| 925 | NET_UNLOCK()do { rw_exit_write(&netlock); } while (0); |
| 926 | } |
| 927 | |
| 928 | void |
| 929 | if_hooks_run(struct task_list *hooks) |
| 930 | { |
| 931 | struct task *t, *nt; |
| 932 | struct task cursor = { .t_func = NULL }; |
| 933 | void (*func)(void *); |
| 934 | void *arg; |
| 935 | |
| 936 | mtx_enter(&if_hooks_mtx); |
| 937 | for (t = TAILQ_FIRST(hooks); t != NULL; t = nt) { |
| 938 | if (t->t_func == NULL) { /* skip cursors */ |
| 939 | nt = TAILQ_NEXT(t, t_entry); |
| 940 | continue; |
| 941 | } |
| 942 | func = t->t_func; |
| 943 | arg = t->t_arg; |
| 944 | |
| 945 | TAILQ_INSERT_AFTER(hooks, t, &cursor, t_entry); |
| 946 | mtx_leave(&if_hooks_mtx); |
| 947 | |
| 948 | (*func)(arg); |
| 949 | |
| 950 | mtx_enter(&if_hooks_mtx); |
| 951 | nt = TAILQ_NEXT(&cursor, t_entry); /* avoid _Q_INVALIDATE */ |
| 952 | TAILQ_REMOVE(hooks, &cursor, t_entry); |
| 953 | } |
| 954 | mtx_leave(&if_hooks_mtx); |
| 955 | } |
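if_hooks_run() must drop if_hooks_mtx around each callback (hooks may sleep and may even remove themselves), so it parks an on-stack cursor entry, recognizable by t_func == NULL, after the current task before unlocking, and resumes the walk from that cursor afterwards. The sketch below is not part of if.c; it models the same cursor technique with pthreads and <sys/queue.h> standing in for the kernel mutex and task list.

#include <sys/queue.h>
#include <pthread.h>
#include <stdio.h>

struct hook {
	TAILQ_ENTRY(hook) entry;
	void (*func)(void *);
	void *arg;
};

static TAILQ_HEAD(, hook) hooks = TAILQ_HEAD_INITIALIZER(hooks);
static pthread_mutex_t hooks_mtx = PTHREAD_MUTEX_INITIALIZER;

static void
hooks_run(void)
{
	struct hook *h, *nh;
	struct hook cursor = { .func = NULL };	/* NULL func marks a cursor */
	void (*func)(void *);
	void *arg;

	pthread_mutex_lock(&hooks_mtx);
	for (h = TAILQ_FIRST(&hooks); h != NULL; h = nh) {
		if (h->func == NULL) {		/* skip other cursors */
			nh = TAILQ_NEXT(h, entry);
			continue;
		}
		func = h->func;
		arg = h->arg;

		/* hold our place with the cursor, then run unlocked */
		TAILQ_INSERT_AFTER(&hooks, h, &cursor, entry);
		pthread_mutex_unlock(&hooks_mtx);

		(*func)(arg);

		pthread_mutex_lock(&hooks_mtx);
		nh = TAILQ_NEXT(&cursor, entry);
		TAILQ_REMOVE(&hooks, &cursor, entry);
	}
	pthread_mutex_unlock(&hooks_mtx);
}

static void
say(void *arg)
{
	printf("hook: %s\n", (const char *)arg);
}

int
main(void)
{
	struct hook a = { .func = say, .arg = (void *)"first" };
	struct hook b = { .func = say, .arg = (void *)"second" };

	TAILQ_INSERT_TAIL(&hooks, &a, entry);
	TAILQ_INSERT_TAIL(&hooks, &b, entry);
	hooks_run();
	return 0;
}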
| 956 | |
| 957 | void |
| 958 | if_remove(struct ifnet *ifp) |
| 959 | { |
| 960 | /* Remove the interface from the list of all interfaces. */ |
| 961 | NET_LOCK()do { rw_enter_write(&netlock); } while (0); |
| 962 | TAILQ_REMOVE(&ifnet, ifp, if_list)do { if (((ifp)->if_list.tqe_next) != ((void *)0)) (ifp)-> if_list.tqe_next->if_list.tqe_prev = (ifp)->if_list.tqe_prev ; else (&ifnet)->tqh_last = (ifp)->if_list.tqe_prev ; *(ifp)->if_list.tqe_prev = (ifp)->if_list.tqe_next; ( (ifp)->if_list.tqe_prev) = ((void *)-1); ((ifp)->if_list .tqe_next) = ((void *)-1); } while (0); |
| 963 | NET_UNLOCK()do { rw_exit_write(&netlock); } while (0); |
| 964 | |
| 965 | /* Remove the interface from the interface index map. */ |
| 966 | if_idxmap_remove(ifp); |
| 967 | |
| 968 | /* Sleep until the last reference is released. */ |
| 969 | refcnt_finalize(&ifp->if_refcnt, "ifrm"); |
| 970 | } |
| 971 | |
| 972 | void |
| 973 | if_deactivate(struct ifnet *ifp) |
| 974 | { |
| 975 | /* |
| 976 | * Call detach hooks from head to tail. To make sure detach |
| 977 | * hooks are executed in the reverse order they were added, all |
| 978 | * the hooks have to be added to the head! |
| 979 | */ |
| 980 | |
| 981 | NET_LOCK()do { rw_enter_write(&netlock); } while (0); |
| 982 | if_hooks_run(&ifp->if_detachhooks); |
| 983 | NET_UNLOCK()do { rw_exit_write(&netlock); } while (0); |
| 984 | } |
| 985 | |
| 986 | void |
| 987 | if_detachhook_add(struct ifnet *ifp, struct task *t) |
| 988 | { |
| 989 | mtx_enter(&if_hooks_mtx); |
| 990 | TAILQ_INSERT_HEAD(&ifp->if_detachhooks, t, t_entry)do { if (((t)->t_entry.tqe_next = (&ifp->if_detachhooks )->tqh_first) != ((void *)0)) (&ifp->if_detachhooks )->tqh_first->t_entry.tqe_prev = &(t)->t_entry.tqe_next ; else (&ifp->if_detachhooks)->tqh_last = &(t)-> t_entry.tqe_next; (&ifp->if_detachhooks)->tqh_first = (t); (t)->t_entry.tqe_prev = &(&ifp->if_detachhooks )->tqh_first; } while (0); |
| 991 | mtx_leave(&if_hooks_mtx); |
| 992 | } |
| 993 | |
| 994 | void |
| 995 | if_detachhook_del(struct ifnet *ifp, struct task *t) |
| 996 | { |
| 997 | mtx_enter(&if_hooks_mtx); |
| 998 | TAILQ_REMOVE(&ifp->if_detachhooks, t, t_entry)do { if (((t)->t_entry.tqe_next) != ((void *)0)) (t)->t_entry .tqe_next->t_entry.tqe_prev = (t)->t_entry.tqe_prev; else (&ifp->if_detachhooks)->tqh_last = (t)->t_entry .tqe_prev; *(t)->t_entry.tqe_prev = (t)->t_entry.tqe_next ; ((t)->t_entry.tqe_prev) = ((void *)-1); ((t)->t_entry .tqe_next) = ((void *)-1); } while (0); |
| 999 | mtx_leave(&if_hooks_mtx); |
| 1000 | } |
| 1001 | |
| 1002 | /* |
| 1003 | * Detach an interface from everything in the kernel. Also deallocate |
| 1004 | * private resources. |
| 1005 | */ |
| 1006 | void |
| 1007 | if_detach(struct ifnet *ifp) |
| 1008 | { |
| 1009 | struct ifaddr *ifa; |
| 1010 | struct ifg_list *ifg; |
| 1011 | const struct domain *dp; |
| 1012 | int i, s; |
| 1013 | |
| 1014 | /* Undo pseudo-driver changes. */ |
| 1015 | if_deactivate(ifp); |
| 1016 | |
| 1017 | /* Other CPUs must not have a reference before we start destroying. */ |
| 1018 | if_remove(ifp); |
| 1019 | |
| 1020 | ifq_clr_oactive(&ifp->if_snd); |
| 1021 | |
| 1022 | #if NBPFILTER1 > 0 |
| 1023 | bpfdetach(ifp); |
| 1024 | #endif |
| 1025 | |
| 1026 | NET_LOCK()do { rw_enter_write(&netlock); } while (0); |
| 1027 | s = splnet()splraise(0x7); |
| 1028 | ifp->if_qstart = if_detached_qstart; |
| 1029 | ifp->if_ioctl = if_detached_ioctl; |
| 1030 | ifp->if_watchdog = NULL((void *)0); |
| 1031 | |
| 1032 | /* Remove the watchdog timeout & task */ |
| 1033 | timeout_del(&ifp->if_slowtimo); |
| 1034 | task_del(net_tq(ifp->if_index), &ifp->if_watchdogtask); |
| 1035 | |
| 1036 | /* Remove the link state task */ |
| 1037 | task_del(net_tq(ifp->if_index), &ifp->if_linkstatetask); |
| 1038 | |
| 1039 | rti_delete(ifp); |
| 1040 | #if NETHER1 > 0 && defined(NFSCLIENT1) |
| 1041 | if (ifp->if_index == revarp_ifidx) |
| 1042 | revarp_ifidx = 0; |
| 1043 | #endif |
| 1044 | #ifdef MROUTING1 |
| 1045 | vif_delete(ifp); |
| 1046 | #endif |
| 1047 | in_ifdetach(ifp); |
| 1048 | #ifdef INET61 |
| 1049 | in6_ifdetach(ifp); |
| 1050 | #endif |
| 1051 | #if NPF1 > 0 |
| 1052 | pfi_detach_ifnet(ifp); |
| 1053 | #endif |
| 1054 | |
| 1055 | while ((ifg = TAILQ_FIRST(&ifp->if_groups)((&ifp->if_groups)->tqh_first)) != NULL((void *)0)) |
| 1056 | if_delgroup(ifp, ifg->ifgl_group->ifg_group); |
| 1057 | |
| 1058 | if_free_sadl(ifp); |
| 1059 | |
| 1060 | /* We should not have any address left at this point. */ |
| 1061 | if (!TAILQ_EMPTY(&ifp->if_addrlist)(((&ifp->if_addrlist)->tqh_first) == ((void *)0))) { |
| 1062 | #ifdef DIAGNOSTIC1 |
| 1063 | printf("%s: address list non empty\n", ifp->if_xname); |
| 1064 | #endif |
| 1065 | while ((ifa = TAILQ_FIRST(&ifp->if_addrlist)((&ifp->if_addrlist)->tqh_first)) != NULL((void *)0)) { |
| 1066 | ifa_del(ifp, ifa); |
| 1067 | ifa->ifa_ifp = NULL((void *)0); |
| 1068 | ifafree(ifa); |
| 1069 | } |
| 1070 | } |
| 1071 | |
| 1072 | KASSERT(TAILQ_EMPTY(&ifp->if_addrhooks))(((((&ifp->if_addrhooks)->tqh_first) == ((void *)0) )) ? (void)0 : __assert("diagnostic ", "/usr/src/sys/net/if.c" , 1072, "TAILQ_EMPTY(&ifp->if_addrhooks)")); |
| 1073 | KASSERT(TAILQ_EMPTY(&ifp->if_linkstatehooks))(((((&ifp->if_linkstatehooks)->tqh_first) == ((void *)0))) ? (void)0 : __assert("diagnostic ", "/usr/src/sys/net/if.c" , 1073, "TAILQ_EMPTY(&ifp->if_linkstatehooks)")); |
| 1074 | KASSERT(TAILQ_EMPTY(&ifp->if_detachhooks))(((((&ifp->if_detachhooks)->tqh_first) == ((void *) 0))) ? (void)0 : __assert("diagnostic ", "/usr/src/sys/net/if.c" , 1074, "TAILQ_EMPTY(&ifp->if_detachhooks)")); |
| 1075 | |
| 1076 | for (i = 0; (dp = domains[i]) != NULL((void *)0); i++) { |
| 1077 | if (dp->dom_ifdetach && ifp->if_afdata[dp->dom_family]) |
| 1078 | (*dp->dom_ifdetach)(ifp, |
| 1079 | ifp->if_afdata[dp->dom_family]); |
| 1080 | } |
| 1081 | |
| 1082 | /* Announce that the interface is gone. */ |
| 1083 | rtm_ifannounce(ifp, IFAN_DEPARTURE1); |
| 1084 | splx(s)spllower(s); |
| 1085 | NET_UNLOCK()do { rw_exit_write(&netlock); } while (0); |
| 1086 | |
| 1087 | if (ifp->if_counters != NULL((void *)0)) |
| 1088 | if_counters_free(ifp); |
| 1089 | |
| 1090 | for (i = 0; i < ifp->if_nifqs; i++) |
| 1091 | ifq_destroy(ifp->if_ifqs[i]); |
| 1092 | if (ifp->if_ifqs != ifp->if_snd.ifq_ifqs_ifq_ptr._ifq_ifqs) { |
| 1093 | for (i = 1; i < ifp->if_nifqs; i++) { |
| 1094 | free(ifp->if_ifqs[i], M_DEVBUF2, |
| 1095 | sizeof(struct ifqueue)); |
| 1096 | } |
| 1097 | free(ifp->if_ifqs, M_DEVBUF2, |
| 1098 | sizeof(struct ifqueue *) * ifp->if_nifqs); |
| 1099 | } |
| 1100 | |
| 1101 | for (i = 0; i < ifp->if_niqs; i++) |
| 1102 | ifiq_destroy(ifp->if_iqs[i]); |
| 1103 | if (ifp->if_iqs != ifp->if_rcv.ifiq_ifiqs_ifiq_ptr._ifiq_ifiqs) { |
| 1104 | for (i = 1; i < ifp->if_niqs; i++) { |
| 1105 | free(ifp->if_iqs[i], M_DEVBUF2, |
| 1106 | sizeof(struct ifiqueue)); |
| 1107 | } |
| 1108 | free(ifp->if_iqs, M_DEVBUF2, |
| 1109 | sizeof(struct ifiqueue *) * ifp->if_niqs); |
| 1110 | } |
| 1111 | } |
| 1112 | |
| 1113 | /* |
| 1114 | * Returns true if ``ifp0'' is connected to the interface with index ``ifidx''. |
| 1115 | */ |
| 1116 | int |
| 1117 | if_isconnected(const struct ifnet *ifp0, unsigned int ifidx) |
| 1118 | { |
| 1119 | struct ifnet *ifp; |
| 1120 | int connected = 0; |
| 1121 | |
| 1122 | ifp = if_get(ifidx); |
| 1123 | if (ifp == NULL((void *)0)) |
| 1124 | return (0); |
| 1125 | |
| 1126 | if (ifp0->if_index == ifp->if_index) |
| 1127 | connected = 1; |
| 1128 | |
| 1129 | #if NBRIDGE1 > 0 |
| 1130 | if (ifp0->if_bridgeidx != 0 && ifp0->if_bridgeidx == ifp->if_bridgeidx) |
| 1131 | connected = 1; |
| 1132 | #endif |
| 1133 | #if NCARP1 > 0 |
| 1134 | if ((ifp0->if_typeif_data.ifi_type == IFT_CARP0xf7 && |
| 1135 | ifp0->if_carpdevidxif_carp_ptr.carp_idx == ifp->if_index) || |
| 1136 | (ifp->if_typeif_data.ifi_type == IFT_CARP0xf7 && ifp->if_carpdevidxif_carp_ptr.carp_idx == ifp0->if_index)) |
| 1137 | connected = 1; |
| 1138 | #endif |
| 1139 | |
| 1140 | if_put(ifp); |
| 1141 | return (connected); |
| 1142 | } |
| 1143 | |
| 1144 | /* |
| 1145 | * Create a clone network interface. |
| 1146 | */ |
| 1147 | int |
| 1148 | if_clone_create(const char *name, int rdomain) |
| 1149 | { |
| 1150 | struct if_clone *ifc; |
| 1151 | struct ifnet *ifp; |
| 1152 | int unit, ret; |
| 1153 | |
| 1154 | ifc = if_clone_lookup(name, &unit); |
| 1155 | if (ifc == NULL((void *)0)) |
| 1156 | return (EINVAL22); |
| 1157 | |
| 1158 | rw_enter_write(&if_cloners_lock); |
| 1159 | |
| 1160 | if ((ifp = if_unit(name)) != NULL((void *)0)) { |
| 1161 | ret = EEXIST17; |
| 1162 | goto unlock; |
| 1163 | } |
| 1164 | |
| 1165 | ret = (*ifc->ifc_create)(ifc, unit); |
| 1166 | |
| 1167 | if (ret != 0 || (ifp = if_unit(name)) == NULL((void *)0)) |
| 1168 | goto unlock; |
| 1169 | |
| 1170 | NET_LOCK()do { rw_enter_write(&netlock); } while (0); |
| 1171 | if_addgroup(ifp, ifc->ifc_name); |
| 1172 | if (rdomain != 0) |
| 1173 | if_setrdomain(ifp, rdomain); |
| 1174 | NET_UNLOCK()do { rw_exit_write(&netlock); } while (0); |
| 1175 | unlock: |
| 1176 | rw_exit_write(&if_cloners_lock); |
| 1177 | if_put(ifp); |
| 1178 | |
| 1179 | return (ret); |
| 1180 | } |
| 1181 | |
| 1182 | /* |
| 1183 | * Destroy a clone network interface. |
| 1184 | */ |
| 1185 | int |
| 1186 | if_clone_destroy(const char *name) |
| 1187 | { |
| 1188 | struct if_clone *ifc; |
| 1189 | struct ifnet *ifp; |
| 1190 | int ret; |
| 1191 | |
| 1192 | ifc = if_clone_lookup(name, NULL((void *)0)); |
| 1193 | if (ifc == NULL((void *)0)) |
| 1194 | return (EINVAL22); |
| 1195 | |
| 1196 | if (ifc->ifc_destroy == NULL((void *)0)) |
| 1197 | return (EOPNOTSUPP45); |
| 1198 | |
| 1199 | rw_enter_write(&if_cloners_lock); |
| 1200 | |
| 1201 | TAILQ_FOREACH(ifp, &ifnet, if_list)for((ifp) = ((&ifnet)->tqh_first); (ifp) != ((void *)0 ); (ifp) = ((ifp)->if_list.tqe_next)) { |
| 1202 | if (strcmp(ifp->if_xname, name) == 0) |
| 1203 | break; |
| 1204 | } |
| 1205 | if (ifp == NULL((void *)0)) { |
| 1206 | rw_exit_write(&if_cloners_lock); |
| 1207 | return (ENXIO6); |
| 1208 | } |
| 1209 | |
| 1210 | NET_LOCK()do { rw_enter_write(&netlock); } while (0); |
| 1211 | if (ifp->if_flags & IFF_UP0x1) { |
| 1212 | int s; |
| 1213 | s = splnet()splraise(0x7); |
| 1214 | if_down(ifp); |
| 1215 | splx(s)spllower(s); |
| 1216 | } |
| 1217 | NET_UNLOCK()do { rw_exit_write(&netlock); } while (0); |
| 1218 | ret = (*ifc->ifc_destroy)(ifp); |
| 1219 | |
| 1220 | rw_exit_write(&if_cloners_lock); |
| 1221 | |
| 1222 | return (ret); |
| 1223 | } |
| 1224 | |
| 1225 | /* |
| 1226 | * Look up a network interface cloner. |
| 1227 | */ |
| 1228 | struct if_clone * |
| 1229 | if_clone_lookup(const char *name, int *unitp) |
| 1230 | { |
| 1231 | struct if_clone *ifc; |
| 1232 | const char *cp; |
| 1233 | int unit; |
| 1234 | |
| 1235 | /* separate interface name from unit */ |
| 1236 | for (cp = name; |
| 1237 | cp - name < IFNAMSIZ && *cp && (*cp < '0' || *cp > '9'); |
| 1238 | cp++) |
| 1239 | continue; |
| 1240 | |
| 1241 | if (cp == name || cp - name == IFNAMSIZ || !*cp) |
| 1242 | return (NULL); /* No name or unit number */ |
| 1243 | |
| 1244 | if (cp - name < IFNAMSIZ-1 && *cp == '0' && cp[1] != '\0') |
| 1245 | return (NULL); /* unit number 0 padded */ |
| 1246 | |
| 1247 | LIST_FOREACH(ifc, &if_cloners, ifc_list) { |
| 1248 | if (strlen(ifc->ifc_name) == cp - name && |
| 1249 | !strncmp(name, ifc->ifc_name, cp - name)) |
| 1250 | break; |
| 1251 | } |
| 1252 | |
| 1253 | if (ifc == NULL) |
| 1254 | return (NULL); |
| 1255 | |
| 1256 | unit = 0; |
| 1257 | while (cp - name < IFNAMSIZ && *cp) { |
| 1258 | if (*cp < '0' || *cp > '9' || |
| 1259 | unit > (INT_MAX - (*cp - '0')) / 10) { |
| 1260 | /* Bogus unit number. */ |
| 1261 | return (NULL); |
| 1262 | } |
| 1263 | unit = (unit * 10) + (*cp++ - '0'); |
| 1264 | } |
| 1265 | |
| 1266 | if (unitp != NULL) |
| 1267 | *unitp = unit; |
| 1268 | return (ifc); |
| 1269 | } |
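if_clone_lookup() splits a name like "vlan7" into a driver name and a unit number, rejecting empty names, missing units, zero-padded units such as "gif007", and unit values that would overflow an int (the `unit > (INT_MAX - digit) / 10` guard). The same parsing logic as a standalone program is sketched below; it is not part of if.c, and parse_clone_name and the test strings are invented for illustration.

#include <limits.h>
#include <stdio.h>
#include <string.h>

#define IFNAMSIZ 16

static int
parse_clone_name(const char *name, char *base, size_t basesz, int *unitp)
{
	const char *cp;
	int unit = 0;

	/* skip over the alphabetic interface name */
	for (cp = name;
	    cp - name < IFNAMSIZ && *cp && (*cp < '0' || *cp > '9');
	    cp++)
		continue;

	if (cp == name || cp - name == IFNAMSIZ || !*cp)
		return -1;		/* no name or no unit number */

	if (cp - name < IFNAMSIZ - 1 && *cp == '0' && cp[1] != '\0')
		return -1;		/* unit number 0 padded */

	if ((size_t)(cp - name) >= basesz)
		return -1;
	memcpy(base, name, cp - name);
	base[cp - name] = '\0';

	while (cp - name < IFNAMSIZ && *cp) {
		if (*cp < '0' || *cp > '9' ||
		    unit > (INT_MAX - (*cp - '0')) / 10)
			return -1;	/* bogus unit number */
		unit = unit * 10 + (*cp++ - '0');
	}

	*unitp = unit;
	return 0;
}

int
main(void)
{
	const char *tests[] = { "vlan7", "trunk0", "gif007", "bridge" };
	char base[IFNAMSIZ];
	int unit;
	size_t i;

	for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
		if (parse_clone_name(tests[i], base, sizeof(base), &unit) == 0)
			printf("%-8s -> %s unit %d\n", tests[i], base, unit);
		else
			printf("%-8s -> rejected\n", tests[i]);
	}
	return 0;
}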
| 1270 | |
| 1271 | /* |
| 1272 | * Register a network interface cloner. |
| 1273 | */ |
| 1274 | void |
| 1275 | if_clone_attach(struct if_clone *ifc) |
| 1276 | { |
| 1277 | /* |
| 1278 | * we are called at kernel boot by main(), when pseudo devices are |
| 1279 | * being attached. The main() is the only guy which may alter the |
| 1280 | * if_cloners. While system is running and main() is done with |
| 1281 | * initialization, the if_cloners becomes immutable. |
| 1282 | */ |
| 1283 | KASSERT(pdevinit_done == 0)((pdevinit_done == 0) ? (void)0 : __assert("diagnostic ", "/usr/src/sys/net/if.c" , 1283, "pdevinit_done == 0")); |
| 1284 | LIST_INSERT_HEAD(&if_cloners, ifc, ifc_list)do { if (((ifc)->ifc_list.le_next = (&if_cloners)-> lh_first) != ((void *)0)) (&if_cloners)->lh_first-> ifc_list.le_prev = &(ifc)->ifc_list.le_next; (&if_cloners )->lh_first = (ifc); (ifc)->ifc_list.le_prev = &(& if_cloners)->lh_first; } while (0); |
| 1285 | if_cloners_count++; |
| 1286 | } |
| 1287 | |
| 1288 | /* |
| 1289 | * Provide list of interface cloners to userspace. |
| 1290 | */ |
| 1291 | int |
| 1292 | if_clone_list(struct if_clonereq *ifcr) |
| 1293 | { |
| 1294 | char outbuf[IFNAMSIZ16], *dst; |
| 1295 | struct if_clone *ifc; |
| 1296 | int count, error = 0; |
| 1297 | |
| 1298 | if ((dst = ifcr->ifcr_buffer) == NULL((void *)0)) { |
| 1299 | /* Just asking how many there are. */ |
| 1300 | ifcr->ifcr_total = if_cloners_count; |
| 1301 | return (0); |
| 1302 | } |
| 1303 | |
| 1304 | if (ifcr->ifcr_count < 0) |
| 1305 | return (EINVAL22); |
| 1306 | |
| 1307 | ifcr->ifcr_total = if_cloners_count; |
| 1308 | count = MIN(if_cloners_count, ifcr->ifcr_count)(((if_cloners_count)<(ifcr->ifcr_count))?(if_cloners_count ):(ifcr->ifcr_count)); |
| 1309 | |
| 1310 | LIST_FOREACH(ifc, &if_cloners, ifc_list)for((ifc) = ((&if_cloners)->lh_first); (ifc)!= ((void * )0); (ifc) = ((ifc)->ifc_list.le_next)) { |
| 1311 | if (count == 0) |
| 1312 | break; |
| 1313 | bzero(outbuf, sizeof outbuf)__builtin_bzero((outbuf), (sizeof outbuf)); |
| 1314 | strlcpy(outbuf, ifc->ifc_name, IFNAMSIZ16); |
| 1315 | error = copyout(outbuf, dst, IFNAMSIZ16); |
| 1316 | if (error) |
| 1317 | break; |
| 1318 | count--; |
| 1319 | dst += IFNAMSIZ16; |
| 1320 | } |
| 1321 | |
| 1322 | return (error); |
| 1323 | } |
| 1324 | |
| 1325 | /* |
| 1326 | * set queue congestion marker |
| 1327 | */ |
| 1328 | void |
| 1329 | if_congestion(void) |
| 1330 | { |
| 1331 | extern int ticks; |
| 1332 | |
| 1333 | ifq_congestion = ticks; |
| 1334 | } |
| 1335 | |
| 1336 | int |
| 1337 | if_congested(void) |
| 1338 | { |
| 1339 | extern int ticks; |
| 1340 | int diff; |
| 1341 | |
| 1342 | diff = ticks - ifq_congestion; |
| 1343 | if (diff < 0) { |
| 1344 | ifq_congestion = ticks - hz; |
| 1345 | return (0); |
| 1346 | } |
| 1347 | |
| 1348 | return (diff <= (hz / 100)); |
| 1349 | } |
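if_congestion() stamps ifq_congestion with the current ticks value, and if_congested() then reports congestion for hz/100 ticks (about 10 ms) afterwards, resetting the marker when ticks has gone backwards and the difference turns negative. A standalone model of that window is sketched below; it is not part of if.c, and ticks and hz are plain variables here, with hz assumed to be 100.

#include <stdio.h>

static int hz = 100;			/* 100 ticks per second assumed */
static int ticks;
static int ifq_congestion = -100;	/* "long ago" */

static void
if_congestion_mark(void)
{
	ifq_congestion = ticks;
}

static int
if_congested_check(void)
{
	int diff = ticks - ifq_congestion;

	if (diff < 0) {			/* counter went backwards: reset */
		ifq_congestion = ticks - hz;
		return 0;
	}
	return (diff <= hz / 100);	/* true within hz/100 ticks (~10 ms) */
}

int
main(void)
{
	if_congestion_mark();		/* a queue just reported congestion */
	printf("now: %d\n", if_congested_check());
	ticks += 2;			/* two ticks (20 ms at hz = 100) later */
	printf("later: %d\n", if_congested_check());
	return 0;
}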
| 1350 | |
| 1351 | #define equal(a1, a2) \ |
| 1352 | (bcmp((caddr_t)(a1), (caddr_t)(a2), \ |
| 1353 | (a1)->sa_len) == 0) |
| 1354 | |
| 1355 | /* |
| 1356 | * Locate an interface based on a complete address. |
| 1357 | */ |
| 1358 | struct ifaddr * |
| 1359 | ifa_ifwithaddr(struct sockaddr *addr, u_int rtableid) |
| 1360 | { |
| 1361 | struct ifnet *ifp; |
| 1362 | struct ifaddr *ifa; |
| 1363 | u_int rdomain; |
| 1364 | |
| 1365 | rdomain = rtable_l2(rtableid); |
| 1366 | KERNEL_LOCK()_kernel_lock(); |
| 1367 | TAILQ_FOREACH(ifp, &ifnet, if_list)for((ifp) = ((&ifnet)->tqh_first); (ifp) != ((void *)0 ); (ifp) = ((ifp)->if_list.tqe_next)) { |
| 1368 | if (ifp->if_rdomainif_data.ifi_rdomain != rdomain) |
| 1369 | continue; |
| 1370 | |
| 1371 | TAILQ_FOREACH(ifa, &ifp->if_addrlist, ifa_list)for((ifa) = ((&ifp->if_addrlist)->tqh_first); (ifa) != ((void *)0); (ifa) = ((ifa)->ifa_list.tqe_next)) { |
| 1372 | if (ifa->ifa_addr->sa_family != addr->sa_family) |
| 1373 | continue; |
| 1374 | |
| 1375 | if (equal(addr, ifa->ifa_addr)(bcmp((caddr_t)(addr), (caddr_t)(ifa->ifa_addr), (addr)-> sa_len) == 0)) { |
| 1376 | KERNEL_UNLOCK()_kernel_unlock(); |
| 1377 | return (ifa); |
| 1378 | } |
| 1379 | } |
| 1380 | } |
| 1381 | KERNEL_UNLOCK()_kernel_unlock(); |
| 1382 | return (NULL((void *)0)); |
| 1383 | } |
| 1384 | |
| 1385 | /* |
| 1386 | * Locate the point to point interface with a given destination address. |
| 1387 | */ |
| 1388 | struct ifaddr * |
| 1389 | ifa_ifwithdstaddr(struct sockaddr *addr, u_int rdomain) |
| 1390 | { |
| 1391 | struct ifnet *ifp; |
| 1392 | struct ifaddr *ifa; |
| 1393 | |
| 1394 | rdomain = rtable_l2(rdomain); |
| 1395 | KERNEL_LOCK()_kernel_lock(); |
| 1396 | TAILQ_FOREACH(ifp, &ifnet, if_list)for((ifp) = ((&ifnet)->tqh_first); (ifp) != ((void *)0 ); (ifp) = ((ifp)->if_list.tqe_next)) { |
| 1397 | if (ifp->if_rdomainif_data.ifi_rdomain != rdomain) |
| 1398 | continue; |
| 1399 | if (ifp->if_flags & IFF_POINTOPOINT0x10) { |
| 1400 | TAILQ_FOREACH(ifa, &ifp->if_addrlist, ifa_list)for((ifa) = ((&ifp->if_addrlist)->tqh_first); (ifa) != ((void *)0); (ifa) = ((ifa)->ifa_list.tqe_next)) { |
| 1401 | if (ifa->ifa_addr->sa_family != |
| 1402 | addr->sa_family || ifa->ifa_dstaddr == NULL((void *)0)) |
| 1403 | continue; |
| 1404 | if (equal(addr, ifa->ifa_dstaddr)(bcmp((caddr_t)(addr), (caddr_t)(ifa->ifa_dstaddr), (addr) ->sa_len) == 0)) { |
| 1405 | KERNEL_UNLOCK()_kernel_unlock(); |
| 1406 | return (ifa); |
| 1407 | } |
| 1408 | } |
| 1409 | } |
| 1410 | } |
| 1411 | KERNEL_UNLOCK()_kernel_unlock(); |
| 1412 | return (NULL((void *)0)); |
| 1413 | } |
| 1414 | |
| 1415 | /* |
| 1416 | * Find an interface address specific to an interface best matching |
| 1417 | * a given address. |
| 1418 | */ |
| 1419 | struct ifaddr * |
| 1420 | ifaof_ifpforaddr(struct sockaddr *addr, struct ifnet *ifp) |
| 1421 | { |
| 1422 | struct ifaddr *ifa; |
| 1423 | char *cp, *cp2, *cp3; |
| 1424 | char *cplim; |
| 1425 | struct ifaddr *ifa_maybe = NULL((void *)0); |
| 1426 | u_int af = addr->sa_family; |
| 1427 | |
| 1428 | if (af >= AF_MAX36) |
| 1429 | return (NULL((void *)0)); |
| 1430 | TAILQ_FOREACH(ifa, &ifp->if_addrlist, ifa_list)for((ifa) = ((&ifp->if_addrlist)->tqh_first); (ifa) != ((void *)0); (ifa) = ((ifa)->ifa_list.tqe_next)) { |
| 1431 | if (ifa->ifa_addr->sa_family != af) |
| 1432 | continue; |
| 1433 | if (ifa_maybe == NULL((void *)0)) |
| 1434 | ifa_maybe = ifa; |
| 1435 | if (ifa->ifa_netmask == 0 || ifp->if_flags & IFF_POINTOPOINT0x10) { |
| 1436 | if (equal(addr, ifa->ifa_addr)(bcmp((caddr_t)(addr), (caddr_t)(ifa->ifa_addr), (addr)-> sa_len) == 0) || |
| 1437 | (ifa->ifa_dstaddr && equal(addr, ifa->ifa_dstaddr)(bcmp((caddr_t)(addr), (caddr_t)(ifa->ifa_dstaddr), (addr) ->sa_len) == 0))) |
| 1438 | return (ifa); |
| 1439 | continue; |
| 1440 | } |
| 1441 | cp = addr->sa_data; |
| 1442 | cp2 = ifa->ifa_addr->sa_data; |
| 1443 | cp3 = ifa->ifa_netmask->sa_data; |
| 1444 | cplim = ifa->ifa_netmask->sa_len + (char *)ifa->ifa_netmask; |
| 1445 | for (; cp3 < cplim; cp3++) |
| 1446 | if ((*cp++ ^ *cp2++) & *cp3) |
| 1447 | break; |
| 1448 | if (cp3 == cplim) |
| 1449 | return (ifa); |
| 1450 | } |
| 1451 | return (ifa_maybe); |
| 1452 | } |
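The byte loop above implements a masked comparison: the candidate matches when the target address and the interface address agree on every bit set in the netmask. A standalone userland illustration of the same idea (plain C, not kernel code):

#include <sys/socket.h>
#include <arpa/inet.h>
#include <stdio.h>

/* nonzero when a and b agree on every bit set in mask */
static int
masked_equal(const void *a, const void *b, const void *mask, size_t len)
{
	const unsigned char *pa = a, *pb = b, *pm = mask;
	size_t i;

	for (i = 0; i < len; i++)
		if ((pa[i] ^ pb[i]) & pm[i])
			return (0);
	return (1);
}

int
main(void)
{
	struct in_addr a, b, m;

	inet_pton(AF_INET, "192.0.2.10", &a);
	inet_pton(AF_INET, "192.0.2.77", &b);
	inet_pton(AF_INET, "255.255.255.0", &m);

	printf("same subnet: %d\n", masked_equal(&a, &b, &m, sizeof(a)));
	return (0);
}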
| 1453 | |
| 1454 | void |
| 1455 | if_rtrequest_dummy(struct ifnet *ifp, int req, struct rtentry *rt) |
| 1456 | { |
| 1457 | } |
| 1458 | |
| 1459 | /* |
| 1460 | * Default action when installing a local route on a point-to-point |
| 1461 | * interface. |
| 1462 | */ |
| 1463 | void |
| 1464 | p2p_rtrequest(struct ifnet *ifp, int req, struct rtentry *rt) |
| 1465 | { |
| 1466 | struct ifnet *lo0ifp; |
| 1467 | struct ifaddr *ifa, *lo0ifa; |
| 1468 | |
| 1469 | switch (req) { |
| 1470 | case RTM_ADD0x1: |
| 1471 | if (!ISSET(rt->rt_flags, RTF_LOCAL)((rt->rt_flags) & (0x200000))) |
| 1472 | break; |
| 1473 | |
| 1474 | TAILQ_FOREACH(ifa, &ifp->if_addrlist, ifa_list)for((ifa) = ((&ifp->if_addrlist)->tqh_first); (ifa) != ((void *)0); (ifa) = ((ifa)->ifa_list.tqe_next)) { |
| 1475 | if (memcmp(rt_key(rt), ifa->ifa_addr,__builtin_memcmp((((rt)->rt_dest)), (ifa->ifa_addr), (( (rt)->rt_dest)->sa_len)) |
| 1476 | rt_key(rt)->sa_len)__builtin_memcmp((((rt)->rt_dest)), (ifa->ifa_addr), (( (rt)->rt_dest)->sa_len)) == 0) |
| 1477 | break; |
| 1478 | } |
| 1479 | |
| 1480 | if (ifa == NULL((void *)0)) |
| 1481 | break; |
| 1482 | |
| 1483 | KASSERT(ifa == rt->rt_ifa)((ifa == rt->rt_ifa) ? (void)0 : __assert("diagnostic ", "/usr/src/sys/net/if.c" , 1483, "ifa == rt->rt_ifa")); |
| 1484 | |
| 1485 | lo0ifp = if_get(rtable_loindex(ifp->if_rdomainif_data.ifi_rdomain)); |
| 1486 | KASSERT(lo0ifp != NULL)((lo0ifp != ((void *)0)) ? (void)0 : __assert("diagnostic ", "/usr/src/sys/net/if.c" , 1486, "lo0ifp != NULL")); |
| 1487 | TAILQ_FOREACH(lo0ifa, &lo0ifp->if_addrlist, ifa_list)for((lo0ifa) = ((&lo0ifp->if_addrlist)->tqh_first); (lo0ifa) != ((void *)0); (lo0ifa) = ((lo0ifa)->ifa_list.tqe_next )) { |
| 1488 | if (lo0ifa->ifa_addr->sa_family == |
| 1489 | ifa->ifa_addr->sa_family) |
| 1490 | break; |
| 1491 | } |
| 1492 | if_put(lo0ifp); |
| 1493 | |
| 1494 | if (lo0ifa == NULL((void *)0)) |
| 1495 | break; |
| 1496 | |
| 1497 | rt->rt_flags &= ~RTF_LLINFO0x400; |
| 1498 | break; |
| 1499 | case RTM_DELETE0x2: |
| 1500 | case RTM_RESOLVE0xb: |
| 1501 | default: |
| 1502 | break; |
| 1503 | } |
| 1504 | } |
| 1505 | |
| 1506 | int |
| 1507 | p2p_bpf_mtap(caddr_t if_bpf, const struct mbuf *m, u_int dir) |
| 1508 | { |
| 1509 | #if NBPFILTER1 > 0 |
| 1510 | return (bpf_mtap_af(if_bpf, m->m_pkthdrM_dat.MH.MH_pkthdr.ph_family, m, dir)); |
| 1511 | #else |
| 1512 | return (0); |
| 1513 | #endif |
| 1514 | } |
| 1515 | |
| 1516 | void |
| 1517 | p2p_input(struct ifnet *ifp, struct mbuf *m) |
| 1518 | { |
| 1519 | void (*input)(struct ifnet *, struct mbuf *); |
| 1520 | |
| 1521 | switch (m->m_pkthdrM_dat.MH.MH_pkthdr.ph_family) { |
| 1522 | case AF_INET2: |
| 1523 | input = ipv4_input; |
| 1524 | break; |
| 1525 | #ifdef INET61 |
| 1526 | case AF_INET624: |
| 1527 | input = ipv6_input; |
| 1528 | break; |
| 1529 | #endif |
| 1530 | #ifdef MPLS1 |
| 1531 | case AF_MPLS33: |
| 1532 | input = mpls_input; |
| 1533 | break; |
| 1534 | #endif |
| 1535 | default: |
| 1536 | m_freem(m); |
| 1537 | return; |
| 1538 | } |
| 1539 | |
| 1540 | (*input)(ifp, m); |
| 1541 | } |
| 1542 | |
| 1543 | /* |
| 1544 | * Bring down all interfaces |
| 1545 | */ |
| 1546 | void |
| 1547 | if_downall(void) |
| 1548 | { |
| 1549 | struct ifreq ifrq; /* XXX only partly built */ |
| 1550 | struct ifnet *ifp; |
| 1551 | |
| 1552 | NET_LOCK()do { rw_enter_write(&netlock); } while (0); |
| 1553 | TAILQ_FOREACH(ifp, &ifnet, if_list)for((ifp) = ((&ifnet)->tqh_first); (ifp) != ((void *)0 ); (ifp) = ((ifp)->if_list.tqe_next)) { |
| 1554 | if ((ifp->if_flags & IFF_UP0x1) == 0) |
| 1555 | continue; |
| 1556 | if_down(ifp); |
| 1557 | ifrq.ifr_flagsifr_ifru.ifru_flags = ifp->if_flags; |
| 1558 | (*ifp->if_ioctl)(ifp, SIOCSIFFLAGS((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff ) << 16) | ((('i')) << 8) | ((16))), (caddr_t)&ifrq); |
| 1559 | } |
| 1560 | NET_UNLOCK()do { rw_exit_write(&netlock); } while (0); |
| 1561 | } |
| 1562 | |
| 1563 | /* |
| 1564 | * Mark an interface down and notify protocols of |
| 1565 | * the transition. |
| 1566 | */ |
| 1567 | void |
| 1568 | if_down(struct ifnet *ifp) |
| 1569 | { |
| 1570 | NET_ASSERT_LOCKED()do { int _s = rw_status(&netlock); if ((splassert_ctl > 0) && (_s != 0x0001UL && _s != 0x0002UL)) splassert_fail (0x0002UL, _s, __func__); } while (0); |
| 1571 | |
| 1572 | ifp->if_flags &= ~IFF_UP0x1; |
| 1573 | getmicrotime(&ifp->if_lastchangeif_data.ifi_lastchange); |
| 1574 | ifq_purge(&ifp->if_snd); |
| 1575 | |
| 1576 | if_linkstate(ifp); |
| 1577 | } |
| 1578 | |
| 1579 | /* |
| 1580 | * Mark an interface up and notify protocols of |
| 1581 | * the transition. |
| 1582 | */ |
| 1583 | void |
| 1584 | if_up(struct ifnet *ifp) |
| 1585 | { |
| 1586 | NET_ASSERT_LOCKED()do { int _s = rw_status(&netlock); if ((splassert_ctl > 0) && (_s != 0x0001UL && _s != 0x0002UL)) splassert_fail (0x0002UL, _s, __func__); } while (0); |
| 1587 | |
| 1588 | ifp->if_flags |= IFF_UP0x1; |
| 1589 | getmicrotime(&ifp->if_lastchangeif_data.ifi_lastchange); |
| 1590 | |
| 1591 | #ifdef INET61 |
| 1592 | /* Userland expects the kernel to set ::1 on default lo(4). */ |
| 1593 | if (ifp->if_index == rtable_loindex(ifp->if_rdomainif_data.ifi_rdomain)) |
| 1594 | in6_ifattach(ifp); |
| 1595 | #endif |
| 1596 | |
| 1597 | if_linkstate(ifp); |
| 1598 | } |
| 1599 | |
| 1600 | /* |
| 1601 | * Notify userland, the routing table and the registered hooks of
| 1602 | * a link-state transition. |
| 1603 | */ |
| 1604 | void |
| 1605 | if_linkstate_task(void *xifidx) |
| 1606 | { |
| 1607 | unsigned int ifidx = (unsigned long)xifidx; |
| 1608 | struct ifnet *ifp; |
| 1609 | |
| 1610 | KERNEL_LOCK()_kernel_lock(); |
| 1611 | NET_LOCK()do { rw_enter_write(&netlock); } while (0); |
| 1612 | |
| 1613 | ifp = if_get(ifidx); |
| 1614 | if (ifp != NULL((void *)0)) |
| 1615 | if_linkstate(ifp); |
| 1616 | if_put(ifp); |
| 1617 | |
| 1618 | NET_UNLOCK()do { rw_exit_write(&netlock); } while (0); |
| 1619 | KERNEL_UNLOCK()_kernel_unlock(); |
| 1620 | } |
| 1621 | |
| 1622 | void |
| 1623 | if_linkstate(struct ifnet *ifp) |
| 1624 | { |
| 1625 | NET_ASSERT_LOCKED()do { int _s = rw_status(&netlock); if ((splassert_ctl > 0) && (_s != 0x0001UL && _s != 0x0002UL)) splassert_fail (0x0002UL, _s, __func__); } while (0); |
| 1626 | |
| 1627 | rtm_ifchg(ifp); |
| 1628 | rt_if_track(ifp); |
| 1629 | |
| 1630 | if_hooks_run(&ifp->if_linkstatehooks); |
| 1631 | } |
| 1632 | |
| 1633 | void |
| 1634 | if_linkstatehook_add(struct ifnet *ifp, struct task *t) |
| 1635 | { |
| 1636 | mtx_enter(&if_hooks_mtx); |
| 1637 | TAILQ_INSERT_HEAD(&ifp->if_linkstatehooks, t, t_entry)do { if (((t)->t_entry.tqe_next = (&ifp->if_linkstatehooks )->tqh_first) != ((void *)0)) (&ifp->if_linkstatehooks )->tqh_first->t_entry.tqe_prev = &(t)->t_entry.tqe_next ; else (&ifp->if_linkstatehooks)->tqh_last = &( t)->t_entry.tqe_next; (&ifp->if_linkstatehooks)-> tqh_first = (t); (t)->t_entry.tqe_prev = &(&ifp-> if_linkstatehooks)->tqh_first; } while (0); |
| 1638 | mtx_leave(&if_hooks_mtx); |
| 1639 | } |
| 1640 | |
| 1641 | void |
| 1642 | if_linkstatehook_del(struct ifnet *ifp, struct task *t) |
| 1643 | { |
| 1644 | mtx_enter(&if_hooks_mtx); |
| 1645 | TAILQ_REMOVE(&ifp->if_linkstatehooks, t, t_entry)do { if (((t)->t_entry.tqe_next) != ((void *)0)) (t)->t_entry .tqe_next->t_entry.tqe_prev = (t)->t_entry.tqe_prev; else (&ifp->if_linkstatehooks)->tqh_last = (t)->t_entry .tqe_prev; *(t)->t_entry.tqe_prev = (t)->t_entry.tqe_next ; ((t)->t_entry.tqe_prev) = ((void *)-1); ((t)->t_entry .tqe_next) = ((void *)-1); } while (0); |
| 1646 | mtx_leave(&if_hooks_mtx); |
| 1647 | } |
| 1648 | |
| 1649 | /* |
| 1650 | * Schedule a link state change task. |
| 1651 | */ |
| 1652 | void |
| 1653 | if_link_state_change(struct ifnet *ifp) |
| 1654 | { |
| 1655 | task_add(net_tq(ifp->if_index), &ifp->if_linkstatetask); |
| 1656 | } |
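Putting the pieces together: a driver updates if_link_state and calls if_link_state_change(), which schedules if_linkstate_task() on the interface's net taskq; subsystems that care register a task with if_linkstatehook_add(), and if_linkstate() runs it. A hedged in-kernel sketch with hypothetical example_* names, not a real driver:

void
example_linkstate(void *arg)
{
	/* runs from the net taskq after if_linkstate() fires the hooks */
}

struct task example_ls_task =
    TASK_INITIALIZER(example_linkstate, NULL);

void
example_hook_attach(struct ifnet *ifp)
{
	/* run example_linkstate() on every link-state transition */
	if_linkstatehook_add(ifp, &example_ls_task);
}

void
example_link_intr(struct ifnet *ifp, int up)
{
	ifp->if_link_state = up ? LINK_STATE_UP : LINK_STATE_DOWN;
	if_link_state_change(ifp);	/* schedules if_linkstate_task() */
}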
| 1657 | |
| 1658 | /* |
| 1659 | * Interface watchdog timer routine. Called from softclock;
| 1660 | * we decrement the timer (if set) and schedule the interface's
| 1661 | * watchdog task on expiration.
| 1662 | */ |
| 1663 | void |
| 1664 | if_slowtimo(void *arg) |
| 1665 | { |
| 1666 | struct ifnet *ifp = arg; |
| 1667 | int s = splnet()splraise(0x7); |
| 1668 | |
| 1669 | if (ifp->if_watchdog) { |
| 1670 | if (ifp->if_timer > 0 && --ifp->if_timer == 0) |
| 1671 | task_add(net_tq(ifp->if_index), &ifp->if_watchdogtask); |
| 1672 | timeout_add_sec(&ifp->if_slowtimo, IFNET_SLOWTIMO1); |
| 1673 | } |
| 1674 | splx(s)spllower(s); |
| 1675 | } |
| 1676 | |
| 1677 | void |
| 1678 | if_watchdog_task(void *xifidx) |
| 1679 | { |
| 1680 | unsigned int ifidx = (unsigned long)xifidx; |
| 1681 | struct ifnet *ifp; |
| 1682 | int s; |
| 1683 | |
| 1684 | ifp = if_get(ifidx); |
| 1685 | if (ifp == NULL((void *)0)) |
| 1686 | return; |
| 1687 | |
| 1688 | KERNEL_LOCK()_kernel_lock(); |
| 1689 | s = splnet()splraise(0x7); |
| 1690 | if (ifp->if_watchdog) |
| 1691 | (*ifp->if_watchdog)(ifp); |
| 1692 | splx(s)spllower(s); |
| 1693 | KERNEL_UNLOCK()_kernel_unlock(); |
| 1694 | |
| 1695 | if_put(ifp); |
| 1696 | } |
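For the watchdog path above, a driver provides if_watchdog and arms if_timer; if_slowtimo() decrements the timer once per second and, when it reaches zero, if_watchdog_task() invokes the handler. A hypothetical driver sketch (example_* names are illustrative only):

void
example_start(struct ifnet *ifp)
{
	/* ... hand a packet to the hardware ... */
	ifp->if_timer = 5;	/* expect a tx interrupt within 5 seconds */
}

void
example_watchdog(struct ifnet *ifp)
{
	/* runs under the kernel lock from if_watchdog_task() */
	printf("%s: device timeout\n", ifp->if_xname);
	ifp->if_oerrors++;
	/* ... reset the hardware and restart transmission ... */
}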
| 1697 | |
| 1698 | /* |
| 1699 | * Map interface name to interface structure pointer. |
| 1700 | */ |
| 1701 | struct ifnet * |
| 1702 | if_unit(const char *name) |
| 1703 | { |
| 1704 | struct ifnet *ifp; |
| 1705 | |
| 1706 | KERNEL_ASSERT_LOCKED()((_kernel_lock_held()) ? (void)0 : __assert("diagnostic ", "/usr/src/sys/net/if.c" , 1706, "_kernel_lock_held()")); |
| 1707 | |
| 1708 | TAILQ_FOREACH(ifp, &ifnet, if_list)for((ifp) = ((&ifnet)->tqh_first); (ifp) != ((void *)0 ); (ifp) = ((ifp)->if_list.tqe_next)) { |
| 1709 | if (strcmp(ifp->if_xname, name) == 0) { |
| 1710 | if_ref(ifp); |
| 1711 | return (ifp); |
| 1712 | } |
| 1713 | } |
| 1714 | |
| 1715 | return (NULL((void *)0)); |
| 1716 | } |
| 1717 | |
| 1718 | /* |
| 1719 | * Map interface index to interface structure pointer. |
| 1720 | */ |
| 1721 | struct ifnet * |
| 1722 | if_get(unsigned int index) |
| 1723 | { |
| 1724 | struct srp_ref sr; |
| 1725 | struct if_map *if_map; |
| 1726 | struct srp *map; |
| 1727 | struct ifnet *ifp = NULL((void *)0); |
| 1728 | |
| 1729 | if_map = srp_enter(&sr, &if_idxmap.map); |
| 1730 | if (index < if_map->limit) { |
| 1731 | map = (struct srp *)(if_map + 1); |
| 1732 | |
| 1733 | ifp = srp_follow(&sr, &map[index]); |
| 1734 | if (ifp != NULL((void *)0)) { |
| 1735 | KASSERT(ifp->if_index == index)((ifp->if_index == index) ? (void)0 : __assert("diagnostic " , "/usr/src/sys/net/if.c", 1735, "ifp->if_index == index") ); |
| 1736 | if_ref(ifp); |
| 1737 | } |
| 1738 | } |
| 1739 | srp_leave(&sr); |
| 1740 | |
| 1741 | return (ifp); |
| 1742 | } |
| 1743 | |
| 1744 | struct ifnet * |
| 1745 | if_ref(struct ifnet *ifp) |
| 1746 | { |
| 1747 | refcnt_take(&ifp->if_refcnt); |
| 1748 | |
| 1749 | return (ifp); |
| 1750 | } |
| 1751 | |
| 1752 | void |
| 1753 | if_put(struct ifnet *ifp) |
| 1754 | { |
| 1755 | if (ifp == NULL((void *)0)) |
| 1756 | return; |
| 1757 | |
| 1758 | refcnt_rele_wake(&ifp->if_refcnt); |
| 1759 | } |
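if_get() (and if_unit()) return a reference-counted ifnet pointer, so every successful lookup must be balanced by if_put(); if_put(NULL) is a no-op, which keeps error paths simple. A minimal sketch of the pattern used throughout this file:

void
example_poke(unsigned int ifidx)
{
	struct ifnet *ifp;

	ifp = if_get(ifidx);		/* takes a reference on success */
	if (ifp == NULL)
		return;

	/* ... use ifp while the reference is held ... */

	if_put(ifp);			/* drop the reference */
}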
| 1760 | |
| 1761 | int |
| 1762 | if_setlladdr(struct ifnet *ifp, const uint8_t *lladdr) |
| 1763 | { |
| 1764 | if (ifp->if_sadl == NULL((void *)0)) |
| 1765 | return (EINVAL22); |
| 1766 | |
| 1767 | memcpy(((struct arpcom *)ifp)->ac_enaddr, lladdr, ETHER_ADDR_LEN)__builtin_memcpy((((struct arpcom *)ifp)->ac_enaddr), (lladdr ), (6)); |
| 1768 | memcpy(LLADDR(ifp->if_sadl), lladdr, ETHER_ADDR_LEN)__builtin_memcpy((((caddr_t)((ifp->if_sadl)->sdl_data + (ifp->if_sadl)->sdl_nlen))), (lladdr), (6)); |
| 1769 | |
| 1770 | return (0); |
| 1771 | } |
| 1772 | |
| 1773 | int |
| 1774 | if_createrdomain(int rdomain, struct ifnet *ifp) |
| 1775 | { |
| 1776 | int error; |
| 1777 | struct ifnet *loifp; |
| 1778 | char loifname[IFNAMSIZ16]; |
| 1779 | unsigned int unit = rdomain; |
| 1780 | |
| 1781 | if ((error = rtable_add(rdomain)) != 0) |
| 1782 | return (error); |
| 1783 | if (!rtable_empty(rdomain)) |
| 1784 | return (EEXIST17); |
| 1785 | |
| 1786 | /* Create the rdomain, including its loopback interface with unit == rdomain */
| 1787 | snprintf(loifname, sizeof(loifname), "lo%u", unit); |
| 1788 | error = if_clone_create(loifname, 0); |
| 1789 | if ((loifp = if_unit(loifname)) == NULL((void *)0)) |
| 1790 | return (ENXIO6); |
| 1791 | if (error && (ifp != loifp || error != EEXIST17)) { |
| 1792 | if_put(loifp); |
| 1793 | return (error); |
| 1794 | } |
| 1795 | |
| 1796 | rtable_l2set(rdomain, rdomain, loifp->if_index); |
| 1797 | loifp->if_rdomainif_data.ifi_rdomain = rdomain; |
| 1798 | if_put(loifp); |
| 1799 | |
| 1800 | return (0); |
| 1801 | } |
| 1802 | |
| 1803 | int |
| 1804 | if_setrdomain(struct ifnet *ifp, int rdomain) |
| 1805 | { |
| 1806 | struct ifreq ifr; |
| 1807 | int error, up = 0, s; |
| 1808 | |
| 1809 | if (rdomain < 0 || rdomain > RT_TABLEID_MAX255) |
| 1810 | return (EINVAL22); |
| 1811 | |
| 1812 | if (rdomain != ifp->if_rdomainif_data.ifi_rdomain && |
| 1813 | (ifp->if_flags & IFF_LOOPBACK0x8) && |
| 1814 | (ifp->if_index == rtable_loindex(ifp->if_rdomainif_data.ifi_rdomain))) |
| 1815 | return (EPERM1); |
| 1816 | |
| 1817 | if (!rtable_exists(rdomain)) |
| 1818 | return (ESRCH3); |
| 1819 | |
| 1820 | /* make sure that the routing table is a real rdomain */ |
| 1821 | if (rdomain != rtable_l2(rdomain)) |
| 1822 | return (EINVAL22); |
| 1823 | |
| 1824 | if (rdomain != ifp->if_rdomainif_data.ifi_rdomain) { |
| 1825 | s = splnet()splraise(0x7); |
| 1826 | /* |
| 1827 | * We are tearing down the world. |
| 1828 | * Take down the IF so: |
| 1829 | * 1. everything that cares gets a message |
| 1830 | * 2. the automagic IPv6 bits are recreated |
| 1831 | */ |
| 1832 | if (ifp->if_flags & IFF_UP0x1) { |
| 1833 | up = 1; |
| 1834 | if_down(ifp); |
| 1835 | } |
| 1836 | rti_delete(ifp); |
| 1837 | #ifdef MROUTING1 |
| 1838 | vif_delete(ifp); |
| 1839 | #endif |
| 1840 | in_ifdetach(ifp); |
| 1841 | #ifdef INET61 |
| 1842 | in6_ifdetach(ifp); |
| 1843 | #endif |
| 1844 | splx(s)spllower(s); |
| 1845 | } |
| 1846 | |
| 1847 | /* Let devices like enc(4) or mpe(4) know about the change */ |
| 1848 | ifr.ifr_rdomainidifr_ifru.ifru_metric = rdomain; |
| 1849 | if ((error = (*ifp->if_ioctl)(ifp, SIOCSIFRDOMAIN((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff ) << 16) | ((('i')) << 8) | ((159))), |
| 1850 | (caddr_t)&ifr)) != ENOTTY25) |
| 1851 | return (error); |
| 1852 | error = 0; |
| 1853 | |
| 1854 | /* Add interface to the specified rdomain */ |
| 1855 | ifp->if_rdomainif_data.ifi_rdomain = rdomain; |
| 1856 | |
| 1857 | /* If we took down the IF, bring it back */ |
| 1858 | if (up) { |
| 1859 | s = splnet()splraise(0x7); |
| 1860 | if_up(ifp); |
| 1861 | splx(s)spllower(s); |
| 1862 | } |
| 1863 | |
| 1864 | return (0); |
| 1865 | } |
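From userland, these two functions are reached through the SIOCSIFRDOMAIN ioctl (handled further below): the kernel creates the rdomain and its loopback interface if needed, then moves the interface into it. A hedged userland sketch of what ifconfig(8) does; the interface name "vether0" and the rdomain number are examples, and the program must run as root:

#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <err.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	struct ifreq ifr;
	int s;

	if ((s = socket(AF_INET, SOCK_DGRAM, 0)) == -1)
		err(1, "socket");

	memset(&ifr, 0, sizeof(ifr));
	strlcpy(ifr.ifr_name, "vether0", sizeof(ifr.ifr_name));
	ifr.ifr_rdomainid = 2;	/* rdomain 2 is created if it does not exist */
	if (ioctl(s, SIOCSIFRDOMAIN, &ifr) == -1)
		err(1, "SIOCSIFRDOMAIN");

	close(s);
	return (0);
}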
| 1866 | |
| 1867 | /* |
| 1868 | * Interface ioctls. |
| 1869 | */ |
| 1870 | int |
| 1871 | ifioctl(struct socket *so, u_long cmd, caddr_t data, struct proc *p) |
| 1872 | { |
| 1873 | struct ifnet *ifp; |
| 1874 | struct ifreq *ifr = (struct ifreq *)data; |
| 1875 | struct ifgroupreq *ifgr = (struct ifgroupreq *)data; |
| 1876 | struct if_afreq *ifar = (struct if_afreq *)data; |
| 1877 | char ifdescrbuf[IFDESCRSIZE64]; |
| 1878 | char ifrtlabelbuf[RTLABEL_LEN32]; |
| 1879 | int s, error = 0, oif_xflags; |
| 1880 | size_t bytesdone; |
| 1881 | unsigned short oif_flags; |
| 1882 | |
| 1883 | switch (cmd) { |
| 1884 | case SIOCIFCREATE((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff ) << 16) | ((('i')) << 8) | ((122))): |
| 1885 | if ((error = suser(p)) != 0) |
| 1886 | return (error); |
| 1887 | error = if_clone_create(ifr->ifr_name, 0); |
| 1888 | return (error); |
| 1889 | case SIOCIFDESTROY((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff ) << 16) | ((('i')) << 8) | ((121))): |
| 1890 | if ((error = suser(p)) != 0) |
| 1891 | return (error); |
| 1892 | error = if_clone_destroy(ifr->ifr_name); |
| 1893 | return (error); |
| 1894 | case SIOCSIFGATTR((unsigned long)0x80000000 | ((sizeof(struct ifgroupreq) & 0x1fff) << 16) | ((('i')) << 8) | ((140))): |
| 1895 | if ((error = suser(p)) != 0) |
| 1896 | return (error); |
| 1897 | NET_LOCK()do { rw_enter_write(&netlock); } while (0); |
| 1898 | error = if_setgroupattribs(data); |
| 1899 | NET_UNLOCK()do { rw_exit_write(&netlock); } while (0); |
| 1900 | return (error); |
| 1901 | case SIOCGIFCONF(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof (struct ifconf) & 0x1fff) << 16) | ((('i')) << 8) | ((36))): |
| 1902 | case SIOCIFGCLONERS(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof (struct if_clonereq) & 0x1fff) << 16) | ((('i')) << 8) | ((120))): |
| 1903 | case SIOCGIFGMEMB(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof (struct ifgroupreq) & 0x1fff) << 16) | ((('i')) << 8) | ((138))): |
| 1904 | case SIOCGIFGATTR(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof (struct ifgroupreq) & 0x1fff) << 16) | ((('i')) << 8) | ((139))): |
| 1905 | case SIOCGIFGLIST(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof (struct ifgroupreq) & 0x1fff) << 16) | ((('i')) << 8) | ((141))): |
| 1906 | case SIOCGIFFLAGS(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof (struct ifreq) & 0x1fff) << 16) | ((('i')) << 8) | ((17))): |
| 1907 | case SIOCGIFXFLAGS(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof (struct ifreq) & 0x1fff) << 16) | ((('i')) << 8) | ((158))): |
| 1908 | case SIOCGIFMETRIC(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof (struct ifreq) & 0x1fff) << 16) | ((('i')) << 8) | ((23))): |
| 1909 | case SIOCGIFMTU(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof (struct ifreq) & 0x1fff) << 16) | ((('i')) << 8) | ((126))): |
| 1910 | case SIOCGIFHARDMTU(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof (struct ifreq) & 0x1fff) << 16) | ((('i')) << 8) | ((165))): |
| 1911 | case SIOCGIFDATA(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof (struct ifreq) & 0x1fff) << 16) | ((('i')) << 8) | ((27))): |
| 1912 | case SIOCGIFDESCR(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof (struct ifreq) & 0x1fff) << 16) | ((('i')) << 8) | ((129))): |
| 1913 | case SIOCGIFRTLABEL(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof (struct ifreq) & 0x1fff) << 16) | ((('i')) << 8) | ((131))): |
| 1914 | case SIOCGIFPRIORITY(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof (struct ifreq) & 0x1fff) << 16) | ((('i')) << 8) | ((156))): |
| 1915 | case SIOCGIFRDOMAIN(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof (struct ifreq) & 0x1fff) << 16) | ((('i')) << 8) | ((160))): |
| 1916 | case SIOCGIFGROUP(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof (struct ifgroupreq) & 0x1fff) << 16) | ((('i')) << 8) | ((136))): |
| 1917 | case SIOCGIFLLPRIO(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof (struct ifreq) & 0x1fff) << 16) | ((('i')) << 8) | ((182))): |
| 1918 | return (ifioctl_get(cmd, data)); |
| 1919 | } |
| 1920 | |
| 1921 | ifp = if_unit(ifr->ifr_name); |
| 1922 | if (ifp == NULL((void *)0)) |
| 1923 | return (ENXIO6); |
| 1924 | oif_flags = ifp->if_flags; |
| 1925 | oif_xflags = ifp->if_xflags; |
| 1926 | |
| 1927 | switch (cmd) { |
| 1928 | case SIOCIFAFATTACH((unsigned long)0x80000000 | ((sizeof(struct if_afreq) & 0x1fff ) << 16) | ((('i')) << 8) | ((171))): |
| 1929 | case SIOCIFAFDETACH((unsigned long)0x80000000 | ((sizeof(struct if_afreq) & 0x1fff ) << 16) | ((('i')) << 8) | ((172))): |
| 1930 | if ((error = suser(p)) != 0) |
| 1931 | break; |
| 1932 | NET_LOCK()do { rw_enter_write(&netlock); } while (0); |
| 1933 | switch (ifar->ifar_af) { |
| 1934 | case AF_INET2: |
| 1935 | /* attach is a noop for AF_INET */ |
| 1936 | if (cmd == SIOCIFAFDETACH((unsigned long)0x80000000 | ((sizeof(struct if_afreq) & 0x1fff ) << 16) | ((('i')) << 8) | ((172)))) |
| 1937 | in_ifdetach(ifp); |
| 1938 | break; |
| 1939 | #ifdef INET61 |
| 1940 | case AF_INET624: |
| 1941 | if (cmd == SIOCIFAFATTACH((unsigned long)0x80000000 | ((sizeof(struct if_afreq) & 0x1fff ) << 16) | ((('i')) << 8) | ((171)))) |
| 1942 | error = in6_ifattach(ifp); |
| 1943 | else |
| 1944 | in6_ifdetach(ifp); |
| 1945 | break; |
| 1946 | #endif /* INET6 */ |
| 1947 | default: |
| 1948 | error = EAFNOSUPPORT47; |
| 1949 | } |
| 1950 | NET_UNLOCK()do { rw_exit_write(&netlock); } while (0); |
| 1951 | break; |
| 1952 | |
| 1953 | case SIOCSIFXFLAGS((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff ) << 16) | ((('i')) << 8) | ((157))): |
| 1954 | if ((error = suser(p)) != 0) |
| 1955 | break; |
| 1956 | |
| 1957 | NET_LOCK()do { rw_enter_write(&netlock); } while (0); |
| 1958 | #ifdef INET61 |
| 1959 | if ((ISSET(ifr->ifr_flags, IFXF_AUTOCONF6)((ifr->ifr_ifru.ifru_flags) & (0x20)) || |
| 1960 | ISSET(ifr->ifr_flags, IFXF_AUTOCONF6TEMP)((ifr->ifr_ifru.ifru_flags) & (0x4))) && |
| 1961 | !ISSET(ifp->if_xflags, IFXF_AUTOCONF6)((ifp->if_xflags) & (0x20)) && |
| 1962 | !ISSET(ifp->if_xflags, IFXF_AUTOCONF6TEMP)((ifp->if_xflags) & (0x4))) { |
| 1963 | error = in6_ifattach(ifp); |
| 1964 | if (error != 0) { |
| 1965 | NET_UNLOCK()do { rw_exit_write(&netlock); } while (0); |
| 1966 | break; |
| 1967 | } |
| 1968 | } |
| 1969 | |
| 1970 | if (ISSET(ifr->ifr_flags, IFXF_INET6_NOSOII)((ifr->ifr_ifru.ifru_flags) & (0x40)) && |
| 1971 | !ISSET(ifp->if_xflags, IFXF_INET6_NOSOII)((ifp->if_xflags) & (0x40))) |
| 1972 | ifp->if_xflags |= IFXF_INET6_NOSOII0x40; |
| 1973 | |
| 1974 | if (!ISSET(ifr->ifr_flags, IFXF_INET6_NOSOII)((ifr->ifr_ifru.ifru_flags) & (0x40)) && |
| 1975 | ISSET(ifp->if_xflags, IFXF_INET6_NOSOII)((ifp->if_xflags) & (0x40))) |
| 1976 | ifp->if_xflags &= ~IFXF_INET6_NOSOII0x40; |
| 1977 | |
| 1978 | #endif /* INET6 */ |
| 1979 | |
| 1980 | #ifdef MPLS1 |
| 1981 | if (ISSET(ifr->ifr_flags, IFXF_MPLS)((ifr->ifr_ifru.ifru_flags) & (0x8)) && |
| 1982 | !ISSET(ifp->if_xflags, IFXF_MPLS)((ifp->if_xflags) & (0x8))) { |
| 1983 | s = splnet()splraise(0x7); |
| 1984 | ifp->if_xflags |= IFXF_MPLS0x8; |
| 1985 | ifp->if_ll_output = ifp->if_output; |
| 1986 | ifp->if_output = mpls_output; |
| 1987 | splx(s)spllower(s); |
| 1988 | } |
| 1989 | if (ISSET(ifp->if_xflags, IFXF_MPLS)((ifp->if_xflags) & (0x8)) && |
| 1990 | !ISSET(ifr->ifr_flags, IFXF_MPLS)((ifr->ifr_ifru.ifru_flags) & (0x8))) { |
| 1991 | s = splnet()splraise(0x7); |
| 1992 | ifp->if_xflags &= ~IFXF_MPLS0x8; |
| 1993 | ifp->if_output = ifp->if_ll_output; |
| 1994 | ifp->if_ll_output = NULL((void *)0); |
| 1995 | splx(s)spllower(s); |
| 1996 | } |
| 1997 | #endif /* MPLS */ |
| 1998 | |
| 1999 | #ifndef SMALL_KERNEL |
| 2000 | if (ifp->if_capabilitiesif_data.ifi_capabilities & IFCAP_WOL0x00008000) { |
| 2001 | if (ISSET(ifr->ifr_flags, IFXF_WOL)((ifr->ifr_ifru.ifru_flags) & (0x10)) && |
| 2002 | !ISSET(ifp->if_xflags, IFXF_WOL)((ifp->if_xflags) & (0x10))) { |
| 2003 | s = splnet()splraise(0x7); |
| 2004 | ifp->if_xflags |= IFXF_WOL0x10; |
| 2005 | error = ifp->if_wol(ifp, 1); |
| 2006 | splx(s)spllower(s); |
| 2007 | } |
| 2008 | if (ISSET(ifp->if_xflags, IFXF_WOL)((ifp->if_xflags) & (0x10)) && |
| 2009 | !ISSET(ifr->ifr_flags, IFXF_WOL)((ifr->ifr_ifru.ifru_flags) & (0x10))) { |
| 2010 | s = splnet()splraise(0x7); |
| 2011 | ifp->if_xflags &= ~IFXF_WOL0x10; |
| 2012 | error = ifp->if_wol(ifp, 0); |
| 2013 | splx(s)spllower(s); |
| 2014 | } |
| 2015 | } else if (ISSET(ifr->ifr_flags, IFXF_WOL)((ifr->ifr_ifru.ifru_flags) & (0x10))) { |
| 2016 | ifr->ifr_flagsifr_ifru.ifru_flags &= ~IFXF_WOL0x10; |
| 2017 | error = ENOTSUP91; |
| 2018 | } |
| 2019 | #endif |
| 2020 | |
| 2021 | if (error == 0) |
| 2022 | ifp->if_xflags = (ifp->if_xflags & IFXF_CANTCHANGE(0x1|0x2)) | |
| 2023 | (ifr->ifr_flagsifr_ifru.ifru_flags & ~IFXF_CANTCHANGE(0x1|0x2)); |
| 2024 | |
| 2025 | if (!ISSET(ifp->if_flags, IFF_UP)((ifp->if_flags) & (0x1)) && |
| 2026 | ((!ISSET(oif_xflags, IFXF_AUTOCONF4)((oif_xflags) & (0x80)) && |
| 2027 | ISSET(ifp->if_xflags, IFXF_AUTOCONF4)((ifp->if_xflags) & (0x80))) || |
| 2028 | (!ISSET(oif_xflags, IFXF_AUTOCONF6)((oif_xflags) & (0x20)) && |
| 2029 | ISSET(ifp->if_xflags, IFXF_AUTOCONF6)((ifp->if_xflags) & (0x20))) || |
| 2030 | (!ISSET(oif_xflags, IFXF_AUTOCONF6TEMP)((oif_xflags) & (0x4)) && |
| 2031 | ISSET(ifp->if_xflags, IFXF_AUTOCONF6TEMP)((ifp->if_xflags) & (0x4))))) { |
| 2032 | ifr->ifr_flagsifr_ifru.ifru_flags = ifp->if_flags | IFF_UP0x1; |
| 2033 | goto forceup; |
| 2034 | } |
| 2035 | |
| 2036 | NET_UNLOCK()do { rw_exit_write(&netlock); } while (0); |
| 2037 | break; |
| 2038 | |
| 2039 | case SIOCSIFFLAGS((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff ) << 16) | ((('i')) << 8) | ((16))): |
| 2040 | if ((error = suser(p)) != 0) |
| 2041 | break; |
| 2042 | |
| 2043 | NET_LOCK()do { rw_enter_write(&netlock); } while (0); |
| 2044 | forceup: |
| 2045 | ifp->if_flags = (ifp->if_flags & IFF_CANTCHANGE(0x2|0x10|0x40|0x400| 0x800|0x8000|0x200)) | |
| 2046 | (ifr->ifr_flagsifr_ifru.ifru_flags & ~IFF_CANTCHANGE(0x2|0x10|0x40|0x400| 0x800|0x8000|0x200)); |
| 2047 | error = (*ifp->if_ioctl)(ifp, SIOCSIFFLAGS((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff ) << 16) | ((('i')) << 8) | ((16))), data); |
| 2048 | if (error != 0) { |
| 2049 | ifp->if_flags = oif_flags; |
| 2050 | if (cmd == SIOCSIFXFLAGS((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff ) << 16) | ((('i')) << 8) | ((157)))) |
| 2051 | ifp->if_xflags = oif_xflags; |
| 2052 | } else if (ISSET(oif_flags ^ ifp->if_flags, IFF_UP)((oif_flags ^ ifp->if_flags) & (0x1))) { |
| 2053 | s = splnet()splraise(0x7); |
| 2054 | if (ISSET(ifp->if_flags, IFF_UP)((ifp->if_flags) & (0x1))) |
| 2055 | if_up(ifp); |
| 2056 | else |
| 2057 | if_down(ifp); |
| 2058 | splx(s)spllower(s); |
| 2059 | } |
| 2060 | NET_UNLOCK()do { rw_exit_write(&netlock); } while (0); |
| 2061 | break; |
| 2062 | |
| 2063 | case SIOCSIFMETRIC((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff ) << 16) | ((('i')) << 8) | ((24))): |
| 2064 | if ((error = suser(p)) != 0) |
| 2065 | break; |
| 2066 | NET_LOCK()do { rw_enter_write(&netlock); } while (0); |
| 2067 | ifp->if_metricif_data.ifi_metric = ifr->ifr_metricifr_ifru.ifru_metric; |
| 2068 | NET_UNLOCK()do { rw_exit_write(&netlock); } while (0); |
| 2069 | break; |
| 2070 | |
| 2071 | case SIOCSIFMTU((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff ) << 16) | ((('i')) << 8) | ((127))): |
| 2072 | if ((error = suser(p)) != 0) |
| 2073 | break; |
| 2074 | NET_LOCK()do { rw_enter_write(&netlock); } while (0); |
| 2075 | error = (*ifp->if_ioctl)(ifp, cmd, data); |
| 2076 | NET_UNLOCK()do { rw_exit_write(&netlock); } while (0); |
| 2077 | if (error == 0) |
| 2078 | rtm_ifchg(ifp); |
| 2079 | break; |
| 2080 | |
| 2081 | case SIOCSIFDESCR((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff ) << 16) | ((('i')) << 8) | ((128))): |
| 2082 | if ((error = suser(p)) != 0) |
| 2083 | break; |
| 2084 | error = copyinstr(ifr->ifr_dataifr_ifru.ifru_data, ifdescrbuf, |
| 2085 | IFDESCRSIZE64, &bytesdone); |
| 2086 | if (error == 0) { |
| 2087 | (void)memset(ifp->if_description, 0, IFDESCRSIZE)__builtin_memset((ifp->if_description), (0), (64)); |
| 2088 | strlcpy(ifp->if_description, ifdescrbuf, IFDESCRSIZE64); |
| 2089 | } |
| 2090 | break; |
| 2091 | |
| 2092 | case SIOCSIFRTLABEL((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff ) << 16) | ((('i')) << 8) | ((130))): |
| 2093 | if ((error = suser(p)) != 0) |
| 2094 | break; |
| 2095 | error = copyinstr(ifr->ifr_dataifr_ifru.ifru_data, ifrtlabelbuf, |
| 2096 | RTLABEL_LEN32, &bytesdone); |
| 2097 | if (error == 0) { |
| 2098 | rtlabel_unref(ifp->if_rtlabelid); |
| 2099 | ifp->if_rtlabelid = rtlabel_name2id(ifrtlabelbuf); |
| 2100 | } |
| 2101 | break; |
| 2102 | |
| 2103 | case SIOCSIFPRIORITY((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff ) << 16) | ((('i')) << 8) | ((155))): |
| 2104 | if ((error = suser(p)) != 0) |
| 2105 | break; |
| 2106 | if (ifr->ifr_metricifr_ifru.ifru_metric < 0 || ifr->ifr_metricifr_ifru.ifru_metric > 15) { |
| 2107 | error = EINVAL22; |
| 2108 | break; |
| 2109 | } |
| 2110 | ifp->if_priority = ifr->ifr_metricifr_ifru.ifru_metric; |
| 2111 | break; |
| 2112 | |
| 2113 | case SIOCSIFRDOMAIN((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff ) << 16) | ((('i')) << 8) | ((159))): |
| 2114 | if ((error = suser(p)) != 0) |
| 2115 | break; |
| 2116 | error = if_createrdomain(ifr->ifr_rdomainidifr_ifru.ifru_metric, ifp); |
| 2117 | if (!error || error == EEXIST17) { |
| 2118 | NET_LOCK()do { rw_enter_write(&netlock); } while (0); |
| 2119 | error = if_setrdomain(ifp, ifr->ifr_rdomainidifr_ifru.ifru_metric); |
| 2120 | NET_UNLOCK()do { rw_exit_write(&netlock); } while (0); |
| 2121 | } |
| 2122 | break; |
| 2123 | |
| 2124 | case SIOCAIFGROUP((unsigned long)0x80000000 | ((sizeof(struct ifgroupreq) & 0x1fff) << 16) | ((('i')) << 8) | ((135))): |
| 2125 | if ((error = suser(p))) |
| 2126 | break; |
| 2127 | NET_LOCK()do { rw_enter_write(&netlock); } while (0); |
| 2128 | error = if_addgroup(ifp, ifgr->ifgr_groupifgr_ifgru.ifgru_group); |
| 2129 | if (error == 0) { |
| 2130 | error = (*ifp->if_ioctl)(ifp, cmd, data); |
| 2131 | if (error == ENOTTY25) |
| 2132 | error = 0; |
| 2133 | } |
| 2134 | NET_UNLOCK()do { rw_exit_write(&netlock); } while (0); |
| 2135 | break; |
| 2136 | |
| 2137 | case SIOCDIFGROUP((unsigned long)0x80000000 | ((sizeof(struct ifgroupreq) & 0x1fff) << 16) | ((('i')) << 8) | ((137))): |
| 2138 | if ((error = suser(p))) |
| 2139 | break; |
| 2140 | NET_LOCK()do { rw_enter_write(&netlock); } while (0); |
| 2141 | error = (*ifp->if_ioctl)(ifp, cmd, data); |
| 2142 | if (error == ENOTTY25) |
| 2143 | error = 0; |
| 2144 | if (error == 0) |
| 2145 | error = if_delgroup(ifp, ifgr->ifgr_groupifgr_ifgru.ifgru_group); |
| 2146 | NET_UNLOCK()do { rw_exit_write(&netlock); } while (0); |
| 2147 | break; |
| 2148 | |
| 2149 | case SIOCSIFLLADDR((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff ) << 16) | ((('i')) << 8) | ((31))): |
| 2150 | if ((error = suser(p))) |
| 2151 | break; |
| 2152 | if ((ifp->if_sadl == NULL((void *)0)) || |
| 2153 | (ifr->ifr_addrifr_ifru.ifru_addr.sa_len != ETHER_ADDR_LEN6) || |
| 2154 | (ETHER_IS_MULTICAST(ifr->ifr_addr.sa_data)(*(ifr->ifr_ifru.ifru_addr.sa_data) & 0x01))) { |
| 2155 | error = EINVAL22; |
| 2156 | break; |
| 2157 | } |
| 2158 | NET_LOCK()do { rw_enter_write(&netlock); } while (0); |
| 2159 | switch (ifp->if_typeif_data.ifi_type) { |
| 2160 | case IFT_ETHER0x06: |
| 2161 | case IFT_CARP0xf7: |
| 2162 | case IFT_XETHER0x1a: |
| 2163 | case IFT_ISO880250x09: |
| 2164 | error = (*ifp->if_ioctl)(ifp, cmd, data); |
| 2165 | if (error == ENOTTY25) |
| 2166 | error = 0; |
| 2167 | if (error == 0) |
| 2168 | error = if_setlladdr(ifp, |
| 2169 | ifr->ifr_addrifr_ifru.ifru_addr.sa_data); |
| 2170 | break; |
| 2171 | default: |
| 2172 | error = ENODEV19; |
| 2173 | } |
| 2174 | |
| 2175 | if (error == 0) |
| 2176 | ifnewlladdr(ifp); |
| 2177 | NET_UNLOCK()do { rw_exit_write(&netlock); } while (0); |
| 2178 | if (error == 0) |
| 2179 | rtm_ifchg(ifp); |
| 2180 | break; |
| 2181 | |
| 2182 | case SIOCSIFLLPRIO((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff ) << 16) | ((('i')) << 8) | ((181))): |
| 2183 | if ((error = suser(p))) |
| 2184 | break; |
| 2185 | if (ifr->ifr_llprioifr_ifru.ifru_metric < IFQ_MINPRIO0 || |
| 2186 | ifr->ifr_llprioifr_ifru.ifru_metric > IFQ_MAXPRIO8 - 1) { |
| 2187 | error = EINVAL22; |
| 2188 | break; |
| 2189 | } |
| 2190 | NET_LOCK()do { rw_enter_write(&netlock); } while (0); |
| 2191 | ifp->if_llprio = ifr->ifr_llprioifr_ifru.ifru_metric; |
| 2192 | NET_UNLOCK()do { rw_exit_write(&netlock); } while (0); |
| 2193 | break; |
| 2194 | |
| 2195 | case SIOCGIFSFFPAGE(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof (struct if_sffpage) & 0x1fff) << 16) | ((('i')) << 8) | ((57))): |
| 2196 | error = suser(p); |
| 2197 | if (error != 0) |
| 2198 | break; |
| 2199 | |
| 2200 | error = if_sffpage_check(data); |
| 2201 | if (error != 0) |
| 2202 | break; |
| 2203 | |
| 2204 | /* don't take NET_LOCK because i2c reads take a long time */ |
| 2205 | error = ((*ifp->if_ioctl)(ifp, cmd, data)); |
| 2206 | break; |
| 2207 | |
| 2208 | case SIOCSETKALIVE((unsigned long)0x80000000 | ((sizeof(struct ifkalivereq) & 0x1fff) << 16) | ((('i')) << 8) | ((163))): |
| 2209 | case SIOCDIFPHYADDR((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff ) << 16) | ((('i')) << 8) | ((73))): |
| 2210 | case SIOCSLIFPHYADDR((unsigned long)0x80000000 | ((sizeof(struct if_laddrreq) & 0x1fff) << 16) | ((('i')) << 8) | ((74))): |
| 2211 | case SIOCSLIFPHYRTABLE((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff ) << 16) | ((('i')) << 8) | ((161))): |
| 2212 | case SIOCSLIFPHYTTL((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff ) << 16) | ((('i')) << 8) | ((168))): |
| 2213 | case SIOCSLIFPHYDF((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff ) << 16) | ((('i')) << 8) | ((193))): |
| 2214 | case SIOCSLIFPHYECN((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff ) << 16) | ((('i')) << 8) | ((199))): |
| 2215 | case SIOCADDMULTI((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff ) << 16) | ((('i')) << 8) | ((49))): |
| 2216 | case SIOCDELMULTI((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff ) << 16) | ((('i')) << 8) | ((50))): |
| 2217 | case SIOCSIFMEDIA(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof (struct ifreq) & 0x1fff) << 16) | ((('i')) << 8) | ((55))): |
| 2218 | case SIOCSVNETID((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff ) << 16) | ((('i')) << 8) | ((166))): |
| 2219 | case SIOCDVNETID((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff ) << 16) | ((('i')) << 8) | ((175))): |
| 2220 | case SIOCSVNETFLOWID((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff ) << 16) | ((('i')) << 8) | ((195))): |
| 2221 | case SIOCSTXHPRIO((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff ) << 16) | ((('i')) << 8) | ((197))): |
| 2222 | case SIOCSRXHPRIO((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff ) << 16) | ((('i')) << 8) | ((219))): |
| 2223 | case SIOCSIFPAIR((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff ) << 16) | ((('i')) << 8) | ((176))): |
| 2224 | case SIOCSIFPARENT((unsigned long)0x80000000 | ((sizeof(struct if_parent) & 0x1fff) << 16) | ((('i')) << 8) | ((178))): |
| 2225 | case SIOCDIFPARENT((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff ) << 16) | ((('i')) << 8) | ((180))): |
| 2226 | case SIOCSETMPWCFG((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff ) << 16) | ((('i')) << 8) | ((173))): |
| 2227 | case SIOCSETLABEL((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff ) << 16) | ((('i')) << 8) | ((153))): |
| 2228 | case SIOCDELLABEL((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff ) << 16) | ((('i')) << 8) | ((151))): |
| 2229 | case SIOCSPWE3CTRLWORD((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff ) << 16) | ((('i')) << 8) | ((220))): |
| 2230 | case SIOCSPWE3FAT((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff ) << 16) | ((('i')) << 8) | ((221))): |
| 2231 | case SIOCSPWE3NEIGHBOR((unsigned long)0x80000000 | ((sizeof(struct if_laddrreq) & 0x1fff) << 16) | ((('i')) << 8) | ((222))): |
| 2232 | case SIOCDPWE3NEIGHBOR((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff ) << 16) | ((('i')) << 8) | ((222))): |
| 2233 | #if NBRIDGE1 > 0 |
| 2234 | case SIOCBRDGADD((unsigned long)0x80000000 | ((sizeof(struct ifbreq) & 0x1fff ) << 16) | ((('i')) << 8) | ((60))): |
| 2235 | case SIOCBRDGDEL((unsigned long)0x80000000 | ((sizeof(struct ifbreq) & 0x1fff ) << 16) | ((('i')) << 8) | ((61))): |
| 2236 | case SIOCBRDGSIFFLGS((unsigned long)0x80000000 | ((sizeof(struct ifbreq) & 0x1fff ) << 16) | ((('i')) << 8) | ((63))): |
| 2237 | case SIOCBRDGSCACHE((unsigned long)0x80000000 | ((sizeof(struct ifbrparam) & 0x1fff) << 16) | ((('i')) << 8) | ((64))): |
| 2238 | case SIOCBRDGADDS((unsigned long)0x80000000 | ((sizeof(struct ifbreq) & 0x1fff ) << 16) | ((('i')) << 8) | ((65))): |
| 2239 | case SIOCBRDGDELS((unsigned long)0x80000000 | ((sizeof(struct ifbreq) & 0x1fff ) << 16) | ((('i')) << 8) | ((66))): |
| 2240 | case SIOCBRDGSADDR(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof (struct ifbareq) & 0x1fff) << 16) | ((('i')) << 8) | ((68))): |
| 2241 | case SIOCBRDGSTO((unsigned long)0x80000000 | ((sizeof(struct ifbrparam) & 0x1fff) << 16) | ((('i')) << 8) | ((69))): |
| 2242 | case SIOCBRDGDADDR((unsigned long)0x80000000 | ((sizeof(struct ifbareq) & 0x1fff ) << 16) | ((('i')) << 8) | ((71))): |
| 2243 | case SIOCBRDGFLUSH((unsigned long)0x80000000 | ((sizeof(struct ifbreq) & 0x1fff ) << 16) | ((('i')) << 8) | ((72))): |
| 2244 | case SIOCBRDGADDL((unsigned long)0x80000000 | ((sizeof(struct ifbreq) & 0x1fff ) << 16) | ((('i')) << 8) | ((73))): |
| 2245 | case SIOCBRDGSIFPROT((unsigned long)0x80000000 | ((sizeof(struct ifbreq) & 0x1fff ) << 16) | ((('i')) << 8) | ((74))): |
| 2246 | case SIOCBRDGARL((unsigned long)0x80000000 | ((sizeof(struct ifbrlreq) & 0x1fff ) << 16) | ((('i')) << 8) | ((77))): |
| 2247 | case SIOCBRDGFRL((unsigned long)0x80000000 | ((sizeof(struct ifbrlreq) & 0x1fff ) << 16) | ((('i')) << 8) | ((78))): |
| 2248 | case SIOCBRDGSPRI((unsigned long)0x80000000 | ((sizeof(struct ifbrparam) & 0x1fff) << 16) | ((('i')) << 8) | ((80))): |
| 2249 | case SIOCBRDGSHT((unsigned long)0x80000000 | ((sizeof(struct ifbrparam) & 0x1fff) << 16) | ((('i')) << 8) | ((81))): |
| 2250 | case SIOCBRDGSFD((unsigned long)0x80000000 | ((sizeof(struct ifbrparam) & 0x1fff) << 16) | ((('i')) << 8) | ((82))): |
| 2251 | case SIOCBRDGSMA((unsigned long)0x80000000 | ((sizeof(struct ifbrparam) & 0x1fff) << 16) | ((('i')) << 8) | ((83))): |
| 2252 | case SIOCBRDGSIFPRIO((unsigned long)0x80000000 | ((sizeof(struct ifbreq) & 0x1fff ) << 16) | ((('i')) << 8) | ((84))): |
| 2253 | case SIOCBRDGSIFCOST((unsigned long)0x80000000 | ((sizeof(struct ifbreq) & 0x1fff ) << 16) | ((('i')) << 8) | ((85))): |
| 2254 | case SIOCBRDGSTXHC((unsigned long)0x80000000 | ((sizeof(struct ifbrparam) & 0x1fff) << 16) | ((('i')) << 8) | ((89))): |
| 2255 | case SIOCBRDGSPROTO((unsigned long)0x80000000 | ((sizeof(struct ifbrparam) & 0x1fff) << 16) | ((('i')) << 8) | ((90))): |
| 2256 | #endif |
| 2257 | if ((error = suser(p)) != 0) |
| 2258 | break; |
| 2259 | /* FALLTHROUGH */ |
| 2260 | default: |
| 2261 | error = ((*so->so_proto->pr_usrreq)(so, PRU_CONTROL11, |
| 2262 | (struct mbuf *) cmd, (struct mbuf *) data, |
| 2263 | (struct mbuf *) ifp, p)); |
| 2264 | if (error != EOPNOTSUPP45) |
| 2265 | break; |
| 2266 | switch (cmd) { |
| 2267 | case SIOCAIFADDR((unsigned long)0x80000000 | ((sizeof(struct ifaliasreq) & 0x1fff) << 16) | ((('i')) << 8) | ((26))): |
| 2268 | case SIOCDIFADDR((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff ) << 16) | ((('i')) << 8) | ((25))): |
| 2269 | case SIOCSIFADDR((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff ) << 16) | ((('i')) << 8) | ((12))): |
| 2270 | case SIOCSIFNETMASK((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff ) << 16) | ((('i')) << 8) | ((22))): |
| 2271 | case SIOCSIFDSTADDR((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff ) << 16) | ((('i')) << 8) | ((14))): |
| 2272 | case SIOCSIFBRDADDR((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff ) << 16) | ((('i')) << 8) | ((19))): |
| 2273 | #ifdef INET61 |
| 2274 | case SIOCAIFADDR_IN6((unsigned long)0x80000000 | ((sizeof(struct in6_aliasreq) & 0x1fff) << 16) | ((('i')) << 8) | ((26))): |
| 2275 | case SIOCDIFADDR_IN6((unsigned long)0x80000000 | ((sizeof(struct in6_ifreq) & 0x1fff) << 16) | ((('i')) << 8) | ((25))): |
| 2276 | #endif |
| 2277 | error = suser(p); |
| 2278 | break; |
| 2279 | default: |
| 2280 | error = 0; |
| 2281 | break; |
| 2282 | } |
| 2283 | if (error) |
| 2284 | break; |
| 2285 | NET_LOCK()do { rw_enter_write(&netlock); } while (0); |
| 2286 | error = ((*ifp->if_ioctl)(ifp, cmd, data)); |
| 2287 | NET_UNLOCK()do { rw_exit_write(&netlock); } while (0); |
| 2288 | break; |
| 2289 | } |
| 2290 | |
| 2291 | if (oif_flags != ifp->if_flags || oif_xflags != ifp->if_xflags) { |
| 2292 | /* if_up() and if_down() already sent an update, skip here */ |
| 2293 | if (((oif_flags ^ ifp->if_flags) & IFF_UP0x1) == 0) |
| 2294 | rtm_ifchg(ifp); |
| 2295 | } |
| 2296 | |
| 2297 | if (((oif_flags ^ ifp->if_flags) & IFF_UP0x1) != 0) |
| 2298 | getmicrotime(&ifp->if_lastchangeif_data.ifi_lastchange); |
| 2299 | |
| 2300 | if_put(ifp); |
| 2301 | |
| 2302 | return (error); |
| 2303 | } |
| 2304 | |
| 2305 | int |
| 2306 | ifioctl_get(u_long cmd, caddr_t data) |
| 2307 | { |
| 2308 | struct ifnet *ifp; |
| 2309 | struct ifreq *ifr = (struct ifreq *)data; |
| 2310 | char ifdescrbuf[IFDESCRSIZE64]; |
| 2311 | char ifrtlabelbuf[RTLABEL_LEN32]; |
| 2312 | int error = 0; |
| 2313 | size_t bytesdone; |
| 2314 | const char *label; |
| 2315 | |
| 2316 | switch(cmd) { |
| 2317 | case SIOCGIFCONF(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof (struct ifconf) & 0x1fff) << 16) | ((('i')) << 8) | ((36))): |
| 2318 | NET_RLOCK_IN_IOCTL()do { rw_enter_read(&netlock); } while (0); |
| 2319 | error = ifconf(data); |
| 2320 | NET_RUNLOCK_IN_IOCTL()do { rw_exit_read(&netlock); } while (0); |
| 2321 | return (error); |
| 2322 | case SIOCIFGCLONERS(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof (struct if_clonereq) & 0x1fff) << 16) | ((('i')) << 8) | ((120))): |
| 2323 | error = if_clone_list((struct if_clonereq *)data); |
| 2324 | return (error); |
| 2325 | case SIOCGIFGMEMB(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof (struct ifgroupreq) & 0x1fff) << 16) | ((('i')) << 8) | ((138))): |
| 2326 | NET_RLOCK_IN_IOCTL()do { rw_enter_read(&netlock); } while (0); |
| 2327 | error = if_getgroupmembers(data); |
| 2328 | NET_RUNLOCK_IN_IOCTL()do { rw_exit_read(&netlock); } while (0); |
| 2329 | return (error); |
| 2330 | case SIOCGIFGATTR(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof (struct ifgroupreq) & 0x1fff) << 16) | ((('i')) << 8) | ((139))): |
| 2331 | NET_RLOCK_IN_IOCTL()do { rw_enter_read(&netlock); } while (0); |
| 2332 | error = if_getgroupattribs(data); |
| 2333 | NET_RUNLOCK_IN_IOCTL()do { rw_exit_read(&netlock); } while (0); |
| 2334 | return (error); |
| 2335 | case SIOCGIFGLIST(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof (struct ifgroupreq) & 0x1fff) << 16) | ((('i')) << 8) | ((141))): |
| 2336 | NET_RLOCK_IN_IOCTL()do { rw_enter_read(&netlock); } while (0); |
| 2337 | error = if_getgrouplist(data); |
| 2338 | NET_RUNLOCK_IN_IOCTL()do { rw_exit_read(&netlock); } while (0); |
| 2339 | return (error); |
| 2340 | } |
| 2341 | |
| 2342 | ifp = if_unit(ifr->ifr_name); |
| 2343 | if (ifp == NULL((void *)0)) |
| 2344 | return (ENXIO6); |
| 2345 | |
| 2346 | NET_RLOCK_IN_IOCTL()do { rw_enter_read(&netlock); } while (0); |
| 2347 | |
| 2348 | switch(cmd) { |
| 2349 | case SIOCGIFFLAGS(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof (struct ifreq) & 0x1fff) << 16) | ((('i')) << 8) | ((17))): |
| 2350 | ifr->ifr_flagsifr_ifru.ifru_flags = ifp->if_flags; |
| 2351 | if (ifq_is_oactive(&ifp->if_snd)) |
| 2352 | ifr->ifr_flagsifr_ifru.ifru_flags |= IFF_OACTIVE0x400; |
| 2353 | break; |
| 2354 | |
| 2355 | case SIOCGIFXFLAGS(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof (struct ifreq) & 0x1fff) << 16) | ((('i')) << 8) | ((158))): |
| 2356 | ifr->ifr_flagsifr_ifru.ifru_flags = ifp->if_xflags & ~(IFXF_MPSAFE0x1|IFXF_CLONED0x2); |
| 2357 | break; |
| 2358 | |
| 2359 | case SIOCGIFMETRIC(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof (struct ifreq) & 0x1fff) << 16) | ((('i')) << 8) | ((23))): |
| 2360 | ifr->ifr_metricifr_ifru.ifru_metric = ifp->if_metricif_data.ifi_metric; |
| 2361 | break; |
| 2362 | |
| 2363 | case SIOCGIFMTU(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof (struct ifreq) & 0x1fff) << 16) | ((('i')) << 8) | ((126))): |
| 2364 | ifr->ifr_mtuifr_ifru.ifru_metric = ifp->if_mtuif_data.ifi_mtu; |
| 2365 | break; |
| 2366 | |
| 2367 | case SIOCGIFHARDMTU(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof (struct ifreq) & 0x1fff) << 16) | ((('i')) << 8) | ((165))): |
| 2368 | ifr->ifr_hardmtuifr_ifru.ifru_metric = ifp->if_hardmtu; |
| 2369 | break; |
| 2370 | |
| 2371 | case SIOCGIFDATA(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof (struct ifreq) & 0x1fff) << 16) | ((('i')) << 8) | ((27))): { |
| 2372 | struct if_data ifdata; |
| 2373 | if_getdata(ifp, &ifdata); |
| 2374 | error = copyout(&ifdata, ifr->ifr_dataifr_ifru.ifru_data, sizeof(ifdata)); |
| 2375 | break; |
| 2376 | } |
| 2377 | |
| 2378 | case SIOCGIFDESCR(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof (struct ifreq) & 0x1fff) << 16) | ((('i')) << 8) | ((129))): |
| 2379 | strlcpy(ifdescrbuf, ifp->if_description, IFDESCRSIZE64); |
| 2380 | error = copyoutstr(ifdescrbuf, ifr->ifr_dataifr_ifru.ifru_data, IFDESCRSIZE64, |
| 2381 | &bytesdone); |
| 2382 | break; |
| 2383 | |
| 2384 | case SIOCGIFRTLABEL(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof (struct ifreq) & 0x1fff) << 16) | ((('i')) << 8) | ((131))): |
| 2385 | if (ifp->if_rtlabelid && |
| 2386 | (label = rtlabel_id2name(ifp->if_rtlabelid)) != NULL((void *)0)) { |
| 2387 | strlcpy(ifrtlabelbuf, label, RTLABEL_LEN32); |
| 2388 | error = copyoutstr(ifrtlabelbuf, ifr->ifr_dataifr_ifru.ifru_data, |
| 2389 | RTLABEL_LEN32, &bytesdone); |
| 2390 | } else |
| 2391 | error = ENOENT2; |
| 2392 | break; |
| 2393 | |
| 2394 | case SIOCGIFPRIORITY(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof (struct ifreq) & 0x1fff) << 16) | ((('i')) << 8) | ((156))): |
| 2395 | ifr->ifr_metricifr_ifru.ifru_metric = ifp->if_priority; |
| 2396 | break; |
| 2397 | |
| 2398 | case SIOCGIFRDOMAIN(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof (struct ifreq) & 0x1fff) << 16) | ((('i')) << 8) | ((160))): |
| 2399 | ifr->ifr_rdomainidifr_ifru.ifru_metric = ifp->if_rdomainif_data.ifi_rdomain; |
| 2400 | break; |
| 2401 | |
| 2402 | case SIOCGIFGROUP(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof (struct ifgroupreq) & 0x1fff) << 16) | ((('i')) << 8) | ((136))): |
| 2403 | error = if_getgroup(data, ifp); |
| 2404 | break; |
| 2405 | |
| 2406 | case SIOCGIFLLPRIO(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof (struct ifreq) & 0x1fff) << 16) | ((('i')) << 8) | ((182))): |
| 2407 | ifr->ifr_llprioifr_ifru.ifru_metric = ifp->if_llprio; |
| 2408 | break; |
| 2409 | |
| 2410 | default: |
| 2411 | panic("invalid ioctl %lu", cmd); |
| 2412 | } |
| 2413 | |
| 2414 | NET_RUNLOCK_IN_IOCTL()do { rw_exit_read(&netlock); } while (0); |
| 2415 | |
| 2416 | if_put(ifp); |
| 2417 | |
| 2418 | return (error); |
| 2419 | } |
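The read-only ioctls dispatched above are reachable from any userland process that owns a socket. A minimal example querying the MTU with SIOCGIFMTU; the interface name "em0" is just an example:

#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <err.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	struct ifreq ifr;
	int s;

	if ((s = socket(AF_INET, SOCK_DGRAM, 0)) == -1)
		err(1, "socket");

	memset(&ifr, 0, sizeof(ifr));
	strlcpy(ifr.ifr_name, "em0", sizeof(ifr.ifr_name));
	if (ioctl(s, SIOCGIFMTU, &ifr) == -1)
		err(1, "SIOCGIFMTU");
	printf("%s mtu %d\n", ifr.ifr_name, ifr.ifr_mtu);

	close(s);
	return (0);
}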
| 2420 | |
| 2421 | static int |
| 2422 | if_sffpage_check(const caddr_t data) |
| 2423 | { |
| 2424 | const struct if_sffpage *sff = (const struct if_sffpage *)data; |
| 2425 | |
| 2426 | switch (sff->sff_addr) { |
| 2427 | case IFSFF_ADDR_EEPROM0xa0: |
| 2428 | case IFSFF_ADDR_DDM0xa2: |
| 2429 | break; |
| 2430 | default: |
| 2431 | return (EINVAL22); |
| 2432 | } |
| 2433 | |
| 2434 | return (0); |
| 2435 | } |
| 2436 | |
| 2437 | int |
| 2438 | if_txhprio_l2_check(int hdrprio) |
| 2439 | { |
| 2440 | switch (hdrprio) { |
| 2441 | case IF_HDRPRIO_PACKET-1: |
| 2442 | return (0); |
| 2443 | default: |
| 2444 | if (hdrprio >= IF_HDRPRIO_MIN0 && hdrprio <= IF_HDRPRIO_MAX8 - 1) |
| 2445 | return (0); |
| 2446 | break; |
| 2447 | } |
| 2448 | |
| 2449 | return (EINVAL22); |
| 2450 | } |
| 2451 | |
| 2452 | int |
| 2453 | if_txhprio_l3_check(int hdrprio) |
| 2454 | { |
| 2455 | switch (hdrprio) { |
| 2456 | case IF_HDRPRIO_PACKET-1: |
| 2457 | case IF_HDRPRIO_PAYLOAD-2: |
| 2458 | return (0); |
| 2459 | default: |
| 2460 | if (hdrprio >= IF_HDRPRIO_MIN0 && hdrprio <= IF_HDRPRIO_MAX8 - 1) |
| 2461 | return (0); |
| 2462 | break; |
| 2463 | } |
| 2464 | |
| 2465 | return (EINVAL22); |
| 2466 | } |
| 2467 | |
| 2468 | int |
| 2469 | if_rxhprio_l2_check(int hdrprio) |
| 2470 | { |
| 2471 | switch (hdrprio) { |
| 2472 | case IF_HDRPRIO_PACKET-1: |
| 2473 | case IF_HDRPRIO_OUTER-3: |
| 2474 | return (0); |
| 2475 | default: |
| 2476 | if (hdrprio >= IF_HDRPRIO_MIN0 && hdrprio <= IF_HDRPRIO_MAX8 - 1) |
| 2477 | return (0); |
| 2478 | break; |
| 2479 | } |
| 2480 | |
| 2481 | return (EINVAL22); |
| 2482 | } |
| 2483 | |
| 2484 | int |
| 2485 | if_rxhprio_l3_check(int hdrprio) |
| 2486 | { |
| 2487 | switch (hdrprio) { |
| 2488 | case IF_HDRPRIO_PACKET-1: |
| 2489 | case IF_HDRPRIO_PAYLOAD-2: |
| 2490 | case IF_HDRPRIO_OUTER-3: |
| 2491 | return (0); |
| 2492 | default: |
| 2493 | if (hdrprio >= IF_HDRPRIO_MIN0 && hdrprio <= IF_HDRPRIO_MAX8 - 1) |
| 2494 | return (0); |
| 2495 | break; |
| 2496 | } |
| 2497 | |
| 2498 | return (EINVAL22); |
| 2499 | } |
| 2500 | |
| 2501 | /* |
| 2502 | * Return the interface configuration
| 2503 | * of the system. The list may be used
| 2504 | * in later ioctls (above) to get
| 2505 | * other information.
| 2506 | */ |
| 2507 | int |
| 2508 | ifconf(caddr_t data) |
| 2509 | { |
| 2510 | struct ifconf *ifc = (struct ifconf *)data; |
| 2511 | struct ifnet *ifp; |
| 2512 | struct ifaddr *ifa; |
| 2513 | struct ifreq ifr, *ifrp; |
| 2514 | int space = ifc->ifc_len, error = 0; |
| 2515 | |
| 2516 | /* If ifc->ifc_len is 0, fill it in with the needed size and return. */ |
| 2517 | if (space == 0) { |
| 2518 | TAILQ_FOREACH(ifp, &ifnet, if_list)for((ifp) = ((&ifnet)->tqh_first); (ifp) != ((void *)0 ); (ifp) = ((ifp)->if_list.tqe_next)) { |
| 2519 | struct sockaddr *sa; |
| 2520 | |
| 2521 | if (TAILQ_EMPTY(&ifp->if_addrlist)(((&ifp->if_addrlist)->tqh_first) == ((void *)0))) |
| 2522 | space += sizeof (ifr); |
| 2523 | else |
| 2524 | TAILQ_FOREACH(ifa,for((ifa) = ((&ifp->if_addrlist)->tqh_first); (ifa) != ((void *)0); (ifa) = ((ifa)->ifa_list.tqe_next)) |
| 2525 | &ifp->if_addrlist, ifa_list)for((ifa) = ((&ifp->if_addrlist)->tqh_first); (ifa) != ((void *)0); (ifa) = ((ifa)->ifa_list.tqe_next)) { |
| 2526 | sa = ifa->ifa_addr; |
| 2527 | if (sa->sa_len > sizeof(*sa)) |
| 2528 | space += sa->sa_len - |
| 2529 | sizeof(*sa); |
| 2530 | space += sizeof(ifr); |
| 2531 | } |
| 2532 | } |
| 2533 | ifc->ifc_len = space; |
| 2534 | return (0); |
| 2535 | } |
| 2536 | |
| 2537 | ifrp = ifc->ifc_reqifc_ifcu.ifcu_req; |
| 2538 | TAILQ_FOREACH(ifp, &ifnet, if_list)for((ifp) = ((&ifnet)->tqh_first); (ifp) != ((void *)0 ); (ifp) = ((ifp)->if_list.tqe_next)) { |
| 2539 | if (space < sizeof(ifr)) |
| 2540 | break; |
| 2541 | bcopy(ifp->if_xname, ifr.ifr_name, IFNAMSIZ16); |
| 2542 | if (TAILQ_EMPTY(&ifp->if_addrlist)(((&ifp->if_addrlist)->tqh_first) == ((void *)0))) { |
| 2543 | bzero((caddr_t)&ifr.ifr_addr, sizeof(ifr.ifr_addr))__builtin_bzero(((caddr_t)&ifr.ifr_ifru.ifru_addr), (sizeof (ifr.ifr_ifru.ifru_addr))); |
| 2544 | error = copyout((caddr_t)&ifr, (caddr_t)ifrp, |
| 2545 | sizeof(ifr)); |
| 2546 | if (error) |
| 2547 | break; |
| 2548 | space -= sizeof (ifr), ifrp++; |
| 2549 | } else |
| 2550 | TAILQ_FOREACH(ifa, &ifp->if_addrlist, ifa_list)for((ifa) = ((&ifp->if_addrlist)->tqh_first); (ifa) != ((void *)0); (ifa) = ((ifa)->ifa_list.tqe_next)) { |
| 2551 | struct sockaddr *sa = ifa->ifa_addr; |
| 2552 | |
| 2553 | if (space < sizeof(ifr)) |
| 2554 | break; |
| 2555 | if (sa->sa_len <= sizeof(*sa)) { |
| 2556 | ifr.ifr_addrifr_ifru.ifru_addr = *sa; |
| 2557 | error = copyout((caddr_t)&ifr, |
| 2558 | (caddr_t)ifrp, sizeof (ifr)); |
| 2559 | ifrp++; |
| 2560 | } else { |
| 2561 | space -= sa->sa_len - sizeof(*sa); |
| 2562 | if (space < sizeof (ifr)) |
| 2563 | break; |
| 2564 | error = copyout((caddr_t)&ifr, |
| 2565 | (caddr_t)ifrp, |
| 2566 | sizeof(ifr.ifr_name)); |
| 2567 | if (error == 0) |
| 2568 | error = copyout((caddr_t)sa, |
| 2569 | (caddr_t)&ifrp->ifr_addrifr_ifru.ifru_addr, |
| 2570 | sa->sa_len); |
| 2571 | ifrp = (struct ifreq *)(sa->sa_len + |
| 2572 | (caddr_t)&ifrp->ifr_addrifr_ifru.ifru_addr); |
| 2573 | } |
| 2574 | if (error) |
| 2575 | break; |
| 2576 | space -= sizeof (ifr); |
| 2577 | } |
| 2578 | } |
| 2579 | ifc->ifc_len -= space; |
| 2580 | return (error); |
| 2581 | } |
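ifconf() implements the classic two-pass SIOCGIFCONF convention visible above: when ifc_len is zero the kernel only reports the size required, otherwise it copies out as many variable-length ifreq records as fit. A userland sketch of that handshake:

#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <err.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	struct ifconf ifc;
	int s;

	if ((s = socket(AF_INET, SOCK_DGRAM, 0)) == -1)
		err(1, "socket");

	memset(&ifc, 0, sizeof(ifc));
	if (ioctl(s, SIOCGIFCONF, &ifc) == -1)	/* ifc_len == 0: size query */
		err(1, "SIOCGIFCONF size");

	if ((ifc.ifc_buf = malloc(ifc.ifc_len)) == NULL)
		err(1, "malloc");
	if (ioctl(s, SIOCGIFCONF, &ifc) == -1)	/* second pass: fetch records */
		err(1, "SIOCGIFCONF");

	/* records are variable length; walking them must honor sa_len */
	printf("%d bytes of ifreq records\n", ifc.ifc_len);

	free(ifc.ifc_buf);
	close(s);
	return (0);
}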
| 2582 | |
| 2583 | void |
| 2584 | if_counters_alloc(struct ifnet *ifp) |
| 2585 | { |
| 2586 | KASSERT(ifp->if_counters == NULL)((ifp->if_counters == ((void *)0)) ? (void)0 : __assert("diagnostic " , "/usr/src/sys/net/if.c", 2586, "ifp->if_counters == NULL" )); |
| 2587 | |
| 2588 | ifp->if_counters = counters_alloc(ifc_ncounters); |
| 2589 | } |
| 2590 | |
| 2591 | void |
| 2592 | if_counters_free(struct ifnet *ifp) |
| 2593 | { |
| 2594 | KASSERT(ifp->if_counters != NULL)((ifp->if_counters != ((void *)0)) ? (void)0 : __assert("diagnostic " , "/usr/src/sys/net/if.c", 2594, "ifp->if_counters != NULL" )); |
| 2595 | |
| 2596 | counters_free(ifp->if_counters, ifc_ncounters); |
| 2597 | ifp->if_counters = NULL((void *)0); |
| 2598 | } |
| 2599 | |
| 2600 | void |
| 2601 | if_getdata(struct ifnet *ifp, struct if_data *data) |
| 2602 | { |
| 2603 | unsigned int i; |
| 2604 | |
| 2605 | *data = ifp->if_data; |
| 2606 | |
| 2607 | if (ifp->if_counters != NULL((void *)0)) { |
| 2608 | uint64_t counters[ifc_ncounters]; |
| 2609 | |
| 2610 | counters_read(ifp->if_counters, counters, nitems(counters)(sizeof((counters)) / sizeof((counters)[0]))); |
| 2611 | |
| 2612 | data->ifi_ipackets += counters[ifc_ipackets]; |
| 2613 | data->ifi_ierrors += counters[ifc_ierrors]; |
| 2614 | data->ifi_opackets += counters[ifc_opackets]; |
| 2615 | data->ifi_oerrors += counters[ifc_oerrors]; |
| 2616 | data->ifi_collisions += counters[ifc_collisions]; |
| 2617 | data->ifi_ibytes += counters[ifc_ibytes]; |
| 2618 | data->ifi_obytes += counters[ifc_obytes]; |
| 2619 | data->ifi_imcasts += counters[ifc_imcasts]; |
| 2620 | data->ifi_omcasts += counters[ifc_omcasts]; |
| 2621 | data->ifi_iqdrops += counters[ifc_iqdrops]; |
| 2622 | data->ifi_oqdrops += counters[ifc_oqdrops]; |
| 2623 | data->ifi_noproto += counters[ifc_noproto]; |
| 2624 | } |
| 2625 | |
| 2626 | for (i = 0; i < ifp->if_nifqs; i++) { |
| 2627 | struct ifqueue *ifq = ifp->if_ifqs[i]; |
| 2628 | |
| 2629 | ifq_add_data(ifq, data); |
| 2630 | } |
| 2631 | |
| 2632 | for (i = 0; i < ifp->if_niqs; i++) { |
| 2633 | struct ifiqueue *ifiq = ifp->if_iqs[i]; |
| 2634 | |
| 2635 | ifiq_add_data(ifiq, data); |
| 2636 | } |
| 2637 | } |
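if_getdata() is what backs the SIOCGIFDATA ioctl: the per-CPU counters and the per-queue statistics are folded into one struct if_data and copied out through ifr_data. A userland sketch reading those aggregated counters; the interface name is an example:

#include <sys/types.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <err.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	struct if_data ifd;
	struct ifreq ifr;
	int s;

	if ((s = socket(AF_INET, SOCK_DGRAM, 0)) == -1)
		err(1, "socket");

	memset(&ifr, 0, sizeof(ifr));
	memset(&ifd, 0, sizeof(ifd));
	strlcpy(ifr.ifr_name, "em0", sizeof(ifr.ifr_name));
	ifr.ifr_data = (caddr_t)&ifd;		/* kernel copies out here */
	if (ioctl(s, SIOCGIFDATA, &ifr) == -1)
		err(1, "SIOCGIFDATA");

	printf("ipackets %llu opackets %llu\n",
	    (unsigned long long)ifd.ifi_ipackets,
	    (unsigned long long)ifd.ifi_opackets);

	close(s);
	return (0);
}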
| 2638 | |
| 2639 | /* |
| 2640 | * Dummy functions installed in the ifnet during detach, in case protocols
| 2641 | * decide to fiddle with the interface while it is being detached.
| 2642 | */ |
| 2643 | void |
| 2644 | if_detached_qstart(struct ifqueue *ifq) |
| 2645 | { |
| 2646 | ifq_purge(ifq); |
| 2647 | } |
| 2648 | |
| 2649 | int |
| 2650 | if_detached_ioctl(struct ifnet *ifp, u_long a, caddr_t b) |
| 2651 | { |
| 2652 | return ENODEV19; |
| 2653 | } |
| 2654 | |
| 2655 | /* |
| 2656 | * Create interface group without members |
| 2657 | */ |
| 2658 | struct ifg_group * |
| 2659 | if_creategroup(const char *groupname) |
| 2660 | { |
| 2661 | struct ifg_group *ifg; |
| 2662 | |
| 2663 | if ((ifg = malloc(sizeof(*ifg), M_TEMP, M_NOWAIT)) == NULL) |
| 2664 | return (NULL); |
| 2665 | |
| 2666 | strlcpy(ifg->ifg_group, groupname, sizeof(ifg->ifg_group)); |
| 2667 | ifg->ifg_refcnt = 1; |
| 2668 | ifg->ifg_carp_demoted = 0; |
| 2669 | TAILQ_INIT(&ifg->ifg_members); |
| 2670 | #if NPF > 0 |
| 2671 | pfi_attach_ifgroup(ifg); |
| 2672 | #endif |
| 2673 | TAILQ_INSERT_TAIL(&ifg_head, ifg, ifg_next); |
| 2674 | |
| 2675 | return (ifg); |
| 2676 | } |
| 2677 | |
| 2678 | /* |
| 2679 | * Add a group to an interface |
| 2680 | */ |
| 2681 | int |
| 2682 | if_addgroup(struct ifnet *ifp, const char *groupname) |
| 2683 | { |
| 2684 | struct ifg_list *ifgl; |
| 2685 | struct ifg_group *ifg = NULL; |
| 2686 | struct ifg_member *ifgm; |
| 2687 | size_t namelen; |
| 2688 | |
| 2689 | namelen = strlen(groupname); |
| 2690 | if (namelen == 0 || namelen >= IFNAMSIZ || |
| 2691 | (groupname[namelen - 1] >= '0' && groupname[namelen - 1] <= '9')) |
| 2692 | return (EINVAL); |
| 2693 | |
| 2694 | TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next) |
| 2695 | if (!strcmp(ifgl->ifgl_group->ifg_group, groupname)) |
| 2696 | return (EEXIST); |
| 2697 | |
| 2698 | if ((ifgl = malloc(sizeof(*ifgl), M_TEMP, M_NOWAIT)) == NULL) |
| 2699 | return (ENOMEM); |
| 2700 | |
| 2701 | if ((ifgm = malloc(sizeof(*ifgm), M_TEMP, M_NOWAIT)) == NULL) { |
| 2702 | free(ifgl, M_TEMP, sizeof(*ifgl)); |
| 2703 | return (ENOMEM); |
| 2704 | } |
| 2705 | |
| 2706 | TAILQ_FOREACH(ifg, &ifg_head, ifg_next) |
| 2707 | if (!strcmp(ifg->ifg_group, groupname)) |
| 2708 | break; |
| 2709 | |
| 2710 | if (ifg == NULL) { |
| 2711 | ifg = if_creategroup(groupname); |
| 2712 | if (ifg == NULL) { |
| 2713 | free(ifgl, M_TEMP, sizeof(*ifgl)); |
| 2714 | free(ifgm, M_TEMP, sizeof(*ifgm)); |
| 2715 | return (ENOMEM); |
| 2716 | } |
| 2717 | } else |
| 2718 | ifg->ifg_refcnt++; |
| 2719 | KASSERT(ifg->ifg_refcnt != 0); |
| 2720 | |
| 2721 | ifgl->ifgl_group = ifg; |
| 2722 | ifgm->ifgm_ifp = ifp; |
| 2723 | |
| 2724 | TAILQ_INSERT_TAIL(&ifg->ifg_members, ifgm, ifgm_next); |
| 2725 | TAILQ_INSERT_TAIL(&ifp->if_groups, ifgl, ifgl_next); |
| 2726 | |
| 2727 | #if NPF > 0 |
| 2728 | pfi_group_addmember(groupname); |
| 2729 | #endif |
| 2730 | |
| 2731 | return (0); |
| 2732 | } |
| 2733 | |
| 2734 | /* |
| 2735 | * Remove a group from an interface |
| 2736 | */ |
| 2737 | int |
| 2738 | if_delgroup(struct ifnet *ifp, const char *groupname) |
| 2739 | { |
| 2740 | struct ifg_list *ifgl; |
| 2741 | struct ifg_member *ifgm; |
| 2742 | |
| 2743 | TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next) |
| 2744 | if (!strcmp(ifgl->ifgl_group->ifg_group, groupname)) |
| 2745 | break; |
| 2746 | if (ifgl == NULL) |
| 2747 | return (ENOENT); |
| 2748 | |
| 2749 | TAILQ_REMOVE(&ifp->if_groups, ifgl, ifgl_next); |
| 2750 | |
| 2751 | TAILQ_FOREACH(ifgm, &ifgl->ifgl_group->ifg_members, ifgm_next) |
| 2752 | if (ifgm->ifgm_ifp == ifp) |
| 2753 | break; |
| 2754 | |
| 2755 | if (ifgm != NULL) { |
| 2756 | TAILQ_REMOVE(&ifgl->ifgl_group->ifg_members, ifgm, ifgm_next); |
| 2757 | free(ifgm, M_TEMP, sizeof(*ifgm)); |
| 2758 | } |
| 2759 | |
| 2760 | #if NPF > 0 |
| 2761 | pfi_group_delmember(groupname); |
| 2762 | #endif |
| 2763 | |
| 2764 | KASSERT(ifgl->ifgl_group->ifg_refcnt != 0); |
| 2765 | if (--ifgl->ifgl_group->ifg_refcnt == 0) { |
| 2766 | TAILQ_REMOVE(&ifg_head, ifgl->ifgl_group, ifg_next); |
| 2767 | #if NPF > 0 |
| 2768 | pfi_detach_ifgroup(ifgl->ifgl_group); |
| 2769 | #endif |
| 2770 | free(ifgl->ifgl_group, M_TEMP, sizeof(*ifgl->ifgl_group)); |
| 2771 | } |
| 2772 | |
| 2773 | free(ifgl, M_TEMP, sizeof(*ifgl)); |
| 2774 | |
| 2775 | return (0); |
| 2776 | } |
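if_addgroup() and if_delgroup() are strictly reference counted: the group is created on the first join and freed once the last member leaves. Cloned interfaces normally get their driver-name group added for them during creation, so explicit calls are only needed for additional groups. A hedged sketch of the usual pairing, with hypothetical foo_* wrappers:

    /* Illustrative only; if_addgroup()/if_delgroup() are the real API. */
    int
    foo_join_mygroup(struct ifnet *ifp)
    {
    	/* group names may not end in a digit, see if_addgroup() above */
    	return (if_addgroup(ifp, "mygroup"));
    }

    void
    foo_leave_mygroup(struct ifnet *ifp)
    {
    	/* drops this membership; the group itself goes away at refcnt 0 */
    	(void)if_delgroup(ifp, "mygroup");
    }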
| 2777 | |
| 2778 | /* |
| 2779 | * Stores all groups from an interface in memory pointed |
| 2780 | * to by data |
| 2781 | */ |
| 2782 | int |
| 2783 | if_getgroup(caddr_t data, struct ifnet *ifp) |
| 2784 | { |
| 2785 | int len, error; |
| 2786 | struct ifg_list *ifgl; |
| 2787 | struct ifg_req ifgrq, *ifgp; |
| 2788 | struct ifgroupreq *ifgr = (struct ifgroupreq *)data; |
| 2789 | |
| 2790 | if (ifgr->ifgr_len == 0) { |
| 2791 | TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next) |
| 2792 | ifgr->ifgr_len += sizeof(struct ifg_req); |
| 2793 | return (0); |
| 2794 | } |
| 2795 | |
| 2796 | len = ifgr->ifgr_len; |
| 2797 | ifgp = ifgr->ifgr_groups; |
| 2798 | TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next) { |
| 2799 | if (len < sizeof(ifgrq)) |
| 2800 | return (EINVAL); |
| 2801 | bzero(&ifgrq, sizeof ifgrq); |
| 2802 | strlcpy(ifgrq.ifgrq_group, ifgl->ifgl_group->ifg_group, |
| 2803 | sizeof(ifgrq.ifgrq_group)); |
| 2804 | if ((error = copyout((caddr_t)&ifgrq, (caddr_t)ifgp, |
| 2805 | sizeof(struct ifg_req)))) |
| 2806 | return (error); |
| 2807 | len -= sizeof(ifgrq); |
| 2808 | ifgp++; |
| 2809 | } |
| 2810 | |
| 2811 | return (0); |
| 2812 | } |
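Note the two-pass contract: when ifgr_len is 0 the kernel only reports how many bytes the group list needs, and the caller then repeats the ioctl with a buffer of that size. A hedged userland sketch of that dance (untested, error handling kept minimal); the same pattern applies to if_getgroupmembers() and if_getgrouplist() below via SIOCGIFGMEMB and SIOCGIFGLIST:

    #include <sys/types.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <sys/sockio.h>
    #include <net/if.h>
    #include <err.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <unistd.h>

    void
    print_groups(const char *ifname)
    {
    	struct ifgroupreq ifgr;
    	struct ifg_req *ifg;
    	unsigned int i, n;
    	int s;

    	if ((s = socket(AF_INET, SOCK_DGRAM, 0)) == -1)
    		err(1, "socket");

    	memset(&ifgr, 0, sizeof(ifgr));
    	strlcpy(ifgr.ifgr_name, ifname, sizeof(ifgr.ifgr_name));

    	/* pass 1: ifgr_len == 0 asks only for the required size */
    	if (ioctl(s, SIOCGIFGROUP, &ifgr) == -1)
    		err(1, "SIOCGIFGROUP size");

    	if ((ifgr.ifgr_groups = calloc(1, ifgr.ifgr_len)) == NULL)
    		err(1, "calloc");

    	/* pass 2: same request, now with a buffer for the ifg_req array */
    	if (ioctl(s, SIOCGIFGROUP, &ifgr) == -1)
    		err(1, "SIOCGIFGROUP");

    	n = ifgr.ifgr_len / sizeof(struct ifg_req);
    	for (i = 0, ifg = ifgr.ifgr_groups; i < n; i++, ifg++)
    		printf("%s\n", ifg->ifgrq_group);

    	free(ifgr.ifgr_groups);
    	close(s);
    }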
| 2813 | |
| 2814 | /* |
| 2815 | * Stores all members of a group in memory pointed to by data |
| 2816 | */ |
| 2817 | int |
| 2818 | if_getgroupmembers(caddr_t data) |
| 2819 | { |
| 2820 | struct ifgroupreq *ifgr = (struct ifgroupreq *)data; |
| 2821 | struct ifg_group *ifg; |
| 2822 | struct ifg_member *ifgm; |
| 2823 | struct ifg_req ifgrq, *ifgp; |
| 2824 | int len, error; |
| 2825 | |
| 2826 | TAILQ_FOREACH(ifg, &ifg_head, ifg_next) |
| 2827 | if (!strcmp(ifg->ifg_group, ifgr->ifgr_name)) |
| 2828 | break; |
| 2829 | if (ifg == NULL) |
| 2830 | return (ENOENT); |
| 2831 | |
| 2832 | if (ifgr->ifgr_len == 0) { |
| 2833 | TAILQ_FOREACH(ifgm, &ifg->ifg_members, ifgm_next) |
| 2834 | ifgr->ifgr_len += sizeof(ifgrq); |
| 2835 | return (0); |
| 2836 | } |
| 2837 | |
| 2838 | len = ifgr->ifgr_len; |
| 2839 | ifgp = ifgr->ifgr_groups; |
| 2840 | TAILQ_FOREACH(ifgm, &ifg->ifg_members, ifgm_next) { |
| 2841 | if (len < sizeof(ifgrq)) |
| 2842 | return (EINVAL); |
| 2843 | bzero(&ifgrq, sizeof ifgrq); |
| 2844 | strlcpy(ifgrq.ifgrq_member, ifgm->ifgm_ifp->if_xname, |
| 2845 | sizeof(ifgrq.ifgrq_member)); |
| 2846 | if ((error = copyout((caddr_t)&ifgrq, (caddr_t)ifgp, |
| 2847 | sizeof(struct ifg_req)))) |
| 2848 | return (error); |
| 2849 | len -= sizeof(ifgrq); |
| 2850 | ifgp++; |
| 2851 | } |
| 2852 | |
| 2853 | return (0); |
| 2854 | } |
| 2855 | |
| 2856 | int |
| 2857 | if_getgroupattribs(caddr_t data) |
| 2858 | { |
| 2859 | struct ifgroupreq *ifgr = (struct ifgroupreq *)data; |
| 2860 | struct ifg_group *ifg; |
| 2861 | |
| 2862 | TAILQ_FOREACH(ifg, &ifg_head, ifg_next) |
| 2863 | if (!strcmp(ifg->ifg_group, ifgr->ifgr_name)) |
| 2864 | break; |
| 2865 | if (ifg == NULL) |
| 2866 | return (ENOENT); |
| 2867 | |
| 2868 | ifgr->ifgr_attrib.ifg_carp_demoted = ifg->ifg_carp_demoted; |
| 2869 | |
| 2870 | return (0); |
| 2871 | } |
| 2872 | |
| 2873 | int |
| 2874 | if_setgroupattribs(caddr_t data) |
| 2875 | { |
| 2876 | struct ifgroupreq *ifgr = (struct ifgroupreq *)data; |
| 2877 | struct ifg_group *ifg; |
| 2878 | struct ifg_member *ifgm; |
| 2879 | int demote; |
| 2880 | |
| 2881 | TAILQ_FOREACH(ifg, &ifg_head, ifg_next) |
| 2882 | if (!strcmp(ifg->ifg_group, ifgr->ifgr_name)) |
| 2883 | break; |
| 2884 | if (ifg == NULL) |
| 2885 | return (ENOENT); |
| 2886 | |
| 2887 | demote = ifgr->ifgr_attrib.ifg_carp_demoted; |
| 2888 | if (demote + ifg->ifg_carp_demoted > 0xff || |
| 2889 | demote + ifg->ifg_carp_demoted < 0) |
| 2890 | return (EINVAL); |
| 2891 | |
| 2892 | ifg->ifg_carp_demoted += demote; |
| 2893 | |
| 2894 | TAILQ_FOREACH(ifgm, &ifg->ifg_members, ifgm_next) |
| 2895 | ifgm->ifgm_ifp->if_ioctl(ifgm->ifgm_ifp, SIOCSIFGATTR, data); |
| 2896 | |
| 2897 | return (0); |
| 2898 | } |
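The only per-group attribute is the carp demotion counter, and SIOCSIFGATTR treats ifg_carp_demoted as a signed delta (the sum is kept within 0..255), not an absolute value; each member interface is then told about the change through its own if_ioctl(). A small hedged fragment in the spirit of ifconfig(8)'s carpdemote handling, reusing a UDP socket s as in the previous sketch:

    struct ifgroupreq ifgr;

    memset(&ifgr, 0, sizeof(ifgr));
    strlcpy(ifgr.ifgr_name, "carp", sizeof(ifgr.ifgr_name));	/* group name */

    /* read the current demotion count */
    if (ioctl(s, SIOCGIFGATTR, &ifgr) == -1)
    	err(1, "SIOCGIFGATTR");
    printf("carp demotion: %d\n", ifgr.ifgr_attrib.ifg_carp_demoted);

    /* bump it by one: the field carries a delta, not a new total */
    ifgr.ifgr_attrib.ifg_carp_demoted = 1;
    if (ioctl(s, SIOCSIFGATTR, &ifgr) == -1)
    	err(1, "SIOCSIFGATTR");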
| 2899 | |
| 2900 | /* |
| 2901 | * Stores all groups in memory pointed to by data |
| 2902 | */ |
| 2903 | int |
| 2904 | if_getgrouplist(caddr_t data) |
| 2905 | { |
| 2906 | struct ifgroupreq *ifgr = (struct ifgroupreq *)data; |
| 2907 | struct ifg_group *ifg; |
| 2908 | struct ifg_req ifgrq, *ifgp; |
| 2909 | int len, error; |
| 2910 | |
| 2911 | if (ifgr->ifgr_len == 0) { |
| 2912 | TAILQ_FOREACH(ifg, &ifg_head, ifg_next) |
| 2913 | ifgr->ifgr_len += sizeof(ifgrq); |
| 2914 | return (0); |
| 2915 | } |
| 2916 | |
| 2917 | len = ifgr->ifgr_len; |
| 2918 | ifgp = ifgr->ifgr_groups; |
| 2919 | TAILQ_FOREACH(ifg, &ifg_head, ifg_next) { |
| 2920 | if (len < sizeof(ifgrq)) |
| 2921 | return (EINVAL); |
| 2922 | bzero(&ifgrq, sizeof ifgrq); |
| 2923 | strlcpy(ifgrq.ifgrq_group, ifg->ifg_group, |
| 2924 | sizeof(ifgrq.ifgrq_group)); |
| 2925 | if ((error = copyout((caddr_t)&ifgrq, (caddr_t)ifgp, |
| 2926 | sizeof(struct ifg_req)))) |
| 2927 | return (error); |
| 2928 | len -= sizeof(ifgrq); |
| 2929 | ifgp++; |
| 2930 | } |
| 2931 | |
| 2932 | return (0); |
| 2933 | } |
| 2934 | |
| 2935 | void |
| 2936 | if_group_routechange(struct sockaddr *dst, struct sockaddr *mask) |
| 2937 | { |
| 2938 | switch (dst->sa_family) { |
| 2939 | case AF_INET: |
| 2940 | if (satosin(dst)->sin_addr.s_addr == INADDR_ANY && |
| 2941 | mask && (mask->sa_len == 0 || |
| 2942 | satosin(mask)->sin_addr.s_addr == INADDR_ANY)) |
| 2943 | if_group_egress_build(); |
| 2944 | break; |
| 2945 | #ifdef INET6 |
| 2946 | case AF_INET6: |
| 2947 | if (IN6_ARE_ADDR_EQUAL(&(satosin6(dst))->sin6_addr, |
| 2948 | &in6addr_any) && mask && (mask->sa_len == 0 || |
| 2949 | IN6_ARE_ADDR_EQUAL(&(satosin6(mask))->sin6_addr, |
| 2950 | &in6addr_any))) |
| 2951 | if_group_egress_build(); |
| 2952 | break; |
| 2953 | #endif |
| 2954 | } |
| 2955 | } |
| 2956 | |
| 2957 | int |
| 2958 | if_group_egress_build(void) |
| 2959 | { |
| 2960 | struct ifnet *ifp; |
| 2961 | struct ifg_group *ifg; |
| 2962 | struct ifg_member *ifgm, *next; |
| 2963 | struct sockaddr_in sa_in; |
| 2964 | #ifdef INET6 |
| 2965 | struct sockaddr_in6 sa_in6; |
| 2966 | #endif |
| 2967 | struct rtentry *rt; |
| 2968 | |
| 2969 | TAILQ_FOREACH(ifg, &ifg_head, ifg_next) |
| 2970 | if (!strcmp(ifg->ifg_group, IFG_EGRESS)) |
| 2971 | break; |
| 2972 | |
| 2973 | if (ifg != NULL) |
| 2974 | TAILQ_FOREACH_SAFE(ifgm, &ifg->ifg_members, ifgm_next, next) |
| 2975 | if_delgroup(ifgm->ifgm_ifp, IFG_EGRESS); |
| 2976 | |
| 2977 | bzero(&sa_in, sizeof(sa_in)); |
| 2978 | sa_in.sin_len = sizeof(sa_in); |
| 2979 | sa_in.sin_family = AF_INET; |
| 2980 | rt = rtable_lookup(0, sintosa(&sa_in), sintosa(&sa_in), NULL, RTP_ANY); |
| 2981 | while (rt != NULL) { |
| 2982 | ifp = if_get(rt->rt_ifidx); |
| 2983 | if (ifp != NULL) { |
| 2984 | if_addgroup(ifp, IFG_EGRESS); |
| 2985 | if_put(ifp); |
| 2986 | } |
| 2987 | rt = rtable_iterate(rt); |
| 2988 | } |
| 2989 | |
| 2990 | #ifdef INET6 |
| 2991 | bcopy(&sa6_any, &sa_in6, sizeof(sa_in6)); |
| 2992 | rt = rtable_lookup(0, sin6tosa(&sa_in6), sin6tosa(&sa_in6), NULL, |
| 2993 | RTP_ANY); |
| 2994 | while (rt != NULL) { |
| 2995 | ifp = if_get(rt->rt_ifidx); |
| 2996 | if (ifp != NULL) { |
| 2997 | if_addgroup(ifp, IFG_EGRESS); |
| 2998 | if_put(ifp); |
| 2999 | } |
| 3000 | rt = rtable_iterate(rt); |
| 3001 | } |
| 3002 | #endif /* INET6 */ |
| 3003 | |
| 3004 | return (0); |
| 3005 | } |
| 3006 | |
| 3007 | /* |
| 3008 | * Set/clear promiscuous mode on interface ifp based on the truth value |
| 3009 | * of pswitch. The calls are reference counted so that only the first |
| 3010 | * "on" request actually has an effect, as does the final "off" request. |
| 3011 | * Results are undefined if the "off" and "on" requests are not matched. |
| 3012 | */ |
| 3013 | int |
| 3014 | ifpromisc(struct ifnet *ifp, int pswitch) |
| 3015 | { |
| 3016 | struct ifreq ifr; |
| 3017 | unsigned short oif_flags; |
| 3018 | int oif_pcount, error; |
| 3019 | |
| 3020 | NET_ASSERT_LOCKED(); /* modifying if_flags and if_pcount */ |
| 3021 | |
| 3022 | oif_flags = ifp->if_flags; |
| 3023 | oif_pcount = ifp->if_pcount; |
| 3024 | if (pswitch) { |
| 3025 | if (ifp->if_pcount++ != 0) |
| 3026 | return (0); |
| 3027 | ifp->if_flags |= IFF_PROMISC; |
| 3028 | } else { |
| 3029 | if (--ifp->if_pcount > 0) |
| 3030 | return (0); |
| 3031 | ifp->if_flags &= ~IFF_PROMISC; |
| 3032 | } |
| 3033 | |
| 3034 | if ((ifp->if_flags & IFF_UP) == 0) |
| 3035 | return (0); |
| 3036 | |
| 3037 | memset(&ifr, 0, sizeof(ifr)); |
| 3038 | ifr.ifr_flags = ifp->if_flags; |
| 3039 | error = ((*ifp->if_ioctl)(ifp, SIOCSIFFLAGS, (caddr_t)&ifr)); |
| 3040 | if (error) { |
| 3041 | ifp->if_flags = oif_flags; |
| 3042 | ifp->if_pcount = oif_pcount; |
| 3043 | } |
| 3044 | |
| 3045 | return (error); |
| 3046 | } |
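Because the requests are counted in if_pcount, every consumer must balance each "on" call with exactly one "off"; only the outermost pair actually toggles IFF_PROMISC and pushes SIOCSIFFLAGS down to the driver. A minimal hedged sketch of that bracket from kernel code, with hypothetical foo_* functions and the caller holding the net lock as asserted above:

    int
    foo_capture_start(struct ifnet *ifp)
    {
    	/* first "on" sets IFF_PROMISC and notifies the driver */
    	return (ifpromisc(ifp, 1));
    }

    void
    foo_capture_stop(struct ifnet *ifp)
    {
    	/* last matching "off" clears IFF_PROMISC again */
    	(void)ifpromisc(ifp, 0);
    }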
| 3047 | |
| 3048 | void |
| 3049 | ifa_add(struct ifnet *ifp, struct ifaddr *ifa) |
| 3050 | { |
| 3051 | TAILQ_INSERT_TAIL(&ifp->if_addrlist, ifa, ifa_list); |
| 3052 | } |
| 3053 | |
| 3054 | void |
| 3055 | ifa_del(struct ifnet *ifp, struct ifaddr *ifa) |
| 3056 | { |
| 3057 | TAILQ_REMOVE(&ifp->if_addrlist, ifa, ifa_list); |
| 3058 | } |
| 3059 | |
| 3060 | void |
| 3061 | ifa_update_broadaddr(struct ifnet *ifp, struct ifaddr *ifa, struct sockaddr *sa) |
| 3062 | { |
| 3063 | if (ifa->ifa_broadaddr->sa_len != sa->sa_len) |
| 3064 | panic("ifa_update_broadaddr does not support dynamic length"); |
| 3065 | bcopy(sa, ifa->ifa_broadaddr, sa->sa_len); |
| 3066 | } |
| 3067 | |
| 3068 | #ifdef DDB |
| 3069 | /* debug function, can be called from ddb> */ |
| 3070 | void |
| 3071 | ifa_print_all(void) |
| 3072 | { |
| 3073 | struct ifnet *ifp; |
| 3074 | struct ifaddr *ifa; |
| 3075 | |
| 3076 | TAILQ_FOREACH(ifp, &ifnet, if_list) { |
| 3077 | TAILQ_FOREACH(ifa, &ifp->if_addrlist, ifa_list) { |
| 3078 | char addr[INET6_ADDRSTRLEN]; |
| 3079 | |
| 3080 | switch (ifa->ifa_addr->sa_family) { |
| 3081 | case AF_INET: |
| 3082 | printf("%s", inet_ntop(AF_INET, |
| 3083 | &satosin(ifa->ifa_addr)->sin_addr, |
| 3084 | addr, sizeof(addr))); |
| 3085 | break; |
| 3086 | #ifdef INET6 |
| 3087 | case AF_INET6: |
| 3088 | printf("%s", inet_ntop(AF_INET6, |
| 3089 | &(satosin6(ifa->ifa_addr))->sin6_addr, |
| 3090 | addr, sizeof(addr))); |
| 3091 | break; |
| 3092 | #endif |
| 3093 | } |
| 3094 | printf(" on %s\n", ifp->if_xname); |
| 3095 | } |
| 3096 | } |
| 3097 | } |
| 3098 | #endif /* DDB */ |
| 3099 | |
| 3100 | void |
| 3101 | ifnewlladdr(struct ifnet *ifp) |
| 3102 | { |
| 3103 | #ifdef INET6 |
| 3104 | struct ifaddr *ifa; |
| 3105 | #endif |
| 3106 | struct ifreq ifrq; |
| 3107 | short up; |
| 3108 | |
| 3109 | NET_ASSERT_LOCKED(); /* for ioctl and in6 */ |
| 3110 | KERNEL_ASSERT_LOCKED(); /* for if_flags */ |
| 3111 | |
| 3112 | up = ifp->if_flags & IFF_UP; |
| 3113 | |
| 3114 | if (up) { |
| 3115 | /* go down for a moment... */ |
| 3116 | ifp->if_flags &= ~IFF_UP; |
| 3117 | ifrq.ifr_flags = ifp->if_flags; |
| 3118 | (*ifp->if_ioctl)(ifp, SIOCSIFFLAGS, (caddr_t)&ifrq); |
| 3119 | } |
| 3120 | |
| 3121 | ifp->if_flags |= IFF_UP; |
| 3122 | ifrq.ifr_flags = ifp->if_flags; |
| 3123 | (*ifp->if_ioctl)(ifp, SIOCSIFFLAGS, (caddr_t)&ifrq); |
| 3124 | |
| 3125 | #ifdef INET6 |
| 3126 | /* |
| 3127 | * Update the link-local address. Don't do it if we're |
| 3128 | * a router to avoid confusing hosts on the network. |
| 3129 | */ |
| 3130 | if (!ip6_forwarding) { |
| 3131 | ifa = &in6ifa_ifpforlinklocal(ifp, 0)->ia_ifa; |
| 3132 | if (ifa) { |
| 3133 | in6_purgeaddr(ifa); |
| 3134 | if_hooks_run(&ifp->if_addrhooks); |
| 3135 | in6_ifattach(ifp); |
| 3136 | } |
| 3137 | } |
| 3138 | #endif |
| 3139 | if (!up) { |
| 3140 | /* go back down */ |
| 3141 | ifp->if_flags &= ~IFF_UP; |
| 3142 | ifrq.ifr_flags = ifp->if_flags; |
| 3143 | (*ifp->if_ioctl)(ifp, SIOCSIFFLAGS, (caddr_t)&ifrq); |
| 3144 | } |
| 3145 | } |
| 3146 | |
| 3147 | void |
| 3148 | if_addrhook_add(struct ifnet *ifp, struct task *t) |
| 3149 | { |
| 3150 | mtx_enter(&if_hooks_mtx); |
| 3151 | TAILQ_INSERT_TAIL(&ifp->if_addrhooks, t, t_entry); |
| 3152 | mtx_leave(&if_hooks_mtx); |
| 3153 | } |
| 3154 | |
| 3155 | void |
| 3156 | if_addrhook_del(struct ifnet *ifp, struct task *t) |
| 3157 | { |
| 3158 | mtx_enter(&if_hooks_mtx); |
| 3159 | TAILQ_REMOVE(&ifp->if_addrhooks, t, t_entry); |
| 3160 | mtx_leave(&if_hooks_mtx); |
| 3161 | } |
| 3162 | |
| 3163 | void |
| 3164 | if_addrhooks_run(struct ifnet *ifp) |
| 3165 | { |
| 3166 | if_hooks_run(&ifp->if_addrhooks); |
| 3167 | } |
| 3168 | |
| 3169 | void |
| 3170 | if_rxr_init(struct if_rxring *rxr, u_int lwm, u_int hwm) |
| 3171 | { |
| 3172 | extern int ticks; |
| 3173 | |
| 3174 | memset(rxr, 0, sizeof(*rxr)); |
| 3175 | |
| 3176 | rxr->rxr_adjusted = ticks; |
| 3177 | rxr->rxr_cwm = rxr->rxr_lwm = lwm; |
| 3178 | rxr->rxr_hwm = hwm; |
| 3179 | } |
| 3180 | |
| 3181 | static inline void |
| 3182 | if_rxr_adjust_cwm(struct if_rxring *rxr) |
| 3183 | { |
| 3184 | extern int ticks; |
| 3185 | |
| 3186 | if (rxr->rxr_alive >= rxr->rxr_lwm) |
| 3187 | return; |
| 3188 | else if (rxr->rxr_cwm < rxr->rxr_hwm) |
| 3189 | rxr->rxr_cwm++; |
| 3190 | |
| 3191 | rxr->rxr_adjusted = ticks; |
| 3192 | } |
| 3193 | |
| 3194 | void |
| 3195 | if_rxr_livelocked(struct if_rxring *rxr) |
| 3196 | { |
| 3197 | extern int ticks; |
| 3198 | |
| 3199 | if (ticks - rxr->rxr_adjusted >= 1) { |
| 3200 | if (rxr->rxr_cwm > rxr->rxr_lwm) |
| 3201 | rxr->rxr_cwm--; |
| 3202 | |
| 3203 | rxr->rxr_adjusted = ticks; |
| 3204 | } |
| 3205 | } |
| 3206 | |
| 3207 | u_int |
| 3208 | if_rxr_get(struct if_rxring *rxr, u_int max) |
| 3209 | { |
| 3210 | extern int ticks; |
| 3211 | u_int diff; |
| 3212 | |
| 3213 | if (ticks - rxr->rxr_adjusted >= 1) { |
| 3214 | /* we're free to try for an adjustment */ |
| 3215 | if_rxr_adjust_cwm(rxr); |
| 3216 | } |
| 3217 | |
| 3218 | if (rxr->rxr_alive >= rxr->rxr_cwm) |
| 3219 | return (0); |
| 3220 | |
| 3221 | diff = min(rxr->rxr_cwm - rxr->rxr_alive, max); |
| 3222 | rxr->rxr_alive += diff; |
| 3223 | |
| 3224 | return (diff); |
| 3225 | } |
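The if_rxring helpers give a driver a self-tuning budget of receive slots: if_rxr_get() hands out up to the current watermark, if_rxr_livelocked() shrinks the watermark under pressure, and if_rxr_adjust_cwm() grows it back once the number of outstanding slots drops below the low watermark. A hedged sketch of a typical refill loop for a hypothetical foo(4) ring, assuming the if_rxr_put() helper from <net/if_var.h>; FOO_NRXDESC and foo_rx_alloc() are made up:

    void
    foo_rx_fill(struct foo_softc *sc)
    {
    	u_int slots;

    	/* ask for permission to fill up to the hardware ring size */
    	for (slots = if_rxr_get(&sc->sc_rx_ring, FOO_NRXDESC);
    	    slots > 0; slots--) {
    		if (foo_rx_alloc(sc) != 0)
    			break;		/* mbuf allocation failed */
    	}

    	/* hand back whatever we could not actually put on the ring */
    	if_rxr_put(&sc->sc_rx_ring, slots);
    }

Drivers typically pair this with a call to if_rxr_livelocked() when the stack reports input backpressure, so the ring shrinks instead of feeding a livelock.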
| 3226 | |
| 3227 | int |
| 3228 | if_rxr_info_ioctl(struct if_rxrinfo *uifri, u_int t, struct if_rxring_info *e) |
| 3229 | { |
| 3230 | struct if_rxrinfo kifri; |
| 3231 | int error; |
| 3232 | u_int n; |
| 3233 | |
| 3234 | error = copyin(uifri, &kifri, sizeof(kifri)); |
| 3235 | if (error) |
| 3236 | return (error); |
| 3237 | |
| 3238 | n = min(t, kifri.ifri_total); |
| 3239 | kifri.ifri_total = t; |
| 3240 | |
| 3241 | if (n > 0) { |
| 3242 | error = copyout(e, kifri.ifri_entries, sizeof(*e) * n); |
| 3243 | if (error) |
| 3244 | return (error); |
| 3245 | } |
| 3246 | |
| 3247 | return (copyout(&kifri, uifri, sizeof(kifri))); |
| 3248 | } |
| 3249 | |
| 3250 | int |
| 3251 | if_rxr_ioctl(struct if_rxrinfo *ifri, const char *name, u_int size, |
| 3252 | struct if_rxring *rxr) |
| 3253 | { |
| 3254 | struct if_rxring_info ifr; |
| 3255 | |
| 3256 | memset(&ifr, 0, sizeof(ifr)); |
| 3257 | |
| 3258 | if (name != NULL) |
| 3259 | strlcpy(ifr.ifr_name, name, sizeof(ifr.ifr_name)); |
| 3260 | |
| 3261 | ifr.ifr_size = size; |
| 3262 | ifr.ifr_info = *rxr; |
| 3263 | |
| 3264 | return (if_rxr_info_ioctl(ifri, 1, &ifr)); |
| 3265 | } |
| 3266 | |
| 3267 | /* |
| 3268 | * Network stack input queues. |
| 3269 | */ |
| 3270 | |
| 3271 | void |
| 3272 | niq_init(struct niqueue *niq, u_int maxlen, u_int isr) |
| 3273 | { |
| 3274 | mq_init(&niq->ni_q, maxlen, IPL_NET); |
| 3275 | niq->ni_isr = isr; |
| 3276 | } |
| 3277 | |
| 3278 | int |
| 3279 | niq_enqueue(struct niqueue *niq, struct mbuf *m) |
| 3280 | { |
| 3281 | int rv; |
| 3282 | |
| 3283 | rv = mq_enqueue(&niq->ni_q, m); |
| 3284 | if (rv == 0) |
| 3285 | schednetisr(niq->ni_isr); |
| 3286 | else |
| 3287 | if_congestion(); |
| 3288 | |
| 3289 | return (rv); |
| 3290 | } |
| 3291 | |
| 3292 | int |
| 3293 | niq_enlist(struct niqueue *niq, struct mbuf_list *ml) |
| 3294 | { |
| 3295 | int rv; |
| 3296 | |
| 3297 | rv = mq_enlist(&niq->ni_q, ml); |
| 3298 | if (rv == 0) |
| 3299 | schednetisr(niq->ni_isr); |
| 3300 | else |
| 3301 | if_congestion(); |
| 3302 | |
| 3303 | return (rv); |
| 3304 | } |
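A niqueue is simply an IPL_NET mbuf_queue plus the netisr bit to raise when something is queued; on a full queue the mbuf is dropped and if_congestion() is signalled instead. A hedged sketch of a protocol input queue, with a made-up NETISR_FOO bit and queue depth; the NIQUEUE_INITIALIZER pattern follows existing users such as the ARP input queue:

    /* Hypothetical protocol input queue; NETISR_FOO and the depth are made up. */
    struct niqueue fooinq = NIQUEUE_INITIALIZER(64, NETISR_FOO);

    int
    foo_input_enqueue(struct mbuf *m)
    {
    	/* niq_enqueue() schedules the softnet handler on success */
    	if (niq_enqueue(&fooinq, m) != 0)
    		return (ENOBUFS);	/* queue full; mbuf already freed */
    	return (0);
    }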
| 3305 | |
| 3306 | __dead void |
| 3307 | unhandled_af(int af) |
| 3308 | { |
| 3309 | panic("unhandled af %d", af); |
| 3310 | } |
| 3311 | |
| 3312 | struct taskq * |
| 3313 | net_tq(unsigned int ifindex) |
| 3314 | { |
| 3315 | struct taskq *t = NULL; |
| 3316 | static int nettaskqs; |
| 3317 | |
| 3318 | if (nettaskqs == 0) |
| 3319 | nettaskqs = min(NET_TASKQ, ncpus); |
| 3320 | |
| 3321 | t = nettqmp[ifindex % nettaskqs]; |
| 3322 | |
| 3323 | return (t); |
| 3324 | } |
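net_tq() hashes an interface index onto one of the softnet task queues, so deferred work for a given interface is always serialized on the same queue. A short hedged sketch using task(9), with hypothetical foo names:

    /* At attach time: task_set(&sc->sc_tick_task, foo_tick, sc); */
    void
    foo_schedule_tick(struct foo_softc *sc)
    {
    	struct ifnet *ifp = &sc->sc_ac.ac_if;

    	/* runs foo_tick(sc) on this interface's softnet taskq */
    	task_add(net_tq(ifp->if_index), &sc->sc_tick_task);
    }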