| File: | dev/ic/re.c |
| Warning: | line 1765, column 16 Access to field 'rl_cmdstat' results in a dereference of an undefined pointer value (loaded from variable 'd') |
| 1 | /* $OpenBSD: re.c,v 1.216 2023/11/10 15:51:20 bluhm Exp $ */ | |||
| 2 | /* $FreeBSD: if_re.c,v 1.31 2004/09/04 07:54:05 ru Exp $ */ | |||
| 3 | /* | |||
| 4 | * Copyright (c) 1997, 1998-2003 | |||
| 5 | * Bill Paul <wpaul@windriver.com>. All rights reserved. | |||
| 6 | * | |||
| 7 | * Redistribution and use in source and binary forms, with or without | |||
| 8 | * modification, are permitted provided that the following conditions | |||
| 9 | * are met: | |||
| 10 | * 1. Redistributions of source code must retain the above copyright | |||
| 11 | * notice, this list of conditions and the following disclaimer. | |||
| 12 | * 2. Redistributions in binary form must reproduce the above copyright | |||
| 13 | * notice, this list of conditions and the following disclaimer in the | |||
| 14 | * documentation and/or other materials provided with the distribution. | |||
| 15 | * 3. All advertising materials mentioning features or use of this software | |||
| 16 | * must display the following acknowledgement: | |||
| 17 | * This product includes software developed by Bill Paul. | |||
| 18 | * 4. Neither the name of the author nor the names of any co-contributors | |||
| 19 | * may be used to endorse or promote products derived from this software | |||
| 20 | * without specific prior written permission. | |||
| 21 | * | |||
| 22 | * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND | |||
| 23 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | |||
| 24 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | |||
| 25 | * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD | |||
| 26 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | |||
| 27 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | |||
| 28 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | |||
| 29 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | |||
| 30 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | |||
| 31 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF | |||
| 32 | * THE POSSIBILITY OF SUCH DAMAGE. | |||
| 33 | */ | |||
| 34 | ||||
| 35 | /* | |||
| 36 | * Realtek 8139C+/8169/8169S/8110S PCI NIC driver | |||
| 37 | * | |||
| 38 | * Written by Bill Paul <wpaul@windriver.com> | |||
| 39 | * Senior Networking Software Engineer | |||
| 40 | * Wind River Systems | |||
| 41 | */ | |||
| 42 | ||||
| 43 | /* | |||
| 44 | * This driver is designed to support Realtek's next generation of | |||
| 45 | * 10/100 and 10/100/1000 PCI ethernet controllers. There are currently | |||
| 46 | * seven devices in this family: the RTL8139C+, the RTL8169, the RTL8169S, | |||
| 47 | * RTL8110S, the RTL8168, the RTL8111 and the RTL8101E. | |||
| 48 | * | |||
| 49 | * The 8139C+ is a 10/100 ethernet chip. It is backwards compatible | |||
| 50 | * with the older 8139 family, however it also supports a special | |||
| 51 | * C+ mode of operation that provides several new performance enhancing | |||
| 52 | * features. These include: | |||
| 53 | * | |||
| 54 | * o Descriptor based DMA mechanism. Each descriptor represents | |||
| 55 | * a single packet fragment. Data buffers may be aligned on | |||
| 56 | * any byte boundary. | |||
| 57 | * | |||
| 58 | * o 64-bit DMA | |||
| 59 | * | |||
| 60 | * o TCP/IP checksum offload for both RX and TX | |||
| 61 | * | |||
| 62 | * o High and normal priority transmit DMA rings | |||
| 63 | * | |||
| 64 | * o VLAN tag insertion and extraction | |||
| 65 | * | |||
| 66 | * o TCP large send (segmentation offload) | |||
| 67 | * | |||
| 68 | * Like the 8139, the 8139C+ also has a built-in 10/100 PHY. The C+ | |||
| 69 | * programming API is fairly straightforward. The RX filtering, EEPROM | |||
| 70 | * access and PHY access are the same as on the older 8139 series | |||
| 71 | * chips. | |||
| 72 | * | |||
| 73 | * The 8169 is a 64-bit 10/100/1000 gigabit ethernet MAC. It has almost the | |||
| 74 | * same programming API and feature set as the 8139C+ with the following | |||
| 75 | * differences and additions: | |||
| 76 | * | |||
| 77 | * o 1000Mbps mode | |||
| 78 | * | |||
| 79 | * o Jumbo frames | |||
| 80 | * | |||
| 81 | * o GMII and TBI ports/registers for interfacing with copper | |||
| 82 | * or fiber PHYs | |||
| 83 | * | |||
| 84 | * o RX and TX DMA rings can have up to 1024 descriptors | |||
| 85 | * (the 8139C+ allows a maximum of 64) | |||
| 86 | * | |||
| 87 | * o Slight differences in register layout from the 8139C+ | |||
| 88 | * | |||
| 89 | * The TX start and timer interrupt registers are at different locations | |||
| 90 | * on the 8169 than they are on the 8139C+. Also, the status word in the | |||
| 91 | * RX descriptor has a slightly different bit layout. The 8169 does not | |||
| 92 | * have a built-in PHY. Most reference boards use a Marvell 88E1000 'Alaska' | |||
| 93 | * copper gigE PHY. | |||
| 94 | * | |||
| 95 | * The 8169S/8110S 10/100/1000 devices have built-in copper gigE PHYs | |||
| 96 | * (the 'S' stands for 'single-chip'). These devices have the same | |||
| 97 | * programming API as the older 8169, but also have some vendor-specific | |||
| 98 | * registers for the on-board PHY. The 8110S is a LAN-on-motherboard | |||
| 99 | * part designed to be pin-compatible with the Realtek 8100 10/100 chip. | |||
| 100 | * | |||
| 101 | * This driver takes advantage of the RX and TX checksum offload and | |||
| 102 | * VLAN tag insertion/extraction features. It also implements TX | |||
| 103 | * interrupt moderation using the timer interrupt registers, which | |||
| 104 | * significantly reduces TX interrupt load. There is also support | |||
| 105 | * for jumbo frames; however, the 8169/8169S/8110S cannot transmit | |||
| 106 | * jumbo frames larger than 7440 bytes, so the maximum MTU possible | |||
| 107 | * with this driver is 7422 bytes. | |||
| 108 | */ | |||
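| | /* | |||
| | * The 7422-byte figure follows from the 7440-byte frame limit above: | |||
| | * an untagged Ethernet frame carries a 14-byte header and a 4-byte | |||
| | * CRC, so 7440 - 14 - 4 = 7422 bytes is the largest usable MTU. | |||
| | */ | |||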
| 109 | ||||
| 110 | #include "bpfilter.h" | |||
| 111 | #include "vlan.h" | |||
| 112 | #include "kstat.h" | |||
| 113 | ||||
| 114 | #include <sys/param.h> | |||
| 115 | #include <sys/endian.h> | |||
| 116 | #include <sys/systm.h> | |||
| 117 | #include <sys/sockio.h> | |||
| 118 | #include <sys/mbuf.h> | |||
| 119 | #include <sys/malloc.h> | |||
| 120 | #include <sys/kernel.h> | |||
| 121 | #include <sys/device.h> | |||
| 122 | #include <sys/timeout.h> | |||
| 123 | #include <sys/socket.h> | |||
| 124 | #include <sys/atomic.h> | |||
| 125 | ||||
| 126 | #include <machine/bus.h> | |||
| 127 | ||||
| 128 | #include <net/if.h> | |||
| 129 | #include <net/if_media.h> | |||
| 130 | ||||
| 131 | #include <netinet/in.h> | |||
| 132 | #include <netinet/ip.h> | |||
| 133 | #include <netinet/if_ether.h> | |||
| 134 | ||||
| 135 | #if NBPFILTER > 0 | |||
| 136 | #include <net/bpf.h> | |||
| 137 | #endif | |||
| 138 | ||||
| 139 | #if NKSTAT > 0 | |||
| 140 | #include <sys/kstat.h> | |||
| 141 | #endif | |||
| 142 | ||||
| 143 | #include <dev/mii/mii.h> | |||
| 144 | #include <dev/mii/miivar.h> | |||
| 145 | ||||
| 146 | #include <dev/pci/pcidevs.h> | |||
| 147 | ||||
| 148 | #include <dev/ic/rtl81x9reg.h> | |||
| 149 | #include <dev/ic/revar.h> | |||
| 150 | ||||
| 151 | #ifdef RE_DEBUG | |||
| 152 | int redebug = 0; | |||
| 153 | #define DPRINTF(x) do { if (redebug) printf x; } while (0) | |||
| 154 | #else | |||
| 155 | #define DPRINTF(x) | |||
| 156 | #endif | |||
| 157 | ||||
| 158 | static inline void re_set_bufaddr(struct rl_desc *, bus_addr_t); | |||
| 159 | ||||
| 160 | int re_encap(struct rl_softc *, unsigned int, struct mbuf *); | |||
| 161 | ||||
| 162 | int re_newbuf(struct rl_softc *); | |||
| 163 | int re_rx_list_init(struct rl_softc *); | |||
| 164 | void re_rx_list_fill(struct rl_softc *); | |||
| 165 | int re_tx_list_init(struct rl_softc *); | |||
| 166 | int re_rxeof(struct rl_softc *); | |||
| 167 | int re_txeof(struct rl_softc *); | |||
| 168 | void re_tick(void *); | |||
| 169 | void re_start(struct ifqueue *); | |||
| 170 | void re_txstart(void *); | |||
| 171 | int re_ioctl(struct ifnet *, u_long, caddr_t); | |||
| 172 | void re_watchdog(struct ifnet *); | |||
| 173 | int re_ifmedia_upd(struct ifnet *); | |||
| 174 | void re_ifmedia_sts(struct ifnet *, struct ifmediareq *); | |||
| 175 | ||||
| 176 | void re_set_jumbo(struct rl_softc *); | |||
| 177 | ||||
| 178 | void re_eeprom_putbyte(struct rl_softc *, int); | |||
| 179 | void re_eeprom_getword(struct rl_softc *, int, u_int16_t *); | |||
| 180 | void re_read_eeprom(struct rl_softc *, caddr_t, int, int); | |||
| 181 | ||||
| 182 | int re_gmii_readreg(struct device *, int, int); | |||
| 183 | void re_gmii_writereg(struct device *, int, int, int); | |||
| 184 | ||||
| 185 | int re_miibus_readreg(struct device *, int, int); | |||
| 186 | void re_miibus_writereg(struct device *, int, int, int); | |||
| 187 | void re_miibus_statchg(struct device *); | |||
| 188 | ||||
| 189 | void re_iff(struct rl_softc *); | |||
| 190 | ||||
| 191 | void re_setup_hw_im(struct rl_softc *); | |||
| 192 | void re_setup_sim_im(struct rl_softc *); | |||
| 193 | void re_disable_hw_im(struct rl_softc *); | |||
| 194 | void re_disable_sim_im(struct rl_softc *); | |||
| 195 | void re_config_imtype(struct rl_softc *, int); | |||
| 196 | void re_setup_intr(struct rl_softc *, int, int); | |||
| 197 | #ifndef SMALL_KERNEL | |||
| 198 | int re_wol(struct ifnet*, int); | |||
| 199 | #endif | |||
| 200 | #if NKSTAT > 0 | |||
| 201 | void re_kstat_attach(struct rl_softc *); | |||
| 202 | #endif | |||
| 203 | ||||
| 204 | void in_delayed_cksum(struct mbuf *); | |||
| 205 | ||||
| 206 | struct cfdriver re_cd = { | |||
| 207 | 0, "re", DV_IFNET | |||
| 208 | }; | |||
| 209 | ||||
| 210 | #define EE_SET(x) \ | |||
| 211 | CSR_WRITE_1(sc, RL_EECMD, \ | |||
| 212 | CSR_READ_1(sc, RL_EECMD) | x) | |||
| 213 | ||||
| 214 | #define EE_CLR(x) \ | |||
| 215 | CSR_WRITE_1(sc, RL_EECMD, \ | |||
| 216 | CSR_READ_1(sc, RL_EECMD) & ~x) | |||
| 217 | ||||
| 218 | #define RL_FRAMELEN(mtu) \ | |||
| 219 | (mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + \ | |||
| 220 | ETHER_VLAN_ENCAP_LEN) | |||
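| | /* | |||
| | * For example, with the standard 1500-byte MTU this evaluates to | |||
| | * RL_FRAMELEN(1500) = 1500 + 14 (header) + 4 (CRC) + 4 (VLAN tag) | |||
| | * = 1522 bytes of buffer space per received frame. | |||
| | */ | |||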
| 221 | ||||
| 222 | static const struct re_revision { | |||
| 223 | u_int32_t re_chipid; | |||
| 224 | const char *re_name; | |||
| 225 | } re_revisions[] = { | |||
| 226 | { RL_HWREV_8100, "RTL8100" }, | |||
| 227 | { RL_HWREV_8100E, "RTL8100E" }, | |||
| 228 | { RL_HWREV_8100E_SPIN2, "RTL8100E 2" }, | |||
| 229 | { RL_HWREV_8101, "RTL8101" }, | |||
| 230 | { RL_HWREV_8101E, "RTL8101E" }, | |||
| 231 | { RL_HWREV_8102E, "RTL8102E" }, | |||
| 232 | { RL_HWREV_8106E, "RTL8106E" }, | |||
| 233 | { RL_HWREV_8401E, "RTL8401E" }, | |||
| 234 | { RL_HWREV_8402, "RTL8402" }, | |||
| 235 | { RL_HWREV_8411, "RTL8411" }, | |||
| 236 | { RL_HWREV_8411B, "RTL8411B" }, | |||
| 237 | { RL_HWREV_8102EL, "RTL8102EL" }, | |||
| 238 | { RL_HWREV_8102EL_SPIN1, "RTL8102EL 1" }, | |||
| 239 | { RL_HWREV_8103E, "RTL8103E" }, | |||
| 240 | { RL_HWREV_8110S, "RTL8110S" }, | |||
| 241 | { RL_HWREV_8139CPLUS, "RTL8139C+" }, | |||
| 242 | { RL_HWREV_8168B_SPIN1, "RTL8168 1" }, | |||
| 243 | { RL_HWREV_8168B_SPIN2, "RTL8168 2" }, | |||
| 244 | { RL_HWREV_8168B_SPIN3, "RTL8168 3" }, | |||
| 245 | { RL_HWREV_8168C, "RTL8168C/8111C" }, | |||
| 246 | { RL_HWREV_8168C_SPIN2, "RTL8168C/8111C" }, | |||
| 247 | { RL_HWREV_8168CP, "RTL8168CP/8111CP" }, | |||
| 248 | { RL_HWREV_8168F, "RTL8168F/8111F" }, | |||
| 249 | { RL_HWREV_8168G, "RTL8168G/8111G" }, | |||
| 250 | { RL_HWREV_8168GU, "RTL8168GU/8111GU" }, | |||
| 251 | { RL_HWREV_8168H, "RTL8168H/8111H" }, | |||
| 252 | { RL_HWREV_8105E, "RTL8105E" }, | |||
| 253 | { RL_HWREV_8105E_SPIN1, "RTL8105E" }, | |||
| 254 | { RL_HWREV_8168D, "RTL8168D/8111D" }, | |||
| 255 | { RL_HWREV_8168DP, "RTL8168DP/8111DP" }, | |||
| 256 | { RL_HWREV_8168E, "RTL8168E/8111E" }, | |||
| 257 | { RL_HWREV_8168E_VL, "RTL8168E/8111E-VL" }, | |||
| 258 | { RL_HWREV_8168EP, "RTL8168EP/8111EP" }, | |||
| 259 | { RL_HWREV_8168FP, "RTL8168FP/8111FP" }, | |||
| 260 | { RL_HWREV_8169, "RTL8169" }, | |||
| 261 | { RL_HWREV_8169_8110SB, "RTL8169/8110SB" }, | |||
| 262 | { RL_HWREV_8169_8110SBL, "RTL8169SBL" }, | |||
| 263 | { RL_HWREV_8169_8110SCd, "RTL8169/8110SCd" }, | |||
| 264 | { RL_HWREV_8169_8110SCe, "RTL8169/8110SCe" }, | |||
| 265 | { RL_HWREV_8169S, "RTL8169S" }, | |||
| 266 | ||||
| 267 | { 0, NULL } | |||
| 268 | }; | |||
| 269 | ||||
| 270 | ||||
| 271 | static inline void | |||
| 272 | re_set_bufaddr(struct rl_desc *d, bus_addr_t addr) | |||
| 273 | { | |||
| 274 | d->rl_bufaddr_lo = htole32((uint32_t)addr); | |||
| 275 | if (sizeof(bus_addr_t) == sizeof(uint64_t)) | |||
| 276 | d->rl_bufaddr_hi = htole32((uint64_t)addr >> 32); | |||
| 277 | else | |||
| 278 | d->rl_bufaddr_hi = 0; | |||
| 279 | } | |||
| 280 | ||||
| 281 | /* | |||
| 282 | * Send a read command and address to the EEPROM, check for ACK. | |||
| 283 | */ | |||
| 284 | void | |||
| 285 | re_eeprom_putbyte(struct rl_softc *sc, int addr) | |||
| 286 | { | |||
| 287 | int d, i; | |||
| 288 | ||||
| 289 | d = addr | (RL_9346_READ << sc->rl_eewidth); | |||
| 290 | ||||
| 291 | /* | |||
| 292 | * Feed in each bit and strobe the clock. | |||
| 293 | */ | |||
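| | /* | |||
| | * The word being shifted out is the read opcode (RL_9346_READ, which | |||
| | * already includes the start bit) placed just above sc->rl_eewidth | |||
| | * address bits; it is clocked out MSB-first, one bit per iteration. | |||
| | */ | |||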
| 294 | ||||
| 295 | for (i = 1 << (sc->rl_eewidth + 3); i; i >>= 1) { | |||
| 296 | if (d & i) | |||
| 297 | EE_SET(RL_EE_DATAIN); | |||
| 298 | else | |||
| 299 | EE_CLR(RL_EE_DATAIN); | |||
| 300 | DELAY(100); | |||
| 301 | EE_SET(RL_EE_CLK); | |||
| 302 | DELAY(150); | |||
| 303 | EE_CLR(RL_EE_CLK); | |||
| 304 | DELAY(100); | |||
| 305 | } | |||
| 306 | } | |||
| 307 | ||||
| 308 | /* | |||
| 309 | * Read a word of data stored in the EEPROM at address 'addr.' | |||
| 310 | */ | |||
| 311 | void | |||
| 312 | re_eeprom_getword(struct rl_softc *sc, int addr, u_int16_t *dest) | |||
| 313 | { | |||
| 314 | int i; | |||
| 315 | u_int16_t word = 0; | |||
| 316 | ||||
| 317 | /* | |||
| 318 | * Send address of word we want to read. | |||
| 319 | */ | |||
| 320 | re_eeprom_putbyte(sc, addr); | |||
| 321 | ||||
| 322 | /* | |||
| 323 | * Start reading bits from EEPROM. | |||
| 324 | */ | |||
| 325 | for (i = 0x8000; i; i >>= 1) { | |||
| 326 | EE_SET(RL_EE_CLK); | |||
| 327 | DELAY(100); | |||
| 328 | if (CSR_READ_1(sc, RL_EECMD) & RL_EE_DATAOUT) | |||
| 329 | word |= i; | |||
| 330 | EE_CLR(RL_EE_CLK); | |||
| 331 | DELAY(100); | |||
| 332 | } | |||
| 333 | ||||
| 334 | *dest = word; | |||
| 335 | } | |||
| 336 | ||||
| 337 | /* | |||
| 338 | * Read a sequence of words from the EEPROM. | |||
| 339 | */ | |||
| 340 | void | |||
| 341 | re_read_eeprom(struct rl_softc *sc, caddr_t dest, int off, int cnt) | |||
| 342 | { | |||
| 343 | int i; | |||
| 344 | u_int16_t word = 0, *ptr; | |||
| 345 | ||||
| 346 | CSR_SETBIT_1(sc, RL_EECMD, RL_EEMODE_PROGRAM); | |||
| 347 | ||||
| 348 | DELAY(100); | |||
| 349 | ||||
| 350 | for (i = 0; i < cnt; i++) { | |||
| 351 | CSR_SETBIT_1(sc, RL_EECMD, RL_EE_SEL); | |||
| 352 | re_eeprom_getword(sc, off + i, &word); | |||
| 353 | CSR_CLRBIT_1(sc, RL_EECMD, RL_EE_SEL); | |||
| 354 | ptr = (u_int16_t *)(dest + (i * 2)); | |||
| 355 | *ptr = word; | |||
| 356 | } | |||
| 357 | ||||
| 358 | CSR_CLRBIT_1(sc, RL_EECMD, RL_EEMODE_PROGRAM); | |||
| 359 | } | |||
| 360 | ||||
| 361 | int | |||
| 362 | re_gmii_readreg(struct device *self, int phy, int reg) | |||
| 363 | { | |||
| 364 | struct rl_softc *sc = (struct rl_softc *)self; | |||
| 365 | u_int32_t rval; | |||
| 366 | int i; | |||
| 367 | ||||
| 368 | if (phy != 7) | |||
| 369 | return (0); | |||
| 370 | ||||
| 371 | /* Let the rgephy driver read the GMEDIASTAT register */ | |||
| 372 | ||||
| 373 | if (reg == RL_GMEDIASTAT) { | |||
| 374 | rval = CSR_READ_1(sc, RL_GMEDIASTAT); | |||
| 375 | return (rval); | |||
| 376 | } | |||
| 377 | ||||
| 378 | CSR_WRITE_4(sc, RL_PHYAR, reg << 16); | |||
| 379 | ||||
| 380 | for (i = 0; i < RL_PHY_TIMEOUT; i++) { | |||
| 381 | rval = CSR_READ_4(sc, RL_PHYAR); | |||
| 382 | if (rval & RL_PHYAR_BUSY) | |||
| 383 | break; | |||
| 384 | DELAY(25); | |||
| 385 | } | |||
| 386 | ||||
| 387 | if (i == RL_PHY_TIMEOUT) { | |||
| 388 | printf ("%s: PHY read failed\n", sc->sc_dev.dv_xname); | |||
| 389 | return (0); | |||
| 390 | } | |||
| 391 | ||||
| 392 | DELAY(20); | |||
| 393 | ||||
| 394 | return (rval & RL_PHYAR_PHYDATA); | |||
| 395 | } | |||
| 396 | ||||
| 397 | void | |||
| 398 | re_gmii_writereg(struct device *dev, int phy, int reg, int data) | |||
| 399 | { | |||
| 400 | struct rl_softc *sc = (struct rl_softc *)dev; | |||
| 401 | u_int32_t rval; | |||
| 402 | int i; | |||
| 403 | ||||
| 404 | CSR_WRITE_4(sc, RL_PHYAR, (reg << 16) | | |||
| 405 | (data & RL_PHYAR_PHYDATA) | RL_PHYAR_BUSY); | |||
| 406 | ||||
| 407 | for (i = 0; i < RL_PHY_TIMEOUT; i++) { | |||
| 408 | rval = CSR_READ_4(sc, RL_PHYAR); | |||
| 409 | if (!(rval & RL_PHYAR_BUSY)) | |||
| 410 | break; | |||
| 411 | DELAY(25); | |||
| 412 | } | |||
| 413 | ||||
| 414 | if (i == RL_PHY_TIMEOUT) | |||
| 415 | printf ("%s: PHY write failed\n", sc->sc_dev.dv_xname); | |||
| 416 | ||||
| 417 | DELAY(20); | |||
| 418 | } | |||
| 419 | ||||
| 420 | int | |||
| 421 | re_miibus_readreg(struct device *dev, int phy, int reg) | |||
| 422 | { | |||
| 423 | struct rl_softc *sc = (struct rl_softc *)dev; | |||
| 424 | u_int16_t rval = 0; | |||
| 425 | u_int16_t re8139_reg = 0; | |||
| 426 | int s; | |||
| 427 | ||||
| 428 | s = splnet()splraise(0x4); | |||
| 429 | ||||
| 430 | if (sc->sc_hwrev != RL_HWREV_8139CPLUS0x74800000) { | |||
| 431 | rval = re_gmii_readreg(dev, phy, reg); | |||
| 432 | splx(s)spllower(s); | |||
| 433 | return (rval); | |||
| 434 | } | |||
| 435 | ||||
| 436 | /* Pretend the internal PHY is only at address 0 */ | |||
| 437 | if (phy) { | |||
| 438 | splx(s)spllower(s); | |||
| 439 | return (0); | |||
| 440 | } | |||
| 441 | switch(reg) { | |||
| 442 | case MII_BMCR0x00: | |||
| 443 | re8139_reg = RL_BMCR0x0062; | |||
| 444 | break; | |||
| 445 | case MII_BMSR0x01: | |||
| 446 | re8139_reg = RL_BMSR0x0064; | |||
| 447 | break; | |||
| 448 | case MII_ANAR0x04: | |||
| 449 | re8139_reg = RL_ANAR0x0066; | |||
| 450 | break; | |||
| 451 | case MII_ANER0x06: | |||
| 452 | re8139_reg = RL_ANER0x006A; | |||
| 453 | break; | |||
| 454 | case MII_ANLPAR0x05: | |||
| 455 | re8139_reg = RL_LPAR0x0068; | |||
| 456 | break; | |||
| 457 | case MII_PHYIDR10x02: | |||
| 458 | case MII_PHYIDR20x03: | |||
| 459 | splx(s)spllower(s); | |||
| 460 | return (0); | |||
| 461 | /* | |||
| 462 | * Allow the rlphy driver to read the media status | |||
| 463 | * register. If we have a link partner which does not | |||
| 464 | * support NWAY, this is the register which will tell | |||
| 465 | * us the results of parallel detection. | |||
| 466 | */ | |||
| 467 | case RL_MEDIASTAT0x0058: | |||
| 468 | rval = CSR_READ_1(sc, RL_MEDIASTAT)((sc->rl_btag)->read_1((sc->rl_bhandle), (0x0058))); | |||
| 469 | splx(s)spllower(s); | |||
| 470 | return (rval); | |||
| 471 | default: | |||
| 472 | printf("%s: bad phy register %x\n", sc->sc_dev.dv_xname, reg); | |||
| 473 | splx(s)spllower(s); | |||
| 474 | return (0); | |||
| 475 | } | |||
| 476 | rval = CSR_READ_2(sc, re8139_reg)((sc->rl_btag)->read_2((sc->rl_bhandle), (re8139_reg ))); | |||
| 477 | if (re8139_reg == RL_BMCR0x0062) { | |||
| 478 | /* 8139C+ has different bit layout. */ | |||
| 479 | rval &= ~(BMCR_LOOP0x4000 | BMCR_ISO0x0400); | |||
| 480 | } | |||
| 481 | splx(s)spllower(s); | |||
| 482 | return (rval); | |||
| 483 | } | |||
| 484 | ||||
| 485 | void | |||
| 486 | re_miibus_writereg(struct device *dev, int phy, int reg, int data) | |||
| 487 | { | |||
| 488 | struct rl_softc *sc = (struct rl_softc *)dev; | |||
| 489 | u_int16_t re8139_reg = 0; | |||
| 490 | int s; | |||
| 491 | ||||
| 492 | s = splnet()splraise(0x4); | |||
| 493 | ||||
| 494 | if (sc->sc_hwrev != RL_HWREV_8139CPLUS0x74800000) { | |||
| 495 | re_gmii_writereg(dev, phy, reg, data); | |||
| 496 | splx(s)spllower(s); | |||
| 497 | return; | |||
| 498 | } | |||
| 499 | ||||
| 500 | /* Pretend the internal PHY is only at address 0 */ | |||
| 501 | if (phy) { | |||
| 502 | splx(s)spllower(s); | |||
| 503 | return; | |||
| 504 | } | |||
| 505 | switch(reg) { | |||
| 506 | case MII_BMCR0x00: | |||
| 507 | re8139_reg = RL_BMCR0x0062; | |||
| 508 | /* 8139C+ has different bit layout. */ | |||
| 509 | data &= ~(BMCR_LOOP0x4000 | BMCR_ISO0x0400); | |||
| 510 | break; | |||
| 511 | case MII_BMSR0x01: | |||
| 512 | re8139_reg = RL_BMSR0x0064; | |||
| 513 | break; | |||
| 514 | case MII_ANAR0x04: | |||
| 515 | re8139_reg = RL_ANAR0x0066; | |||
| 516 | break; | |||
| 517 | case MII_ANER0x06: | |||
| 518 | re8139_reg = RL_ANER0x006A; | |||
| 519 | break; | |||
| 520 | case MII_ANLPAR0x05: | |||
| 521 | re8139_reg = RL_LPAR0x0068; | |||
| 522 | break; | |||
| 523 | case MII_PHYIDR10x02: | |||
| 524 | case MII_PHYIDR20x03: | |||
| 525 | splx(s)spllower(s); | |||
| 526 | return; | |||
| 527 | break; | |||
| 528 | default: | |||
| 529 | printf("%s: bad phy register %x\n", sc->sc_dev.dv_xname, reg); | |||
| 530 | splx(s)spllower(s); | |||
| 531 | return; | |||
| 532 | } | |||
| 533 | CSR_WRITE_2(sc, re8139_reg, data)((sc->rl_btag)->write_2((sc->rl_bhandle), (re8139_reg ), (data))); | |||
| 534 | splx(s)spllower(s); | |||
| 535 | } | |||
| 536 | ||||
| 537 | void | |||
| 538 | re_miibus_statchg(struct device *dev) | |||
| 539 | { | |||
| 540 | struct rl_softc *sc = (struct rl_softc *)dev; | |||
| 541 | struct ifnet *ifp = &sc->sc_arpcom.ac_if; | |||
| 542 | struct mii_data *mii = &sc->sc_mii; | |||
| 543 | ||||
| 544 | if ((ifp->if_flags & IFF_RUNNING0x40) == 0) | |||
| 545 | return; | |||
| 546 | ||||
| 547 | sc->rl_flags &= ~RL_FLAG_LINK0x00002000; | |||
| 548 | if ((mii->mii_media_status & (IFM_ACTIVE0x0000000000000002ULL | IFM_AVALID0x0000000000000001ULL)) == | |||
| 549 | (IFM_ACTIVE0x0000000000000002ULL | IFM_AVALID0x0000000000000001ULL)) { | |||
| 550 | switch (IFM_SUBTYPE(mii->mii_media_active)((mii->mii_media_active) & 0x00000000000000ffULL)) { | |||
| 551 | case IFM_10_T3: | |||
| 552 | case IFM_100_TX6: | |||
| 553 | sc->rl_flags |= RL_FLAG_LINK0x00002000; | |||
| 554 | break; | |||
| 555 | case IFM_1000_T16: | |||
| 556 | if ((sc->rl_flags & RL_FLAG_FASTETHER0x00040000) != 0) | |||
| 557 | break; | |||
| 558 | sc->rl_flags |= RL_FLAG_LINK0x00002000; | |||
| 559 | break; | |||
| 560 | default: | |||
| 561 | break; | |||
| 562 | } | |||
| 563 | } | |||
| 564 | ||||
| 565 | /* | |||
| 566 | * Realtek controllers do not provide an interface to | |||
| 567 | * Tx/Rx MACs for resolved speed, duplex and flow-control | |||
| 568 | * parameters. | |||
| 569 | */ | |||
| 570 | } | |||
| 571 | ||||
| 572 | void | |||
| 573 | re_iff(struct rl_softc *sc) | |||
| 574 | { | |||
| 575 | struct ifnet *ifp = &sc->sc_arpcom.ac_if; | |||
| 576 | int h = 0; | |||
| 577 | u_int32_t hashes[2]; | |||
| 578 | u_int32_t rxfilt; | |||
| 579 | struct arpcom *ac = &sc->sc_arpcom; | |||
| 580 | struct ether_multi *enm; | |||
| 581 | struct ether_multistep step; | |||
| 582 | ||||
| 583 | rxfilt = CSR_READ_4(sc, RL_RXCFG); | |||
| 584 | rxfilt &= ~(RL_RXCFG_RX_ALLPHYS | RL_RXCFG_RX_BROAD | | |||
| 585 | RL_RXCFG_RX_INDIV | RL_RXCFG_RX_MULTI); | |||
| 586 | ifp->if_flags &= ~IFF_ALLMULTI; | |||
| 587 | ||||
| 588 | /* | |||
| 589 | * Always accept frames destined to our station address. | |||
| 590 | * Always accept broadcast frames. | |||
| 591 | */ | |||
| 592 | rxfilt |= RL_RXCFG_RX_INDIV0x00000002 | RL_RXCFG_RX_BROAD0x00000008; | |||
| 593 | ||||
| 594 | if (ifp->if_flags & IFF_PROMISC0x100 || ac->ac_multirangecnt > 0) { | |||
| 595 | ifp->if_flags |= IFF_ALLMULTI0x200; | |||
| 596 | rxfilt |= RL_RXCFG_RX_MULTI0x00000004; | |||
| 597 | if (ifp->if_flags & IFF_PROMISC0x100) | |||
| 598 | rxfilt |= RL_RXCFG_RX_ALLPHYS0x00000001; | |||
| 599 | hashes[0] = hashes[1] = 0xFFFFFFFF; | |||
| 600 | } else { | |||
| 601 | rxfilt |= RL_RXCFG_RX_MULTI0x00000004; | |||
| 602 | /* Program new filter. */ | |||
| 603 | bzero(hashes, sizeof(hashes))__builtin_bzero((hashes), (sizeof(hashes))); | |||
| 604 | ||||
| 605 | ETHER_FIRST_MULTI(step, ac, enm); | |||
| 606 | while (enm != NULL) { | |||
| 607 | h = ether_crc32_be(enm->enm_addrlo, | |||
| 608 | ETHER_ADDR_LEN) >> 26; | |||
| 609 | ||||
| 610 | if (h < 32) | |||
| 611 | hashes[0] |= (1 << h); | |||
| 612 | else | |||
| 613 | hashes[1] |= (1 << (h - 32)); | |||
| 614 | ||||
| 615 | ETHER_NEXT_MULTI(step, enm); | |||
| 616 | } | |||
| 617 | } | |||
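| | /* | |||
| | * The loop above builds the usual 64-bit multicast hash filter: the | |||
| | * top 6 bits of the big-endian CRC of each address select one of 64 | |||
| | * filter bits, split across two 32-bit words (bits 0-31 in hashes[0], | |||
| | * bits 32-63 in hashes[1]) that are written to RL_MAR0/RL_MAR4 below. | |||
| | */ | |||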
| 618 | ||||
| 619 | /* | |||
| 620 | * For some unfathomable reason, Realtek decided to reverse | |||
| 621 | * the order of the multicast hash registers in the PCI Express | |||
| 622 | * parts. This means we have to write the hash pattern in reverse | |||
| 623 | * order for those devices. | |||
| 624 | */ | |||
| 625 | if (sc->rl_flags & RL_FLAG_PCIE0x00000004) { | |||
| 626 | CSR_WRITE_4(sc, RL_MAR0, swap32(hashes[1])); | |||
| 627 | CSR_WRITE_4(sc, RL_MAR4, swap32(hashes[0])); | |||
| 628 | } else { | |||
| 629 | CSR_WRITE_4(sc, RL_MAR0, hashes[0]); | |||
| 630 | CSR_WRITE_4(sc, RL_MAR4, hashes[1]); | |||
| 631 | } | |||
| 632 | ||||
| 633 | CSR_WRITE_4(sc, RL_RXCFG, rxfilt); | |||
| 634 | } | |||
| 635 | ||||
| 636 | void | |||
| 637 | re_reset(struct rl_softc *sc) | |||
| 638 | { | |||
| 639 | int i; | |||
| 640 | ||||
| 641 | CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_RESET); | |||
| 642 | ||||
| 643 | for (i = 0; i < RL_TIMEOUT; i++) { | |||
| 644 | DELAY(10); | |||
| 645 | if (!(CSR_READ_1(sc, RL_COMMAND) & RL_CMD_RESET)) | |||
| 646 | break; | |||
| 647 | } | |||
| 648 | if (i == RL_TIMEOUT) | |||
| 649 | printf("%s: reset never completed!\n", sc->sc_dev.dv_xname); | |||
| 650 | ||||
| 651 | if (sc->rl_flags & RL_FLAG_MACRESET) | |||
| 652 | CSR_WRITE_1(sc, RL_LDPS, 1); | |||
| 653 | } | |||
| 654 | ||||
| 655 | /* | |||
| 656 | * Attach the interface. Allocate softc structures, do ifmedia | |||
| 657 | * setup and ethernet/BPF attach. | |||
| 658 | */ | |||
| 659 | int | |||
| 660 | re_attach(struct rl_softc *sc, const char *intrstr) | |||
| 661 | { | |||
| 662 | u_char eaddr[ETHER_ADDR_LEN6]; | |||
| 663 | u_int16_t as[ETHER_ADDR_LEN6 / 2]; | |||
| 664 | struct ifnet *ifp; | |||
| 665 | u_int16_t re_did = 0; | |||
| 666 | int error = 0, i; | |||
| 667 | const struct re_revision *rr; | |||
| 668 | const char *re_name = NULL((void *)0); | |||
| 669 | ||||
| 670 | sc->sc_hwrev = CSR_READ_4(sc, RL_TXCFG)((sc->rl_btag)->read_4((sc->rl_bhandle), (0x0040))) & RL_TXCFG_HWREV0x7C800000; | |||
| 671 | ||||
| 672 | switch (sc->sc_hwrev) { | |||
| 673 | case RL_HWREV_8139CPLUS0x74800000: | |||
| 674 | sc->rl_flags |= RL_FLAG_FASTETHER0x00040000 | RL_FLAG_AUTOPAD0x00001000; | |||
| 675 | sc->rl_max_mtu = RL_MTU(1518 - ((6 * 2) + 2) - 4); | |||
| 676 | break; | |||
| 677 | case RL_HWREV_8100E0x30800000: | |||
| 678 | case RL_HWREV_8100E_SPIN20x38800000: | |||
| 679 | case RL_HWREV_8101E0x34000000: | |||
| 680 | sc->rl_flags |= RL_FLAG_PHYWAKE0x00000008 | RL_FLAG_FASTETHER0x00040000; | |||
| 681 | sc->rl_max_mtu = RL_MTU(1518 - ((6 * 2) + 2) - 4); | |||
| 682 | break; | |||
| 683 | case RL_HWREV_8103E0x34C00000: | |||
| 684 | sc->rl_flags |= RL_FLAG_MACSLEEP0x00000800; | |||
| 685 | /* FALLTHROUGH */ | |||
| 686 | case RL_HWREV_8102E0x34800000: | |||
| 687 | case RL_HWREV_8102EL0x24800000: | |||
| 688 | case RL_HWREV_8102EL_SPIN10x24C00000: | |||
| 689 | sc->rl_flags |= RL_FLAG_PHYWAKE0x00000008 | RL_FLAG_PAR0x00000010 | | |||
| 690 | RL_FLAG_DESCV20x00000020 | RL_FLAG_MACSTAT0x00000040 | RL_FLAG_FASTETHER0x00040000 | | |||
| 691 | RL_FLAG_CMDSTOP0x00000400 | RL_FLAG_AUTOPAD0x00001000; | |||
| 692 | sc->rl_max_mtu = RL_MTU(1518 - ((6 * 2) + 2) - 4); | |||
| 693 | break; | |||
| 694 | case RL_HWREV_8401E0x24000000: | |||
| 695 | case RL_HWREV_8105E0x40800000: | |||
| 696 | case RL_HWREV_8105E_SPIN10x40C00000: | |||
| 697 | case RL_HWREV_8106E0x44800000: | |||
| 698 | sc->rl_flags |= RL_FLAG_PHYWAKE0x00000008 | RL_FLAG_PHYWAKE_PM0x00004000 | | |||
| 699 | RL_FLAG_PAR0x00000010 | RL_FLAG_DESCV20x00000020 | RL_FLAG_MACSTAT0x00000040 | | |||
| 700 | RL_FLAG_FASTETHER0x00040000 | RL_FLAG_CMDSTOP0x00000400 | RL_FLAG_AUTOPAD0x00001000; | |||
| 701 | sc->rl_max_mtu = RL_MTU(1518 - ((6 * 2) + 2) - 4); | |||
| 702 | break; | |||
| 703 | case RL_HWREV_84020x44000000: | |||
| 704 | sc->rl_flags |= RL_FLAG_PHYWAKE0x00000008 | RL_FLAG_PHYWAKE_PM0x00004000 | | |||
| 705 | RL_FLAG_PAR0x00000010 | RL_FLAG_DESCV20x00000020 | RL_FLAG_MACSTAT0x00000040 | | |||
| 706 | RL_FLAG_FASTETHER0x00040000 | RL_FLAG_CMDSTOP0x00000400 | RL_FLAG_AUTOPAD0x00001000 | | |||
| 707 | RL_FLAG_CMDSTOP_WAIT_TXQ0x00080000; | |||
| 708 | sc->rl_max_mtu = RL_MTU(1518 - ((6 * 2) + 2) - 4); | |||
| 709 | break; | |||
| 710 | case RL_HWREV_8168B_SPIN10x30000000: | |||
| 711 | case RL_HWREV_8168B_SPIN20x38000000: | |||
| 712 | sc->rl_flags |= RL_FLAG_WOLRXENB0x00800000; | |||
| 713 | /* FALLTHROUGH */ | |||
| 714 | case RL_HWREV_8168B_SPIN30x38400000: | |||
| 715 | sc->rl_flags |= RL_FLAG_PHYWAKE0x00000008 | RL_FLAG_MACSTAT0x00000040; | |||
| 716 | sc->rl_max_mtu = RL_MTU(1518 - ((6 * 2) + 2) - 4); | |||
| 717 | break; | |||
| 718 | case RL_HWREV_8168C_SPIN20x3c400000: | |||
| 719 | sc->rl_flags |= RL_FLAG_MACSLEEP0x00000800; | |||
| 720 | /* FALLTHROUGH */ | |||
| 721 | case RL_HWREV_8168C0x3c000000: | |||
| 722 | case RL_HWREV_8168CP0x3c800000: | |||
| 723 | sc->rl_flags |= RL_FLAG_PHYWAKE0x00000008 | RL_FLAG_PAR0x00000010 | | |||
| 724 | RL_FLAG_DESCV20x00000020 | RL_FLAG_MACSTAT0x00000040 | RL_FLAG_CMDSTOP0x00000400 | | |||
| 725 | RL_FLAG_AUTOPAD0x00001000 | RL_FLAG_JUMBOV20x00100000 | RL_FLAG_WOL_MANLINK0x00200000; | |||
| 726 | sc->rl_max_mtu = RL_JUMBO_MTU_6K((6 * 1024) - ((6 * 2) + 2) - 4 - 4); | |||
| 727 | break; | |||
| 728 | case RL_HWREV_8168D0x28000000: | |||
| 729 | sc->rl_flags |= RL_FLAG_PHYWAKE0x00000008 | RL_FLAG_PHYWAKE_PM0x00004000 | | |||
| 730 | RL_FLAG_PAR0x00000010 | RL_FLAG_DESCV20x00000020 | RL_FLAG_MACSTAT0x00000040 | | |||
| 731 | RL_FLAG_CMDSTOP0x00000400 | RL_FLAG_AUTOPAD0x00001000 | RL_FLAG_JUMBOV20x00100000 | | |||
| 732 | RL_FLAG_WOL_MANLINK0x00200000; | |||
| 733 | sc->rl_max_mtu = RL_JUMBO_MTU_9K((9 * 1024) - ((6 * 2) + 2) - 4 - 4); | |||
| 734 | break; | |||
| 735 | case RL_HWREV_8168DP0x28800000: | |||
| 736 | sc->rl_flags |= RL_FLAG_PHYWAKE0x00000008 | RL_FLAG_PAR0x00000010 | | |||
| 737 | RL_FLAG_DESCV20x00000020 | RL_FLAG_MACSTAT0x00000040 | RL_FLAG_AUTOPAD0x00001000 | | |||
| 738 | RL_FLAG_JUMBOV20x00100000 | RL_FLAG_WAIT_TXPOLL0x00400000 | RL_FLAG_WOL_MANLINK0x00200000; | |||
| 739 | sc->rl_max_mtu = RL_JUMBO_MTU_9K((9 * 1024) - ((6 * 2) + 2) - 4 - 4); | |||
| 740 | break; | |||
| 741 | case RL_HWREV_8168E0x2C000000: | |||
| 742 | sc->rl_flags |= RL_FLAG_PHYWAKE0x00000008 | RL_FLAG_PHYWAKE_PM0x00004000 | | |||
| 743 | RL_FLAG_PAR0x00000010 | RL_FLAG_DESCV20x00000020 | RL_FLAG_MACSTAT0x00000040 | | |||
| 744 | RL_FLAG_CMDSTOP0x00000400 | RL_FLAG_AUTOPAD0x00001000 | RL_FLAG_JUMBOV20x00100000 | | |||
| 745 | RL_FLAG_WOL_MANLINK0x00200000; | |||
| 746 | sc->rl_max_mtu = RL_JUMBO_MTU_9K((9 * 1024) - ((6 * 2) + 2) - 4 - 4); | |||
| 747 | break; | |||
| 748 | case RL_HWREV_8168E_VL0x2C800000: | |||
| 749 | sc->rl_flags |= RL_FLAG_EARLYOFF0x00008000 | RL_FLAG_PHYWAKE0x00000008 | RL_FLAG_PAR0x00000010 | | |||
| 750 | RL_FLAG_DESCV20x00000020 | RL_FLAG_MACSTAT0x00000040 | RL_FLAG_CMDSTOP0x00000400 | | |||
| 751 | RL_FLAG_AUTOPAD0x00001000 | RL_FLAG_JUMBOV20x00100000 | RL_FLAG_CMDSTOP_WAIT_TXQ0x00080000 | | |||
| 752 | RL_FLAG_WOL_MANLINK0x00200000; | |||
| 753 | sc->rl_max_mtu = RL_JUMBO_MTU_6K((6 * 1024) - ((6 * 2) + 2) - 4 - 4); | |||
| 754 | break; | |||
| 755 | case RL_HWREV_8168F0x48000000: | |||
| 756 | sc->rl_flags |= RL_FLAG_EARLYOFF0x00008000; | |||
| 757 | /* FALLTHROUGH */ | |||
| 758 | case RL_HWREV_84110x48800000: | |||
| 759 | sc->rl_flags |= RL_FLAG_PHYWAKE0x00000008 | RL_FLAG_PAR0x00000010 | | |||
| 760 | RL_FLAG_DESCV20x00000020 | RL_FLAG_MACSTAT0x00000040 | RL_FLAG_CMDSTOP0x00000400 | | |||
| 761 | RL_FLAG_AUTOPAD0x00001000 | RL_FLAG_JUMBOV20x00100000 | RL_FLAG_CMDSTOP_WAIT_TXQ0x00080000 | | |||
| 762 | RL_FLAG_WOL_MANLINK0x00200000; | |||
| 763 | sc->rl_max_mtu = RL_JUMBO_MTU_9K((9 * 1024) - ((6 * 2) + 2) - 4 - 4); | |||
| 764 | break; | |||
| 765 | case RL_HWREV_8168EP0x50000000: | |||
| 766 | case RL_HWREV_8168FP0x54800000: | |||
| 767 | case RL_HWREV_8168G0x4c000000: | |||
| 768 | case RL_HWREV_8168GU0x50800000: | |||
| 769 | case RL_HWREV_8168H0x54000000: | |||
| 770 | case RL_HWREV_8411B0x5c800000: | |||
| 771 | if (sc->sc_product == PCI_PRODUCT_REALTEK_RT8101E0x8136) { | |||
| 772 | /* RTL8106EUS */ | |||
| 773 | sc->rl_flags |= RL_FLAG_FASTETHER0x00040000; | |||
| 774 | sc->rl_max_mtu = RL_MTU(1518 - ((6 * 2) + 2) - 4); | |||
| 775 | } else { | |||
| 776 | sc->rl_flags |= RL_FLAG_JUMBOV20x00100000 | RL_FLAG_WOL_MANLINK0x00200000; | |||
| 777 | sc->rl_max_mtu = RL_JUMBO_MTU_9K((9 * 1024) - ((6 * 2) + 2) - 4 - 4); | |||
| 778 | } | |||
| 779 | ||||
| 780 | sc->rl_flags |= RL_FLAG_PHYWAKE0x00000008 | RL_FLAG_PAR0x00000010 | | |||
| 781 | RL_FLAG_DESCV20x00000020 | RL_FLAG_MACSTAT0x00000040 | RL_FLAG_CMDSTOP0x00000400 | | |||
| 782 | RL_FLAG_AUTOPAD0x00001000 | RL_FLAG_CMDSTOP_WAIT_TXQ0x00080000 | | |||
| 783 | RL_FLAG_EARLYOFFV20x00010000 | RL_FLAG_RXDV_GATED0x00020000; | |||
| 784 | break; | |||
| 785 | case RL_HWREV_8169_8110SB0x10000000: | |||
| 786 | case RL_HWREV_8169_8110SBL0x7cc00000: | |||
| 787 | case RL_HWREV_8169_8110SCd0x18000000: | |||
| 788 | case RL_HWREV_8169_8110SCe0x98000000: | |||
| 789 | sc->rl_flags |= RL_FLAG_PHYWAKE0x00000008; | |||
| 790 | /* FALLTHROUGH */ | |||
| 791 | case RL_HWREV_81690x00000000: | |||
| 792 | case RL_HWREV_8169S0x00800000: | |||
| 793 | case RL_HWREV_8110S0x04000000: | |||
| 794 | sc->rl_flags |= RL_FLAG_MACRESET0x00000200; | |||
| 795 | sc->rl_max_mtu = RL_JUMBO_MTU_7K((7 * 1024) - ((6 * 2) + 2) - 4 - 4); | |||
| 796 | break; | |||
| 797 | default: | |||
| 798 | break; | |||
| 799 | } | |||
| 800 | ||||
| 801 | if (sc->sc_hwrev == RL_HWREV_8139CPLUS0x74800000) { | |||
| 802 | sc->rl_cfg0 = RL_8139_CFG00x0051; | |||
| 803 | sc->rl_cfg1 = RL_8139_CFG10x0052; | |||
| 804 | sc->rl_cfg2 = 0; | |||
| 805 | sc->rl_cfg3 = RL_8139_CFG30x0059; | |||
| 806 | sc->rl_cfg4 = RL_8139_CFG40x005A; | |||
| 807 | sc->rl_cfg5 = RL_8139_CFG50x00D8; | |||
| 808 | } else { | |||
| 809 | sc->rl_cfg0 = RL_CFG00x0051; | |||
| 810 | sc->rl_cfg1 = RL_CFG10x0052; | |||
| 811 | sc->rl_cfg2 = RL_CFG20x0053; | |||
| 812 | sc->rl_cfg3 = RL_CFG30x0054; | |||
| 813 | sc->rl_cfg4 = RL_CFG40x0055; | |||
| 814 | sc->rl_cfg5 = RL_CFG50x0056; | |||
| 815 | } | |||
| 816 | ||||
| 817 | /* Reset the adapter. */ | |||
| 818 | re_reset(sc); | |||
| 819 | ||||
| 820 | sc->rl_tx_time = 5; /* 125us */ | |||
| 821 | sc->rl_rx_time = 2; /* 50us */ | |||
| 822 | if (sc->rl_flags & RL_FLAG_PCIE0x00000004) | |||
| 823 | sc->rl_sim_time = 75; /* 75us */ | |||
| 824 | else | |||
| 825 | sc->rl_sim_time = 125; /* 125us */ | |||
| 826 | sc->rl_imtype = RL_IMTYPE_SIM1; /* simulated interrupt moderation */ | |||
| 827 | ||||
| 828 | if (sc->sc_hwrev == RL_HWREV_8139CPLUS0x74800000) | |||
| 829 | sc->rl_bus_speed = 33; /* XXX */ | |||
| 830 | else if (sc->rl_flags & RL_FLAG_PCIE0x00000004) | |||
| 831 | sc->rl_bus_speed = 125; | |||
| 832 | else { | |||
| 833 | u_int8_t cfg2; | |||
| 834 | ||||
| 835 | cfg2 = CSR_READ_1(sc, sc->rl_cfg2)((sc->rl_btag)->read_1((sc->rl_bhandle), (sc->rl_cfg2 ))); | |||
| 836 | switch (cfg2 & RL_CFG2_PCI_MASK0x07) { | |||
| 837 | case RL_CFG2_PCI_33MHZ0x00: | |||
| 838 | sc->rl_bus_speed = 33; | |||
| 839 | break; | |||
| 840 | case RL_CFG2_PCI_66MHZ0x01: | |||
| 841 | sc->rl_bus_speed = 66; | |||
| 842 | break; | |||
| 843 | default: | |||
| 844 | printf("%s: unknown bus speed, assume 33MHz\n", | |||
| 845 | sc->sc_dev.dv_xname); | |||
| 846 | sc->rl_bus_speed = 33; | |||
| 847 | break; | |||
| 848 | } | |||
| 849 | ||||
| 850 | if (cfg2 & RL_CFG2_PCI_64BIT0x08) | |||
| 851 | sc->rl_flags |= RL_FLAG_PCI640x00000002; | |||
| 852 | } | |||
| 853 | ||||
| 854 | re_config_imtype(sc, sc->rl_imtype); | |||
| 855 | ||||
| 856 | if (sc->rl_flags & RL_FLAG_PAR0x00000010) { | |||
| 857 | /* | |||
| 858 | * XXX Should have a better way to extract station | |||
| 859 | * address from EEPROM. | |||
| 860 | */ | |||
| 861 | for (i = 0; i < ETHER_ADDR_LEN6; i++) | |||
| 862 | eaddr[i] = CSR_READ_1(sc, RL_IDR0 + i)((sc->rl_btag)->read_1((sc->rl_bhandle), (0x0000 + i ))); | |||
| 863 | } else { | |||
| 864 | sc->rl_eewidth = RL_9356_ADDR_LEN8; | |||
| 865 | re_read_eeprom(sc, (caddr_t)&re_did, 0, 1); | |||
| 866 | if (re_did != 0x8129) | |||
| 867 | sc->rl_eewidth = RL_9346_ADDR_LEN6; | |||
| 868 | ||||
| 869 | /* | |||
| 870 | * Get station address from the EEPROM. | |||
| 871 | */ | |||
| 872 | re_read_eeprom(sc, (caddr_t)as, RL_EE_EADDR0x07, 3); | |||
| 873 | for (i = 0; i < ETHER_ADDR_LEN6 / 2; i++) | |||
| 874 | as[i] = letoh16(as[i])((__uint16_t)(as[i])); | |||
| 875 | bcopy(as, eaddr, ETHER_ADDR_LEN6); | |||
| 876 | } | |||
| 877 | ||||
| 878 | /* | |||
| 879 | * Set RX length mask, TX poll request register | |||
| 880 | * and descriptor count. | |||
| 881 | */ | |||
| 882 | if (sc->sc_hwrev == RL_HWREV_8139CPLUS0x74800000) { | |||
| 883 | sc->rl_rxlenmask = RL_RDESC_STAT_FRAGLEN0x00001FFF; | |||
| 884 | sc->rl_txstart = RL_TXSTART0x00D9; | |||
| 885 | sc->rl_ldata.rl_tx_desc_cnt = RL_8139_TX_DESC_CNT64; | |||
| 886 | sc->rl_ldata.rl_rx_desc_cnt = RL_8139_RX_DESC_CNT64; | |||
| 887 | sc->rl_ldata.rl_tx_ndescs = RL_8139_NTXSEGS8; | |||
| 888 | } else { | |||
| 889 | sc->rl_rxlenmask = RL_RDESC_STAT_GFRAGLEN0x00003FFF; | |||
| 890 | sc->rl_txstart = RL_GTXSTART0x0038; | |||
| 891 | sc->rl_ldata.rl_tx_desc_cnt = RL_8169_TX_DESC_CNT1024; | |||
| 892 | sc->rl_ldata.rl_rx_desc_cnt = RL_8169_RX_DESC_CNT1024; | |||
| 893 | sc->rl_ldata.rl_tx_ndescs = RL_8169_NTXSEGS32; | |||
| 894 | } | |||
| 895 | ||||
| 896 | bcopy(eaddr, (char *)&sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN6); | |||
| 897 | ||||
| 898 | for (rr = re_revisions; rr->re_name != NULL((void *)0); rr++) { | |||
| 899 | if (rr->re_chipid == sc->sc_hwrev) | |||
| 900 | re_name = rr->re_name; | |||
| 901 | } | |||
| 902 | ||||
| 903 | if (re_name == NULL((void *)0)) | |||
| 904 | printf(": unknown ASIC (0x%04x)", sc->sc_hwrev >> 16); | |||
| 905 | else | |||
| 906 | printf(": %s (0x%04x)", re_name, sc->sc_hwrev >> 16); | |||
| 907 | ||||
| 908 | printf(", %s, address %s\n", intrstr, | |||
| 909 | ether_sprintf(sc->sc_arpcom.ac_enaddr)); | |||
| 910 | ||||
| 911 | /* Allocate DMA'able memory for the TX ring */ | |||
| 912 | if ((error = bus_dmamem_alloc(sc->sc_dmat, RL_TX_LIST_SZ(sc), | |||
| 913 | RL_RING_ALIGN, 0, &sc->rl_ldata.rl_tx_listseg, 1, | |||
| 914 | &sc->rl_ldata.rl_tx_listnseg, BUS_DMA_NOWAIT | | |||
| 915 | BUS_DMA_ZERO)) != 0) { | |||
| 916 | printf("%s: can't allocate tx listseg, error = %d\n", | |||
| 917 | sc->sc_dev.dv_xname, error); | |||
| 918 | goto fail_0; | |||
| 919 | } | |||
| 920 | ||||
| 921 | /* Load the map for the TX ring. */ | |||
| 922 | if ((error = bus_dmamem_map(sc->sc_dmat, &sc->rl_ldata.rl_tx_listseg, | |||
| 923 | sc->rl_ldata.rl_tx_listnseg, RL_TX_LIST_SZ(sc), | |||
| 924 | (caddr_t *)&sc->rl_ldata.rl_tx_list, | |||
| 925 | BUS_DMA_COHERENT | BUS_DMA_NOWAIT)) != 0) { | |||
| 926 | printf("%s: can't map tx list, error = %d\n", | |||
| 927 | sc->sc_dev.dv_xname, error); | |||
| 928 | goto fail_1; | |||
| 929 | } | |||
| 930 | ||||
| 931 | if ((error = bus_dmamap_create(sc->sc_dmat, RL_TX_LIST_SZ(sc), 1, | |||
| 932 | RL_TX_LIST_SZ(sc), 0, 0, | |||
| 933 | &sc->rl_ldata.rl_tx_list_map)) != 0) { | |||
| 934 | printf("%s: can't create tx list map, error = %d\n", | |||
| 935 | sc->sc_dev.dv_xname, error); | |||
| 936 | goto fail_2; | |||
| 937 | } | |||
| 938 | ||||
| 939 | if ((error = bus_dmamap_load(sc->sc_dmat, | |||
| 940 | sc->rl_ldata.rl_tx_list_map, sc->rl_ldata.rl_tx_list, | |||
| 941 | RL_TX_LIST_SZ(sc), NULL, BUS_DMA_NOWAIT)) != 0) { | |||
| 942 | printf("%s: can't load tx list, error = %d\n", | |||
| 943 | sc->sc_dev.dv_xname, error); | |||
| 944 | goto fail_3; | |||
| 945 | } | |||
| 946 | ||||
| 947 | /* Create DMA maps for TX buffers */ | |||
| 948 | for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) { | |||
| 949 | error = bus_dmamap_create(sc->sc_dmat, | |||
| 950 | RL_JUMBO_FRAMELEN, sc->rl_ldata.rl_tx_ndescs, | |||
| 951 | RL_JUMBO_FRAMELEN, 0, 0, | |||
| 952 | &sc->rl_ldata.rl_txq[i].txq_dmamap); | |||
| 953 | if (error) { | |||
| 954 | printf("%s: can't create DMA map for TX\n", | |||
| 955 | sc->sc_dev.dv_xname); | |||
| 956 | goto fail_4; | |||
| 957 | } | |||
| 958 | } | |||
| 959 | ||||
| 960 | /* Allocate DMA'able memory for the RX ring */ | |||
| 961 | if ((error = bus_dmamem_alloc(sc->sc_dmat, RL_RX_DMAMEM_SZ(sc), | |||
| 962 | RL_RING_ALIGN, 0, &sc->rl_ldata.rl_rx_listseg, 1, | |||
| 963 | &sc->rl_ldata.rl_rx_listnseg, BUS_DMA_NOWAIT | | |||
| 964 | BUS_DMA_ZERO)) != 0) { | |||
| 965 | printf("%s: can't allocate rx listnseg, error = %d\n", | |||
| 966 | sc->sc_dev.dv_xname, error); | |||
| 967 | goto fail_4; | |||
| 968 | } | |||
| 969 | ||||
| 970 | /* Load the map for the RX ring. */ | |||
| 971 | if ((error = bus_dmamem_map(sc->sc_dmat, &sc->rl_ldata.rl_rx_listseg, | |||
| 972 | sc->rl_ldata.rl_rx_listnseg, RL_RX_DMAMEM_SZ(sc), | |||
| 973 | (caddr_t *)&sc->rl_ldata.rl_rx_list, | |||
| 974 | BUS_DMA_COHERENT | BUS_DMA_NOWAIT)) != 0) { | |||
| 975 | printf("%s: can't map rx list, error = %d\n", | |||
| 976 | sc->sc_dev.dv_xname, error); | |||
| 977 | goto fail_5; | |||
| 978 | ||||
| 979 | } | |||
| 980 | ||||
| 981 | if ((error = bus_dmamap_create(sc->sc_dmat, RL_RX_DMAMEM_SZ(sc), 1, | |||
| 982 | RL_RX_DMAMEM_SZ(sc), 0, 0, | |||
| 983 | &sc->rl_ldata.rl_rx_list_map)) != 0) { | |||
| 984 | printf("%s: can't create rx list map, error = %d\n", | |||
| 985 | sc->sc_dev.dv_xname, error); | |||
| 986 | goto fail_6; | |||
| 987 | } | |||
| 988 | ||||
| 989 | if ((error = bus_dmamap_load(sc->sc_dmat, | |||
| 990 | sc->rl_ldata.rl_rx_list_map, sc->rl_ldata.rl_rx_list, | |||
| 991 | RL_RX_DMAMEM_SZ(sc), NULL, BUS_DMA_NOWAIT)) != 0) { | |||
| 992 | printf("%s: can't load rx list, error = %d\n", | |||
| 993 | sc->sc_dev.dv_xname, error); | |||
| 994 | goto fail_7; | |||
| 995 | } | |||
| 996 | ||||
| 997 | /* Create DMA maps for RX buffers */ | |||
| 998 | for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) { | |||
| 999 | error = bus_dmamap_create(sc->sc_dmat, | |||
| 1000 | RL_FRAMELEN(sc->rl_max_mtu), 1, | |||
| 1001 | RL_FRAMELEN(sc->rl_max_mtu), 0, 0, | |||
| 1002 | &sc->rl_ldata.rl_rxsoft[i].rxs_dmamap); | |||
| 1003 | if (error) { | |||
| 1004 | printf("%s: can't create DMA map for RX\n", | |||
| 1005 | sc->sc_dev.dv_xname); | |||
| 1006 | goto fail_8; | |||
| 1007 | } | |||
| 1008 | } | |||
| 1009 | ||||
| 1010 | ifp = &sc->sc_arpcom.ac_if; | |||
| 1011 | ifp->if_softc = sc; | |||
| 1012 | strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ16); | |||
| 1013 | ifp->if_flags = IFF_BROADCAST0x2 | IFF_SIMPLEX0x800 | IFF_MULTICAST0x8000; | |||
| 1014 | ifp->if_xflags = IFXF_MPSAFE0x1; | |||
| 1015 | ifp->if_ioctl = re_ioctl; | |||
| 1016 | ifp->if_qstart = re_start; | |||
| 1017 | ifp->if_watchdog = re_watchdog; | |||
| 1018 | ifp->if_hardmtu = sc->rl_max_mtu; | |||
| 1019 | ifq_init_maxlen(&ifp->if_snd, sc->rl_ldata.rl_tx_desc_cnt); | |||
| 1020 | ||||
| 1021 | ifp->if_capabilitiesif_data.ifi_capabilities = IFCAP_VLAN_MTU0x00000010 | IFCAP_CSUM_TCPv40x00000002 | | |||
| 1022 | IFCAP_CSUM_UDPv40x00000004; | |||
| 1023 | ||||
| 1024 | /* | |||
| 1025 | * RTL8168/8111C generates wrong IP checksummed frame if the | |||
| 1026 | * packet has IP options so disable TX IP checksum offloading. | |||
| 1027 | */ | |||
| 1028 | switch (sc->sc_hwrev) { | |||
| 1029 | case RL_HWREV_8168C0x3c000000: | |||
| 1030 | case RL_HWREV_8168C_SPIN20x3c400000: | |||
| 1031 | case RL_HWREV_8168CP0x3c800000: | |||
| 1032 | break; | |||
| 1033 | default: | |||
| 1034 | ifp->if_capabilitiesif_data.ifi_capabilities |= IFCAP_CSUM_IPv40x00000001; | |||
| 1035 | } | |||
| 1036 | ||||
| 1037 | #if NVLAN > 0 | |||
| 1038 | ifp->if_capabilitiesif_data.ifi_capabilities |= IFCAP_VLAN_HWTAGGING0x00000020; | |||
| 1039 | #endif | |||
| 1040 | ||||
| 1041 | #ifndef SMALL_KERNEL | |||
| 1042 | ifp->if_capabilitiesif_data.ifi_capabilities |= IFCAP_WOL0x00008000; | |||
| 1043 | ifp->if_wol = re_wol; | |||
| 1044 | re_wol(ifp, 0); | |||
| 1045 | #endif | |||
| 1046 | timeout_set(&sc->timer_handle, re_tick, sc); | |||
| 1047 | task_set(&sc->rl_start, re_txstart, sc); | |||
| 1048 | ||||
| 1049 | /* Take PHY out of power down mode. */ | |||
| 1050 | if (sc->rl_flags & RL_FLAG_PHYWAKE_PM0x00004000) { | |||
| 1051 | CSR_WRITE_1(sc, RL_PMCH, CSR_READ_1(sc, RL_PMCH) | 0x80)((sc->rl_btag)->write_1((sc->rl_bhandle), (0x006F), ( ((sc->rl_btag)->read_1((sc->rl_bhandle), (0x006F))) | 0x80))); | |||
| 1052 | if (sc->sc_hwrev == RL_HWREV_8401E0x24000000) | |||
| 1053 | CSR_WRITE_1(sc, 0xD1, CSR_READ_1(sc, 0xD1) & ~0x08)((sc->rl_btag)->write_1((sc->rl_bhandle), (0xD1), (( (sc->rl_btag)->read_1((sc->rl_bhandle), (0xD1))) & ~0x08))); | |||
| 1054 | } | |||
| 1055 | if (sc->rl_flags & RL_FLAG_PHYWAKE0x00000008) { | |||
| 1056 | re_gmii_writereg((struct device *)sc, 1, 0x1f, 0); | |||
| 1057 | re_gmii_writereg((struct device *)sc, 1, 0x0e, 0); | |||
| 1058 | } | |||
| 1059 | ||||
| 1060 | /* Do MII setup */ | |||
| 1061 | sc->sc_mii.mii_ifp = ifp; | |||
| 1062 | sc->sc_mii.mii_readreg = re_miibus_readreg; | |||
| 1063 | sc->sc_mii.mii_writereg = re_miibus_writereg; | |||
| 1064 | sc->sc_mii.mii_statchg = re_miibus_statchg; | |||
| 1065 | ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK0xff00000000000000ULL, re_ifmedia_upd, | |||
| 1066 | re_ifmedia_sts); | |||
| 1067 | mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY-1, | |||
| 1068 | MII_OFFSET_ANY-1, MIIF_DOPAUSE0x0100); | |||
| 1069 | if (LIST_FIRST(&sc->sc_mii.mii_phys)((&sc->sc_mii.mii_phys)->lh_first) == NULL((void *)0)) { | |||
| 1070 | printf("%s: no PHY found!\n", sc->sc_dev.dv_xname); | |||
| 1071 | ifmedia_add(&sc->sc_mii.mii_media, | |||
| 1072 | IFM_ETHER0x0000000000000100ULL|IFM_NONE2ULL, 0, NULL((void *)0)); | |||
| 1073 | ifmedia_set(&sc->sc_mii.mii_media, | |||
| 1074 | IFM_ETHER0x0000000000000100ULL|IFM_NONE2ULL); | |||
| 1075 | } else | |||
| 1076 | ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER0x0000000000000100ULL|IFM_AUTO0ULL); | |||
| 1077 | ||||
| 1078 | /* | |||
| 1079 | * Call MI attach routine. | |||
| 1080 | */ | |||
| 1081 | if_attach(ifp); | |||
| 1082 | ether_ifattach(ifp); | |||
| 1083 | ||||
| 1084 | #if NKSTAT1 > 0 | |||
| 1085 | re_kstat_attach(sc); | |||
| 1086 | #endif | |||
| 1087 | ||||
| 1088 | return (0); | |||
| 1089 | ||||
| 1090 | fail_8: | |||
| 1091 | /* Destroy DMA maps for RX buffers. */ | |||
| 1092 | for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) { | |||
| 1093 | if (sc->rl_ldata.rl_rxsoft[i].rxs_dmamap != NULL((void *)0)) | |||
| 1094 | bus_dmamap_destroy(sc->sc_dmat,(*(sc->sc_dmat)->_dmamap_destroy)((sc->sc_dmat), (sc ->rl_ldata.rl_rxsoft[i].rxs_dmamap)) | |||
| 1095 | sc->rl_ldata.rl_rxsoft[i].rxs_dmamap)(*(sc->sc_dmat)->_dmamap_destroy)((sc->sc_dmat), (sc ->rl_ldata.rl_rxsoft[i].rxs_dmamap)); | |||
| 1096 | } | |||
| 1097 | ||||
| 1098 | /* Free DMA'able memory for the RX ring. */ | |||
| 1099 | bus_dmamap_unload(sc->sc_dmat, sc->rl_ldata.rl_rx_list_map)(*(sc->sc_dmat)->_dmamap_unload)((sc->sc_dmat), (sc-> rl_ldata.rl_rx_list_map)); | |||
| 1100 | fail_7: | |||
| 1101 | bus_dmamap_destroy(sc->sc_dmat, sc->rl_ldata.rl_rx_list_map)(*(sc->sc_dmat)->_dmamap_destroy)((sc->sc_dmat), (sc ->rl_ldata.rl_rx_list_map)); | |||
| 1102 | fail_6: | |||
| 1103 | bus_dmamem_unmap(sc->sc_dmat,(*(sc->sc_dmat)->_dmamem_unmap)((sc->sc_dmat), ((caddr_t )sc->rl_ldata.rl_rx_list), ((((sc)->rl_ldata.rl_rx_desc_cnt * sizeof(struct rl_desc)) + (((6 * 2) + 2) + 28)))) | |||
| 1104 | (caddr_t)sc->rl_ldata.rl_rx_list, RL_RX_DMAMEM_SZ(sc))(*(sc->sc_dmat)->_dmamem_unmap)((sc->sc_dmat), ((caddr_t )sc->rl_ldata.rl_rx_list), ((((sc)->rl_ldata.rl_rx_desc_cnt * sizeof(struct rl_desc)) + (((6 * 2) + 2) + 28)))); | |||
| 1105 | fail_5: | |||
| 1106 | bus_dmamem_free(sc->sc_dmat,(*(sc->sc_dmat)->_dmamem_free)((sc->sc_dmat), (& sc->rl_ldata.rl_rx_listseg), (sc->rl_ldata.rl_rx_listnseg )) | |||
| 1107 | &sc->rl_ldata.rl_rx_listseg, sc->rl_ldata.rl_rx_listnseg)(*(sc->sc_dmat)->_dmamem_free)((sc->sc_dmat), (& sc->rl_ldata.rl_rx_listseg), (sc->rl_ldata.rl_rx_listnseg )); | |||
| 1108 | ||||
| 1109 | fail_4: | |||
| 1110 | /* Destroy DMA maps for TX buffers. */ | |||
| 1111 | for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) { | |||
| 1112 | if (sc->rl_ldata.rl_txq[i].txq_dmamap != NULL((void *)0)) | |||
| 1113 | bus_dmamap_destroy(sc->sc_dmat,(*(sc->sc_dmat)->_dmamap_destroy)((sc->sc_dmat), (sc ->rl_ldata.rl_txq[i].txq_dmamap)) | |||
| 1114 | sc->rl_ldata.rl_txq[i].txq_dmamap)(*(sc->sc_dmat)->_dmamap_destroy)((sc->sc_dmat), (sc ->rl_ldata.rl_txq[i].txq_dmamap)); | |||
| 1115 | } | |||
| 1116 | ||||
| 1117 | /* Free DMA'able memory for the TX ring. */ | |||
| 1118 | bus_dmamap_unload(sc->sc_dmat, sc->rl_ldata.rl_tx_list_map)(*(sc->sc_dmat)->_dmamap_unload)((sc->sc_dmat), (sc-> rl_ldata.rl_tx_list_map)); | |||
| 1119 | fail_3: | |||
| 1120 | bus_dmamap_destroy(sc->sc_dmat, sc->rl_ldata.rl_tx_list_map)(*(sc->sc_dmat)->_dmamap_destroy)((sc->sc_dmat), (sc ->rl_ldata.rl_tx_list_map)); | |||
| 1121 | fail_2: | |||
| 1122 | bus_dmamem_unmap(sc->sc_dmat,(*(sc->sc_dmat)->_dmamem_unmap)((sc->sc_dmat), ((caddr_t )sc->rl_ldata.rl_tx_list), (((sc)->rl_ldata.rl_tx_desc_cnt * sizeof(struct rl_desc)))) | |||
| 1123 | (caddr_t)sc->rl_ldata.rl_tx_list, RL_TX_LIST_SZ(sc))(*(sc->sc_dmat)->_dmamem_unmap)((sc->sc_dmat), ((caddr_t )sc->rl_ldata.rl_tx_list), (((sc)->rl_ldata.rl_tx_desc_cnt * sizeof(struct rl_desc)))); | |||
| 1124 | fail_1: | |||
| 1125 | bus_dmamem_free(sc->sc_dmat,(*(sc->sc_dmat)->_dmamem_free)((sc->sc_dmat), (& sc->rl_ldata.rl_tx_listseg), (sc->rl_ldata.rl_tx_listnseg )) | |||
| 1126 | &sc->rl_ldata.rl_tx_listseg, sc->rl_ldata.rl_tx_listnseg)(*(sc->sc_dmat)->_dmamem_free)((sc->sc_dmat), (& sc->rl_ldata.rl_tx_listseg), (sc->rl_ldata.rl_tx_listnseg )); | |||
| 1127 | fail_0: | |||
| 1128 | return (1); | |||
| 1129 | } | |||
| 1130 | ||||
| 1131 | ||||
| 1132 | int | |||
| 1133 | re_newbuf(struct rl_softc *sc) | |||
| 1134 | { | |||
| 1135 | struct mbuf *m; | |||
| 1136 | bus_dmamap_t map; | |||
| 1137 | struct rl_desc *d; | |||
| 1138 | struct rl_rxsoft *rxs; | |||
| 1139 | u_int32_t cmdstat; | |||
| 1140 | int error, idx; | |||
| 1141 | ||||
| 1142 | m = MCLGETL(NULL, M_DONTWAIT, RL_FRAMELEN(sc->rl_max_mtu))m_clget((((void *)0)), (0x0002), ((sc->rl_max_mtu + ((6 * 2 ) + 2) + 4 + 4))); | |||
| 1143 | if (!m) | |||
| 1144 | return (ENOBUFS55); | |||
| 1145 | ||||
| 1146 | /* | |||
| 1147 | * Initialize mbuf length fields and fixup | |||
| 1148 | * alignment so that the frame payload is | |||
| 1149 | * longword aligned on strict alignment archs. | |||
| 1150 | */ | |||
| 1151 | m->m_lenm_hdr.mh_len = m->m_pkthdrM_dat.MH.MH_pkthdr.len = RL_FRAMELEN(sc->rl_max_mtu)(sc->rl_max_mtu + ((6 * 2) + 2) + 4 + 4); | |||
| 1152 | m->m_datam_hdr.mh_data += RE_ETHER_ALIGN0; | |||
| 1153 | ||||
| 1154 | idx = sc->rl_ldata.rl_rx_prodidx; | |||
| 1155 | rxs = &sc->rl_ldata.rl_rxsoft[idx]; | |||
| 1156 | map = rxs->rxs_dmamap; | |||
| 1157 | error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,(*(sc->sc_dmat)->_dmamap_load_mbuf)((sc->sc_dmat), ( map), (m), (0x0200|0x0001)) | |||
| 1158 | BUS_DMA_READ|BUS_DMA_NOWAIT)(*(sc->sc_dmat)->_dmamap_load_mbuf)((sc->sc_dmat), ( map), (m), (0x0200|0x0001)); | |||
| 1159 | if (error) { | |||
| 1160 | m_freem(m); | |||
| 1161 | return (ENOBUFS55); | |||
| 1162 | } | |||
| 1163 | ||||
| 1164 | bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (map), (0), (map->dm_mapsize), (0x01)) | |||
| 1165 | BUS_DMASYNC_PREREAD)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (map), (0), (map->dm_mapsize), (0x01)); | |||
| 1166 | ||||
| 1167 | d = &sc->rl_ldata.rl_rx_list[idx]; | |||
| 1168 | RL_RXDESCSYNC(sc, idx, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)(*((sc)->sc_dmat)->_dmamap_sync)(((sc)->sc_dmat), (( sc)->rl_ldata.rl_rx_list_map), (sizeof(struct rl_desc) * ( idx)), (sizeof(struct rl_desc)), ((0x02|0x08))); | |||
| 1169 | cmdstat = letoh32(d->rl_cmdstat)((__uint32_t)(d->rl_cmdstat)); | |||
| 1170 | RL_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD)(*((sc)->sc_dmat)->_dmamap_sync)(((sc)->sc_dmat), (( sc)->rl_ldata.rl_rx_list_map), (sizeof(struct rl_desc) * ( idx)), (sizeof(struct rl_desc)), ((0x01))); | |||
| 1171 | if (cmdstat & RL_RDESC_STAT_OWN0x80000000) { | |||
| 1172 | printf("%s: tried to map busy RX descriptor\n", | |||
| 1173 | sc->sc_dev.dv_xname); | |||
| 1174 | m_freem(m); | |||
| 1175 | return (ENOBUFS55); | |||
| 1176 | } | |||
| 1177 | ||||
| 1178 | rxs->rxs_mbuf = m; | |||
| 1179 | ||||
| 1180 | d->rl_vlanctl = 0; | |||
| 1181 | cmdstat = map->dm_segs[0].ds_len; | |||
| 1182 | if (idx == sc->rl_ldata.rl_rx_desc_cnt - 1) | |||
| 1183 | cmdstat |= RL_RDESC_CMD_EOR0x40000000; | |||
| 1184 | re_set_bufaddr(d, map->dm_segs[0].ds_addr); | |||
| 1185 | d->rl_cmdstat = htole32(cmdstat)((__uint32_t)(cmdstat)); | |||
| 1186 | RL_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)(*((sc)->sc_dmat)->_dmamap_sync)(((sc)->sc_dmat), (( sc)->rl_ldata.rl_rx_list_map), (sizeof(struct rl_desc) * ( idx)), (sizeof(struct rl_desc)), ((0x01|0x04))); | |||
| 1187 | cmdstat |= RL_RDESC_CMD_OWN0x80000000; | |||
| 1188 | d->rl_cmdstat = htole32(cmdstat)((__uint32_t)(cmdstat)); | |||
| 1189 | RL_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)(*((sc)->sc_dmat)->_dmamap_sync)(((sc)->sc_dmat), (( sc)->rl_ldata.rl_rx_list_map), (sizeof(struct rl_desc) * ( idx)), (sizeof(struct rl_desc)), ((0x01|0x04))); | |||
| 1190 | ||||
| 1191 | sc->rl_ldata.rl_rx_prodidx = RL_NEXT_RX_DESC(sc, idx)(((idx) + 1) % (sc)->rl_ldata.rl_rx_desc_cnt); | |||
| 1192 | ||||
| 1193 | return (0); | |||
| 1194 | } | |||
| 1195 | ||||
| 1196 | ||||
| 1197 | int | |||
| 1198 | re_tx_list_init(struct rl_softc *sc) | |||
| 1199 | { | |||
| 1200 | int i; | |||
| 1201 | ||||
| 1202 | memset(sc->rl_ldata.rl_tx_list, 0, RL_TX_LIST_SZ(sc))__builtin_memset((sc->rl_ldata.rl_tx_list), (0), (((sc)-> rl_ldata.rl_tx_desc_cnt * sizeof(struct rl_desc)))); | |||
| 1203 | for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) { | |||
| 1204 | sc->rl_ldata.rl_txq[i].txq_mbuf = NULL((void *)0); | |||
| 1205 | } | |||
| 1206 | ||||
| 1207 | bus_dmamap_sync(sc->sc_dmat,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (sc-> rl_ldata.rl_tx_list_map), (0), (sc->rl_ldata.rl_tx_list_map ->dm_mapsize), (0x01|0x04)) | |||
| 1208 | sc->rl_ldata.rl_tx_list_map, 0,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (sc-> rl_ldata.rl_tx_list_map), (0), (sc->rl_ldata.rl_tx_list_map ->dm_mapsize), (0x01|0x04)) | |||
| 1209 | sc->rl_ldata.rl_tx_list_map->dm_mapsize,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (sc-> rl_ldata.rl_tx_list_map), (0), (sc->rl_ldata.rl_tx_list_map ->dm_mapsize), (0x01|0x04)) | |||
| 1210 | BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (sc-> rl_ldata.rl_tx_list_map), (0), (sc->rl_ldata.rl_tx_list_map ->dm_mapsize), (0x01|0x04)); | |||
| 1211 | sc->rl_ldata.rl_txq_prodidx = 0; | |||
| 1212 | sc->rl_ldata.rl_txq_considx = 0; | |||
| 1213 | sc->rl_ldata.rl_tx_free = sc->rl_ldata.rl_tx_desc_cnt; | |||
| 1214 | sc->rl_ldata.rl_tx_nextfree = 0; | |||
| 1215 | ||||
| 1216 | return (0); | |||
| 1217 | } | |||
| 1218 | ||||
| 1219 | int | |||
| 1220 | re_rx_list_init(struct rl_softc *sc) | |||
| 1221 | { | |||
| 1222 | bzero(sc->rl_ldata.rl_rx_list, RL_RX_LIST_SZ(sc))__builtin_bzero((sc->rl_ldata.rl_rx_list), (((sc)->rl_ldata .rl_rx_desc_cnt * sizeof(struct rl_desc)))); | |||
| 1223 | ||||
| 1224 | sc->rl_ldata.rl_rx_prodidx = 0; | |||
| 1225 | sc->rl_ldata.rl_rx_considx = 0; | |||
| 1226 | sc->rl_head = sc->rl_tail = NULL((void *)0); | |||
| 1227 | ||||
| 1228 | if_rxr_init(&sc->rl_ldata.rl_rx_ring, 2, | |||
| 1229 | sc->rl_ldata.rl_rx_desc_cnt - 1); | |||
| 1230 | re_rx_list_fill(sc); | |||
| 1231 | ||||
| 1232 | return (0); | |||
| 1233 | } | |||
| 1234 | ||||
| 1235 | void | |||
| 1236 | re_rx_list_fill(struct rl_softc *sc) | |||
| 1237 | { | |||
| 1238 | u_int slots; | |||
| 1239 | ||||
| 1240 | for (slots = if_rxr_get(&sc->rl_ldata.rl_rx_ring, | |||
| 1241 | sc->rl_ldata.rl_rx_desc_cnt); | |||
| 1242 | slots > 0; slots--) { | |||
| 1243 | if (re_newbuf(sc) == ENOBUFS55) | |||
| 1244 | break; | |||
| 1245 | } | |||
| 1246 | if_rxr_put(&sc->rl_ldata.rl_rx_ring, slots)do { (&sc->rl_ldata.rl_rx_ring)->rxr_alive -= (slots ); } while (0); | |||
| 1247 | } | |||
| 1248 | ||||
| 1249 | /* | |||
| 1250 | * RX handler for C+ and 8169. For the gigE chips, we support | |||
| 1251 | * the reception of jumbo frames that have been fragmented | |||
| 1252 | * across multiple 2K mbuf cluster buffers. | |||
| 1253 | */ | |||
| 1254 | int | |||
| 1255 | re_rxeof(struct rl_softc *sc) | |||
| 1256 | { | |||
| 1257 | struct mbuf_list ml = MBUF_LIST_INITIALIZER(){ ((void *)0), ((void *)0), 0 }; | |||
| 1258 | struct mbuf *m; | |||
| 1259 | struct ifnet *ifp; | |||
| 1260 | int i, total_len, rx = 0; | |||
| 1261 | struct rl_desc *cur_rx; | |||
| 1262 | struct rl_rxsoft *rxs; | |||
| 1263 | u_int32_t rxstat, rxvlan; | |||
| 1264 | ||||
| 1265 | ifp = &sc->sc_arpcom.ac_if; | |||
| 1266 | ||||
| 1267 | for (i = sc->rl_ldata.rl_rx_considx; | |||
| 1268 | if_rxr_inuse(&sc->rl_ldata.rl_rx_ring)((&sc->rl_ldata.rl_rx_ring)->rxr_alive) > 0; | |||
| 1269 | i = RL_NEXT_RX_DESC(sc, i)(((i) + 1) % (sc)->rl_ldata.rl_rx_desc_cnt)) { | |||
| 1270 | cur_rx = &sc->rl_ldata.rl_rx_list[i]; | |||
| 1271 | RL_RXDESCSYNC(sc, i,(*((sc)->sc_dmat)->_dmamap_sync)(((sc)->sc_dmat), (( sc)->rl_ldata.rl_rx_list_map), (sizeof(struct rl_desc) * ( i)), (sizeof(struct rl_desc)), ((0x02|0x08))) | |||
| 1272 | BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)(*((sc)->sc_dmat)->_dmamap_sync)(((sc)->sc_dmat), (( sc)->rl_ldata.rl_rx_list_map), (sizeof(struct rl_desc) * ( i)), (sizeof(struct rl_desc)), ((0x02|0x08))); | |||
| 1273 | rxstat = letoh32(cur_rx->rl_cmdstat)((__uint32_t)(cur_rx->rl_cmdstat)); | |||
| 1274 | rxvlan = letoh32(cur_rx->rl_vlanctl)((__uint32_t)(cur_rx->rl_vlanctl)); | |||
| 1275 | RL_RXDESCSYNC(sc, i, BUS_DMASYNC_PREREAD)(*((sc)->sc_dmat)->_dmamap_sync)(((sc)->sc_dmat), (( sc)->rl_ldata.rl_rx_list_map), (sizeof(struct rl_desc) * ( i)), (sizeof(struct rl_desc)), ((0x01))); | |||
| 1276 | if ((rxstat & RL_RDESC_STAT_OWN0x80000000) != 0) | |||
| 1277 | break; | |||
| 1278 | total_len = rxstat & sc->rl_rxlenmask; | |||
| 1279 | rxs = &sc->rl_ldata.rl_rxsoft[i]; | |||
| 1280 | m = rxs->rxs_mbuf; | |||
| 1281 | rxs->rxs_mbuf = NULL((void *)0); | |||
| 1282 | if_rxr_put(&sc->rl_ldata.rl_rx_ring, 1)do { (&sc->rl_ldata.rl_rx_ring)->rxr_alive -= (1); } while (0); | |||
| 1283 | rx = 1; | |||
| 1284 | ||||
| 1285 | /* Invalidate the RX mbuf and unload its map */ | |||
| 1286 | ||||
| 1287 | bus_dmamap_sync(sc->sc_dmat,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (rxs-> rxs_dmamap), (0), (rxs->rxs_dmamap->dm_mapsize), (0x02) ) | |||
| 1288 | rxs->rxs_dmamap, 0, rxs->rxs_dmamap->dm_mapsize,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (rxs-> rxs_dmamap), (0), (rxs->rxs_dmamap->dm_mapsize), (0x02) ) | |||
| 1289 | BUS_DMASYNC_POSTREAD)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (rxs-> rxs_dmamap), (0), (rxs->rxs_dmamap->dm_mapsize), (0x02) ); | |||
| 1290 | bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap)(*(sc->sc_dmat)->_dmamap_unload)((sc->sc_dmat), (rxs ->rxs_dmamap)); | |||
| 1291 | ||||
| 1292 | if ((sc->rl_flags & RL_FLAG_JUMBOV20x00100000) != 0 && | |||
| 1293 | (rxstat & (RL_RDESC_STAT_SOF0x20000000 | RL_RDESC_STAT_EOF0x10000000)) != | |||
| 1294 | (RL_RDESC_STAT_SOF0x20000000 | RL_RDESC_STAT_EOF0x10000000)) { | |||
| 1295 | ifp->if_ierrorsif_data.ifi_ierrors++; | |||
| 1296 | m_freem(m); | |||
| 1297 | continue; | |||
| 1298 | } else if (!(rxstat & RL_RDESC_STAT_EOF0x10000000)) { | |||
| 1299 | m->m_lenm_hdr.mh_len = RL_FRAMELEN(sc->rl_max_mtu)(sc->rl_max_mtu + ((6 * 2) + 2) + 4 + 4); | |||
| 1300 | if (sc->rl_head == NULL((void *)0)) | |||
| 1301 | sc->rl_head = sc->rl_tail = m; | |||
| 1302 | else { | |||
| 1303 | m->m_flagsm_hdr.mh_flags &= ~M_PKTHDR0x0002; | |||
| 1304 | sc->rl_tail->m_nextm_hdr.mh_next = m; | |||
| 1305 | sc->rl_tail = m; | |||
| 1306 | } | |||
| 1307 | continue; | |||
| 1308 | } | |||
| 1309 | ||||
| 1310 | /* | |||
| 1311 | * NOTE: for the 8139C+, the frame length field | |||
| 1312 | * is always 12 bits in size, but for the gigE chips, | |||
| 1313 | * it is 13 bits (since the max RX frame length is 16K). | |||
| 1314 | * Unfortunately, all 32 bits in the status word | |||
| 1315 | * were already used, so to make room for the extra | |||
| 1316 | * length bit, Realtek took out the 'frame alignment | |||
| 1317 | * error' bit and shifted the other status bits | |||
| 1318 | * over one slot. The OWN, EOR, FS and LS bits are | |||
| 1319 | * still in the same places. We have already extracted | |||
| 1320 | * the frame length and checked the OWN bit, so rather | |||
| 1321 | * than using an alternate bit mapping, we shift the | |||
| 1322 | * status bits one space to the right so we can evaluate | |||
| 1323 | * them using the 8169 status as though it was in the | |||
| 1324 | * same format as that of the 8139C+. | |||
| 1325 | */ | |||
| 1326 | if (sc->sc_hwrev != RL_HWREV_8139CPLUS0x74800000) | |||
| 1327 | rxstat >>= 1; | |||
| 1328 | ||||
| 1329 | /* | |||
| 1330 | * if total_len > 2^13-1, both _RXERRSUM and _GIANT will be | |||
| 1331 | * set, but if CRC is clear, it will still be a valid frame. | |||
| 1332 | */ | |||
| 1333 | if ((rxstat & RL_RDESC_STAT_RXERRSUM0x00100000) != 0 && | |||
| 1334 | !(rxstat & RL_RDESC_STAT_RXERRSUM0x00100000 && !(total_len > 8191 && | |||
| 1335 | (rxstat & RL_RDESC_STAT_ERRS(0x00200000|0x00080000| 0x00040000)) == RL_RDESC_STAT_GIANT0x00200000))) { | |||
| 1336 | ifp->if_ierrorsif_data.ifi_ierrors++; | |||
| 1337 | /* | |||
| 1338 | * If this is part of a multi-fragment packet, | |||
| 1339 | * discard all the pieces. | |||
| 1340 | */ | |||
| 1341 | if (sc->rl_head != NULL((void *)0)) { | |||
| 1342 | m_freem(sc->rl_head); | |||
| 1343 | sc->rl_head = sc->rl_tail = NULL((void *)0); | |||
| 1344 | } | |||
| 1345 | m_freem(m); | |||
| 1346 | continue; | |||
| 1347 | } | |||
| 1348 | ||||
| 1349 | if (sc->rl_head != NULL((void *)0)) { | |||
| 1350 | m->m_lenm_hdr.mh_len = total_len % RL_FRAMELEN(sc->rl_max_mtu)(sc->rl_max_mtu + ((6 * 2) + 2) + 4 + 4); | |||
| 1351 | if (m->m_lenm_hdr.mh_len == 0) | |||
| 1352 | m->m_lenm_hdr.mh_len = RL_FRAMELEN(sc->rl_max_mtu)(sc->rl_max_mtu + ((6 * 2) + 2) + 4 + 4); | |||
| 1353 | /* | |||
| 1354 | * Special case: if there's 4 bytes or less | |||
| 1355 | * in this buffer, the mbuf can be discarded: | |||
| 1356 | * the last 4 bytes is the CRC, which we don't | |||
| 1357 | * care about anyway. | |||
| 1358 | */ | |||
| 1359 | if (m->m_lenm_hdr.mh_len <= ETHER_CRC_LEN4) { | |||
| 1360 | sc->rl_tail->m_lenm_hdr.mh_len -= | |||
| 1361 | (ETHER_CRC_LEN4 - m->m_lenm_hdr.mh_len); | |||
| 1362 | m_freem(m); | |||
| 1363 | } else { | |||
| 1364 | m->m_lenm_hdr.mh_len -= ETHER_CRC_LEN4; | |||
| 1365 | m->m_flagsm_hdr.mh_flags &= ~M_PKTHDR0x0002; | |||
| 1366 | sc->rl_tail->m_nextm_hdr.mh_next = m; | |||
| 1367 | } | |||
| 1368 | m = sc->rl_head; | |||
| 1369 | sc->rl_head = sc->rl_tail = NULL((void *)0); | |||
| 1370 | m->m_pkthdrM_dat.MH.MH_pkthdr.len = total_len - ETHER_CRC_LEN4; | |||
| 1371 | } else | |||
| 1372 | m->m_pkthdrM_dat.MH.MH_pkthdr.len = m->m_lenm_hdr.mh_len = | |||
| 1373 | (total_len - ETHER_CRC_LEN4); | |||
| 1374 | ||||
| 1375 | /* Do RX checksumming */ | |||
| 1376 | ||||
| 1377 | if (sc->rl_flags & RL_FLAG_DESCV20x00000020) { | |||
| 1378 | /* Check IP header checksum */ | |||
| 1379 | if ((rxvlan & RL_RDESC_IPV40x40000000) && | |||
| 1380 | !(rxstat & RL_RDESC_STAT_IPSUMBAD0x00008000)) | |||
| 1381 | m->m_pkthdrM_dat.MH.MH_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK0x0008; | |||
| 1382 | ||||
| 1383 | /* Check TCP/UDP checksum */ | |||
| 1384 | if ((rxvlan & (RL_RDESC_IPV40x40000000|RL_RDESC_IPV60x80000000)) && | |||
| 1385 | (((rxstat & RL_RDESC_STAT_TCP0x00010000) && | |||
| 1386 | !(rxstat & RL_RDESC_STAT_TCPSUMBAD0x00002000)) || | |||
| 1387 | ((rxstat & RL_RDESC_STAT_UDP0x00020000) && | |||
| 1388 | !(rxstat & RL_RDESC_STAT_UDPSUMBAD0x00004000)))) | |||
| 1389 | m->m_pkthdrM_dat.MH.MH_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK0x0020 | | |||
| 1390 | M_UDP_CSUM_IN_OK0x0080; | |||
| 1391 | } else { | |||
| 1392 | /* Check IP header checksum */ | |||
| 1393 | if ((rxstat & RL_RDESC_STAT_PROTOID0x00030000) && | |||
| 1394 | !(rxstat & RL_RDESC_STAT_IPSUMBAD0x00008000)) | |||
| 1395 | m->m_pkthdrM_dat.MH.MH_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK0x0008; | |||
| 1396 | ||||
| 1397 | /* Check TCP/UDP checksum */ | |||
| 1398 | if ((RL_TCPPKT(rxstat)(((rxstat) & 0x00030000) == 0x00010000) && | |||
| 1399 | !(rxstat & RL_RDESC_STAT_TCPSUMBAD0x00002000)) || | |||
| 1400 | (RL_UDPPKT(rxstat)(((rxstat) & 0x00030000) == 0x00020000) && | |||
| 1401 | !(rxstat & RL_RDESC_STAT_UDPSUMBAD0x00004000))) | |||
| 1402 | m->m_pkthdrM_dat.MH.MH_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK0x0020 | | |||
| 1403 | M_UDP_CSUM_IN_OK0x0080; | |||
| 1404 | } | |||
| 1405 | #if NVLAN1 > 0 | |||
| 1406 | if (rxvlan & RL_RDESC_VLANCTL_TAG0x00010000) { | |||
| 1407 | m->m_pkthdrM_dat.MH.MH_pkthdr.ether_vtag = | |||
| 1408 | ntohs((rxvlan & RL_RDESC_VLANCTL_DATA))(__uint16_t)(__builtin_constant_p((rxvlan & 0x0000FFFF)) ? (__uint16_t)(((__uint16_t)((rxvlan & 0x0000FFFF)) & 0xffU ) << 8 | ((__uint16_t)((rxvlan & 0x0000FFFF)) & 0xff00U) >> 8) : __swap16md((rxvlan & 0x0000FFFF)) ); | |||
| 1409 | m->m_flagsm_hdr.mh_flags |= M_VLANTAG0x0020; | |||
| 1410 | } | |||
| 1411 | #endif | |||
| 1412 | ||||
| 1413 | ml_enqueue(&ml, m); | |||
| 1414 | } | |||
| 1415 | ||||
| 1416 | if (ifiq_input(&ifp->if_rcv, &ml)) | |||
| 1417 | if_rxr_livelocked(&sc->rl_ldata.rl_rx_ring); | |||
| 1418 | ||||
| 1419 | sc->rl_ldata.rl_rx_considx = i; | |||
| 1420 | re_rx_list_fill(sc); | |||
| 1421 | ||||
| 1422 | ||||
| 1423 | return (rx); | |||
| 1424 | } | |||
| 1425 | ||||
| 1426 | int | |||
| 1427 | re_txeof(struct rl_softc *sc) | |||
| 1428 | { | |||
| 1429 | struct ifnet *ifp = &sc->sc_arpcom.ac_if; | |||
| 1430 | struct rl_txq *txq; | |||
| 1431 | uint32_t txstat; | |||
| 1432 | unsigned int prod, cons; | |||
| 1433 | unsigned int idx; | |||
| 1434 | int free = 0; | |||
| 1435 | ||||
| 1436 | prod = sc->rl_ldata.rl_txq_prodidx; | |||
| 1437 | cons = sc->rl_ldata.rl_txq_considx; | |||
| 1438 | ||||
| 1439 | while (prod != cons) { | |||
| 1440 | txq = &sc->rl_ldata.rl_txq[cons]; | |||
| 1441 | ||||
| 1442 | idx = txq->txq_descidx; | |||
| 1443 | RL_TXDESCSYNC(sc, idx, BUS_DMASYNC_POSTREAD)(*((sc)->sc_dmat)->_dmamap_sync)(((sc)->sc_dmat), (( sc)->rl_ldata.rl_tx_list_map), (sizeof(struct rl_desc) * ( idx)), (sizeof(struct rl_desc)), ((0x02))); | |||
| 1444 | txstat = letoh32(sc->rl_ldata.rl_tx_list[idx].rl_cmdstat)((__uint32_t)(sc->rl_ldata.rl_tx_list[idx].rl_cmdstat)); | |||
| 1445 | RL_TXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD)(*((sc)->sc_dmat)->_dmamap_sync)(((sc)->sc_dmat), (( sc)->rl_ldata.rl_tx_list_map), (sizeof(struct rl_desc) * ( idx)), (sizeof(struct rl_desc)), ((0x01))); | |||
| 1446 | if (ISSET(txstat, RL_TDESC_CMD_OWN)((txstat) & (0x80000000))) { | |||
| 1447 | free = 2; | |||
| 1448 | break; | |||
| 1449 | } | |||
| 1450 | ||||
| 1451 | bus_dmamap_sync(sc->sc_dmat, txq->txq_dmamap,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (txq-> txq_dmamap), (0), (txq->txq_dmamap->dm_mapsize), (0x08) ) | |||
| 1452 | 0, txq->txq_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (txq-> txq_dmamap), (0), (txq->txq_dmamap->dm_mapsize), (0x08) ); | |||
| 1453 | bus_dmamap_unload(sc->sc_dmat, txq->txq_dmamap)(*(sc->sc_dmat)->_dmamap_unload)((sc->sc_dmat), (txq ->txq_dmamap)); | |||
| 1454 | m_freem(txq->txq_mbuf); | |||
| 1455 | txq->txq_mbuf = NULL((void *)0); | |||
| 1456 | ||||
| 1457 | if (txstat & (RL_TDESC_STAT_EXCESSCOL0x00100000 | RL_TDESC_STAT_COLCNT0x000F0000)) | |||
| 1458 | ifp->if_collisionsif_data.ifi_collisions++; | |||
| 1459 | if (txstat & RL_TDESC_STAT_TXERRSUM0x00800000) | |||
| 1460 | ifp->if_oerrorsif_data.ifi_oerrors++; | |||
| 1461 | ||||
| 1462 | cons = RL_NEXT_TX_DESC(sc, idx)(((idx) + 1) % (sc)->rl_ldata.rl_tx_desc_cnt); | |||
| 1463 | free = 1; | |||
| 1464 | } | |||
| 1465 | ||||
| 1466 | if (free == 0) | |||
| 1467 | return (0); | |||
| 1468 | ||||
| 1469 | sc->rl_ldata.rl_txq_considx = cons; | |||
| 1470 | ||||
| 1471 | /* | |||
| 1472 | * Some chips will ignore a second TX request issued while an | |||
| 1473 | * existing transmission is in progress. If the transmitter goes | |||
| 1474 | * idle but there are still packets waiting to be sent, we need | |||
| 1475 | * to restart the channel here to flush them out. This only | |||
| 1476 | * seems to be required with the PCIe devices. | |||
| 1477 | */ | |||
| 1478 | if (ifq_is_oactive(&ifp->if_snd)) | |||
| 1479 | ifq_restart(&ifp->if_snd); | |||
| 1480 | else if (free == 2) | |||
| 1481 | ifq_serialize(&ifp->if_snd, &sc->rl_start); | |||
| 1482 | else | |||
| 1483 | ifp->if_timer = 0; | |||
| 1484 | ||||
| 1485 | return (1); | |||
| 1486 | } | |||
| 1487 | ||||
| 1488 | void | |||
| 1489 | re_tick(void *xsc) | |||
| 1490 | { | |||
| 1491 | struct rl_softc *sc = xsc; | |||
| 1492 | struct mii_data *mii; | |||
| 1493 | int s; | |||
| 1494 | ||||
| 1495 | mii = &sc->sc_mii; | |||
| 1496 | ||||
| 1497 | s = splnet()splraise(0x4); | |||
| 1498 | ||||
| 1499 | mii_tick(mii); | |||
| 1500 | ||||
| 1501 | if ((sc->rl_flags & RL_FLAG_LINK0x00002000) == 0) | |||
| 1502 | re_miibus_statchg(&sc->sc_dev); | |||
| 1503 | ||||
| 1504 | splx(s)spllower(s); | |||
| 1505 | ||||
| 1506 | timeout_add_sec(&sc->timer_handle, 1); | |||
| 1507 | } | |||
| 1508 | ||||
| 1509 | int | |||
| 1510 | re_intr(void *arg) | |||
| 1511 | { | |||
| 1512 | struct rl_softc *sc = arg; | |||
| 1513 | struct ifnet *ifp; | |||
| 1514 | u_int16_t status; | |||
| 1515 | int claimed = 0, rx, tx; | |||
| 1516 | ||||
| 1517 | ifp = &sc->sc_arpcom.ac_if; | |||
| 1518 | ||||
| 1519 | if (!(ifp->if_flags & IFF_RUNNING0x40)) | |||
| 1520 | return (0); | |||
| 1521 | ||||
| 1522 | /* Disable interrupts. */ | |||
| 1523 | CSR_WRITE_2(sc, RL_IMR, 0)((sc->rl_btag)->write_2((sc->rl_bhandle), (0x003C), ( 0))); | |||
| 1524 | ||||
| 1525 | rx = tx = 0; | |||
| 1526 | status = CSR_READ_2(sc, RL_ISR)((sc->rl_btag)->read_2((sc->rl_bhandle), (0x003E))); | |||
| 1527 | /* If the card has gone away the read returns 0xffff. */ | |||
| 1528 | if (status == 0xffff) | |||
| 1529 | return (0); | |||
| 1530 | if (status) | |||
| 1531 | CSR_WRITE_2(sc, RL_ISR, status)((sc->rl_btag)->write_2((sc->rl_bhandle), (0x003E), ( status))); | |||
| 1532 | ||||
| 1533 | if (status & RL_ISR_TIMEOUT_EXPIRED0x4000) | |||
| 1534 | claimed = 1; | |||
| 1535 | ||||
| 1536 | if (status & RL_INTRS_CPLUS(0x0001|0x0002|0x0008| 0x0010|0x0040| 0x8000|0x0004)) { | |||
| 1537 | if (status & | |||
| 1538 | (sc->rl_rx_ack | RL_ISR_RX_ERR0x0002 | RL_ISR_FIFO_OFLOW0x0040)) { | |||
| 1539 | rx |= re_rxeof(sc); | |||
| 1540 | claimed = 1; | |||
| 1541 | } | |||
| 1542 | ||||
| 1543 | if (status & (sc->rl_tx_ack | RL_ISR_TX_ERR0x0008)) { | |||
| 1544 | tx |= re_txeof(sc); | |||
| 1545 | claimed = 1; | |||
| 1546 | } | |||
| 1547 | ||||
| 1548 | if (status & RL_ISR_SYSTEM_ERR0x8000) { | |||
| 1549 | KERNEL_LOCK()_kernel_lock(); | |||
| 1550 | re_init(ifp); | |||
| 1551 | KERNEL_UNLOCK()_kernel_unlock(); | |||
| 1552 | claimed = 1; | |||
| 1553 | } | |||
| 1554 | } | |||
| 1555 | ||||
| 1556 | if (sc->rl_imtype == RL_IMTYPE_SIM1) { | |||
| 1557 | if (sc->rl_timerintr) { | |||
| 1558 | if ((tx | rx) == 0) { | |||
| 1559 | /* | |||
| 1560 | * Nothing needs to be processed, fall back | |||
| 1561 | * to using TX/RX interrupts. | |||
| 1562 | */ | |||
| 1563 | re_setup_intr(sc, 1, RL_IMTYPE_NONE0); | |||
| 1564 | ||||
| 1565 | /* | |||
| 1566 | * Recollect, mainly to avoid the possible | |||
| 1567 | * race introduced by changing interrupt | |||
| 1568 | * masks. | |||
| 1569 | */ | |||
| 1570 | re_rxeof(sc); | |||
| 1571 | re_txeof(sc); | |||
| 1572 | } else | |||
| 1573 | CSR_WRITE_4(sc, RL_TIMERCNT, 1)((sc->rl_btag)->write_4((sc->rl_bhandle), (0x0048), ( 1))); /* reload */ | |||
| 1574 | } else if (tx | rx) { | |||
| 1575 | /* | |||
| 1576 | * Assume that using simulated interrupt moderation | |||
| 1577 | * (hardware timer based) could reduce the interrupt | |||
| 1578 | * rate. | |||
| 1579 | */ | |||
| 1580 | re_setup_intr(sc, 1, RL_IMTYPE_SIM1); | |||
| 1581 | } | |||
| 1582 | } | |||
| 1583 | ||||
| 1584 | CSR_WRITE_2(sc, RL_IMR, sc->rl_intrs)((sc->rl_btag)->write_2((sc->rl_bhandle), (0x003C), ( sc->rl_intrs))); | |||
| 1585 | ||||
| 1586 | return (claimed); | |||
| 1587 | } | |||
| 1588 | ||||
| 1589 | int | |||
| 1590 | re_encap(struct rl_softc *sc, unsigned int idx, struct mbuf *m) | |||
| 1591 | { | |||
| 1592 | struct rl_txq *txq; | |||
| 1593 | bus_dmamap_t map; | |||
| 1594 | int error, seg, nsegs, curidx, lastidx, pad; | |||
| 1595 | int off; | |||
| 1596 | struct ip *ip; | |||
| 1597 | struct rl_desc *d; | |||
| 1598 | u_int32_t cmdstat, vlanctl = 0, csum_flags = 0; | |||
| 1599 | ||||
| 1600 | /* | |||
| 1601 | * Set up checksum offload. Note: checksum offload bits must | |||
| 1602 | * appear in all descriptors of a multi-descriptor transmit | |||
| 1603 | * attempt. This is according to testing done with an 8169 | |||
| 1604 | * chip. This is a requirement. | |||
| 1605 | */ | |||
| 1606 | ||||
| 1607 | /* | |||
| 1608 | * Set RL_TDESC_CMD_IPCSUM if any checksum offloading | |||
| 1609 | * is requested. Otherwise, RL_TDESC_CMD_TCPCSUM/ | |||
| 1610 | * RL_TDESC_CMD_UDPCSUM does not take effect. | |||
| 1611 | */ | |||
| 1612 | ||||
| 1613 | if ((sc->rl_flags & RL_FLAG_JUMBOV20x00100000) && | |||
| 1614 | m->m_pkthdrM_dat.MH.MH_pkthdr.len > RL_MTU(1518 - ((6 * 2) + 2) - 4) && | |||
| 1615 | (m->m_pkthdrM_dat.MH.MH_pkthdr.csum_flags & | |||
| 1616 | (M_IPV4_CSUM_OUT0x0001|M_TCP_CSUM_OUT0x0002|M_UDP_CSUM_OUT0x0004)) != 0) { | |||
| 1617 | struct mbuf mh, *mp; | |||
| 1618 | ||||
| 1619 | mp = m_getptr(m, ETHER_HDR_LEN((6 * 2) + 2), &off); | |||
| 1620 | mh.m_flagsm_hdr.mh_flags = 0; | |||
| 1621 | mh.m_datam_hdr.mh_data = mtod(mp, caddr_t)((caddr_t)((mp)->m_hdr.mh_data)) + off; | |||
| 1622 | mh.m_nextm_hdr.mh_next = mp->m_nextm_hdr.mh_next; | |||
| 1623 | mh.m_pkthdrM_dat.MH.MH_pkthdr.len = mp->m_pkthdrM_dat.MH.MH_pkthdr.len - ETHER_HDR_LEN((6 * 2) + 2); | |||
| 1624 | mh.m_lenm_hdr.mh_len = mp->m_lenm_hdr.mh_len - off; | |||
| 1625 | ip = (struct ip *)mh.m_datam_hdr.mh_data; | |||
| 1626 | ||||
| 1627 | if (m->m_pkthdrM_dat.MH.MH_pkthdr.csum_flags & M_IPV4_CSUM_OUT0x0001) | |||
| 1628 | ip->ip_sum = in_cksum(&mh, sizeof(struct ip)); | |||
| 1629 | if (m->m_pkthdrM_dat.MH.MH_pkthdr.csum_flags & (M_TCP_CSUM_OUT0x0002|M_UDP_CSUM_OUT0x0004)) | |||
| 1630 | in_delayed_cksum(&mh); | |||
| 1631 | ||||
| 1632 | m->m_pkthdrM_dat.MH.MH_pkthdr.csum_flags &= | |||
| 1633 | ~(M_IPV4_CSUM_OUT0x0001|M_TCP_CSUM_OUT0x0002|M_UDP_CSUM_OUT0x0004); | |||
| 1634 | } | |||
| 1635 | ||||
| 1636 | if ((m->m_pkthdrM_dat.MH.MH_pkthdr.csum_flags & | |||
| 1637 | (M_IPV4_CSUM_OUT0x0001|M_TCP_CSUM_OUT0x0002|M_UDP_CSUM_OUT0x0004)) != 0) { | |||
| 1638 | if (sc->rl_flags & RL_FLAG_DESCV20x00000020) { | |||
| 1639 | vlanctl |= RL_TDESC_CMD_IPCSUMV20x20000000; | |||
| 1640 | if (m->m_pkthdrM_dat.MH.MH_pkthdr.csum_flags & M_TCP_CSUM_OUT0x0002) | |||
| 1641 | vlanctl |= RL_TDESC_CMD_TCPCSUMV20x40000000; | |||
| 1642 | if (m->m_pkthdrM_dat.MH.MH_pkthdr.csum_flags & M_UDP_CSUM_OUT0x0004) | |||
| 1643 | vlanctl |= RL_TDESC_CMD_UDPCSUMV20x80000000; | |||
| 1644 | } else { | |||
| 1645 | csum_flags |= RL_TDESC_CMD_IPCSUM0x00040000; | |||
| 1646 | if (m->m_pkthdrM_dat.MH.MH_pkthdr.csum_flags & M_TCP_CSUM_OUT0x0002) | |||
| 1647 | csum_flags |= RL_TDESC_CMD_TCPCSUM0x00010000; | |||
| 1648 | if (m->m_pkthdrM_dat.MH.MH_pkthdr.csum_flags & M_UDP_CSUM_OUT0x0004) | |||
| 1649 | csum_flags |= RL_TDESC_CMD_UDPCSUM0x00020000; | |||
| 1650 | } | |||
| 1651 | } | |||
| 1652 | ||||
| 1653 | txq = &sc->rl_ldata.rl_txq[idx]; | |||
| 1654 | map = txq->txq_dmamap; | |||
| 1655 | ||||
| 1656 | error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,(*(sc->sc_dmat)->_dmamap_load_mbuf)((sc->sc_dmat), ( map), (m), (0x0400|0x0001)) | |||
| 1657 | BUS_DMA_WRITE|BUS_DMA_NOWAIT)(*(sc->sc_dmat)->_dmamap_load_mbuf)((sc->sc_dmat), ( map), (m), (0x0400|0x0001)); | |||
| 1658 | switch (error) { | |||
| 1659 | case 0: | |||
| 1660 | break; | |||
| 1661 | ||||
| 1662 | case EFBIG27: | |||
| 1663 | if (m_defrag(m, M_DONTWAIT0x0002) == 0 && | |||
| 1664 | bus_dmamap_load_mbuf(sc->sc_dmat, map, m,(*(sc->sc_dmat)->_dmamap_load_mbuf)((sc->sc_dmat), ( map), (m), (0x0400|0x0001)) | |||
| 1665 | BUS_DMA_WRITE|BUS_DMA_NOWAIT)(*(sc->sc_dmat)->_dmamap_load_mbuf)((sc->sc_dmat), ( map), (m), (0x0400|0x0001)) == 0) | |||
| 1666 | break; | |||
| 1667 | ||||
| 1668 | /* FALLTHROUGH */ | |||
| 1669 | default: | |||
| 1670 | return (0); | |||
| 1671 | } | |||
| 1672 | ||||
| 1673 | bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (map), (0), (map->dm_mapsize), (0x04)) | |||
| 1674 | BUS_DMASYNC_PREWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (map), (0), (map->dm_mapsize), (0x04)); | |||
| 1675 | ||||
| 1676 | nsegs = map->dm_nsegs; | |||
| 1677 | pad = 0; | |||
| 1678 | ||||
| 1679 | /* | |||
| 1680 | * With some of the Realtek chips, using the checksum offload | |||
| 1681 | * support in conjunction with the autopadding feature results | |||
| 1682 | * in the transmission of corrupt frames. For example, if we | |||
| 1683 | * need to send a really small IP fragment that's less than 60 | |||
| 1684 | * bytes in size, and IP header checksumming is enabled, the | |||
| 1685 | * resulting ethernet frame that appears on the wire will | |||
| 1686 | * have garbled payload. To work around this, if TX IP checksum | |||
| 1687 | * offload is enabled, we always manually pad short frames out | |||
| 1688 | * to the minimum ethernet frame size. | |||
| 1689 | */ | |||
| 1690 | if ((sc->rl_flags & RL_FLAG_AUTOPAD0x00001000) == 0 && | |||
| 1691 | m->m_pkthdrM_dat.MH.MH_pkthdr.len < RL_IP4CSUMTX_PADLEN(((6 * 2) + 2) + 28) && | |||
| 1692 | (m->m_pkthdrM_dat.MH.MH_pkthdr.csum_flags & M_IPV4_CSUM_OUT0x0001) != 0) { | |||
| 1693 | pad = 1; | |||
| 1694 | nsegs++; | |||
| 1695 | } | |||
| 1696 | ||||
| 1697 | /* | |||
| 1698 | * Set up hardware VLAN tagging. Note: vlan tag info must | |||
| 1699 | * appear in all descriptors of a multi-descriptor | |||
| 1700 | * transmission attempt. | |||
| 1701 | */ | |||
| 1702 | #if NVLAN1 > 0 | |||
| 1703 | if (m->m_flagsm_hdr.mh_flags & M_VLANTAG0x0020) | |||
| 1704 | vlanctl |= swap16(m->m_pkthdr.ether_vtag)(__uint16_t)(__builtin_constant_p(m->M_dat.MH.MH_pkthdr.ether_vtag ) ? (__uint16_t)(((__uint16_t)(m->M_dat.MH.MH_pkthdr.ether_vtag ) & 0xffU) << 8 | ((__uint16_t)(m->M_dat.MH.MH_pkthdr .ether_vtag) & 0xff00U) >> 8) : __swap16md(m->M_dat .MH.MH_pkthdr.ether_vtag)) | | |||
| 1705 | RL_TDESC_VLANCTL_TAG0x00020000; | |||
| 1706 | #endif | |||
| 1707 | ||||
| 1708 | /* | |||
| 1709 | * Map the segment array into descriptors. Note that we set the | |||
| 1710 | * start-of-frame and end-of-frame markers for either TX or RX, but | |||
| 1711 | * they really only have meaning in the TX case. (In the RX case, | |||
| 1712 | * it's the chip that tells us where packets begin and end.) | |||
| 1713 | * We also keep track of the end of the ring and set the | |||
| 1714 | * end-of-ring bits as needed, and we set the ownership bits | |||
| 1715 | * in all except the very first descriptor. (The caller will | |||
| 1716 | * set this descriptor later when it starts transmission or | |||
| 1717 | * reception.) | |||
| 1718 | */ | |||
| 1719 | curidx = idx; | |||
| 1720 | cmdstat = RL_TDESC_CMD_SOF0x20000000; | |||
| 1721 | ||||
| 1722 | for (seg = 0; seg < map->dm_nsegs; seg++) { | |||
| 1723 | d = &sc->rl_ldata.rl_tx_list[curidx]; | |||
| 1724 | ||||
| 1725 | RL_TXDESCSYNC(sc, curidx, BUS_DMASYNC_POSTWRITE)(*((sc)->sc_dmat)->_dmamap_sync)(((sc)->sc_dmat), (( sc)->rl_ldata.rl_tx_list_map), (sizeof(struct rl_desc) * ( curidx)), (sizeof(struct rl_desc)), ((0x08))); | |||
| 1726 | ||||
| 1727 | d->rl_vlanctl = htole32(vlanctl)((__uint32_t)(vlanctl)); | |||
| 1728 | re_set_bufaddr(d, map->dm_segs[seg].ds_addr); | |||
| 1729 | cmdstat |= csum_flags | map->dm_segs[seg].ds_len; | |||
| 1730 | ||||
| 1731 | if (curidx == sc->rl_ldata.rl_tx_desc_cnt - 1) | |||
| 1732 | cmdstat |= RL_TDESC_CMD_EOR0x40000000; | |||
| 1733 | ||||
| 1734 | d->rl_cmdstat = htole32(cmdstat)((__uint32_t)(cmdstat)); | |||
| 1735 | ||||
| 1736 | RL_TXDESCSYNC(sc, curidx, BUS_DMASYNC_PREWRITE)(*((sc)->sc_dmat)->_dmamap_sync)(((sc)->sc_dmat), (( sc)->rl_ldata.rl_tx_list_map), (sizeof(struct rl_desc) * ( curidx)), (sizeof(struct rl_desc)), ((0x04))); | |||
| 1737 | ||||
| 1738 | lastidx = curidx; | |||
| 1739 | cmdstat = RL_TDESC_CMD_OWN0x80000000; | |||
| 1740 | curidx = RL_NEXT_TX_DESC(sc, curidx)(((curidx) + 1) % (sc)->rl_ldata.rl_tx_desc_cnt); | |||
| 1741 | } | |||
| 1742 | ||||
| 1743 | if (pad) { | |||
| 1744 | d = &sc->rl_ldata.rl_tx_list[curidx]; | |||
| 1745 | ||||
| 1746 | RL_TXDESCSYNC(sc, curidx, BUS_DMASYNC_POSTWRITE)(*((sc)->sc_dmat)->_dmamap_sync)(((sc)->sc_dmat), (( sc)->rl_ldata.rl_tx_list_map), (sizeof(struct rl_desc) * ( curidx)), (sizeof(struct rl_desc)), ((0x08))); | |||
| 1747 | ||||
| 1748 | d->rl_vlanctl = htole32(vlanctl)((__uint32_t)(vlanctl)); | |||
| 1749 | re_set_bufaddr(d, RL_TXPADDADDR(sc)((sc)->rl_ldata.rl_rx_list_map->dm_segs[0].ds_addr + (( sc)->rl_ldata.rl_rx_desc_cnt * sizeof(struct rl_desc)))); | |||
| 1750 | cmdstat = csum_flags | | |||
| 1751 | RL_TDESC_CMD_OWN0x80000000 | RL_TDESC_CMD_EOF0x10000000 | | |||
| 1752 | (RL_IP4CSUMTX_PADLEN(((6 * 2) + 2) + 28) + 1 - m->m_pkthdrM_dat.MH.MH_pkthdr.len); | |||
| 1753 | ||||
| 1754 | if (curidx == sc->rl_ldata.rl_tx_desc_cnt - 1) | |||
| 1755 | cmdstat |= RL_TDESC_CMD_EOR0x40000000; | |||
| 1756 | ||||
| 1757 | d->rl_cmdstat = htole32(cmdstat)((__uint32_t)(cmdstat)); | |||
| 1758 | ||||
| 1759 | RL_TXDESCSYNC(sc, curidx, BUS_DMASYNC_PREWRITE)(*((sc)->sc_dmat)->_dmamap_sync)(((sc)->sc_dmat), (( sc)->rl_ldata.rl_tx_list_map), (sizeof(struct rl_desc) * ( curidx)), (sizeof(struct rl_desc)), ((0x04))); | |||
| 1760 | ||||
| 1761 | lastidx = curidx; | |||
| 1762 | } | |||
| 1763 | ||||
| 1764 | /* d is already pointing at the last descriptor */ | |||
| 1765 | d->rl_cmdstat |= htole32(RL_TDESC_CMD_EOF)((__uint32_t)(0x10000000)); | |||
| 1766 | ||||
| 1767 | /* Transfer ownership of packet to the chip. */ | |||
| 1768 | d = &sc->rl_ldata.rl_tx_list[idx]; | |||
| 1769 | ||||
| 1770 | RL_TXDESCSYNC(sc, curidx, BUS_DMASYNC_POSTWRITE)(*((sc)->sc_dmat)->_dmamap_sync)(((sc)->sc_dmat), (( sc)->rl_ldata.rl_tx_list_map), (sizeof(struct rl_desc) * ( curidx)), (sizeof(struct rl_desc)), ((0x08))); | |||
| 1771 | d->rl_cmdstat |= htole32(RL_TDESC_CMD_OWN)((__uint32_t)(0x80000000)); | |||
| 1772 | RL_TXDESCSYNC(sc, curidx, BUS_DMASYNC_PREWRITE)(*((sc)->sc_dmat)->_dmamap_sync)(((sc)->sc_dmat), (( sc)->rl_ldata.rl_tx_list_map), (sizeof(struct rl_desc) * ( curidx)), (sizeof(struct rl_desc)), ((0x04))); | |||
| 1773 | ||||
| 1774 | /* update info of TX queue and descriptors */ | |||
| 1775 | txq->txq_mbuf = m; | |||
| 1776 | txq->txq_descidx = lastidx; | |||
| 1777 | ||||
| 1778 | return (nsegs); | |||
| 1779 | } | |||
| 1780 | ||||
| 1781 | void | |||
| 1782 | re_txstart(void *xsc) | |||
| 1783 | { | |||
| 1784 | struct rl_softc *sc = xsc; | |||
| 1785 | ||||
| 1786 | CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START)((sc->rl_btag)->write_1((sc->rl_bhandle), (sc->rl_txstart ), (0x40))); | |||
| 1787 | } | |||
| 1788 | ||||
| 1789 | /* | |||
| 1790 | * Main transmit routine for C+ and gigE NICs. | |||
| 1791 | */ | |||
| 1792 | ||||
| 1793 | void | |||
| 1794 | re_start(struct ifqueue *ifq) | |||
| 1795 | { | |||
| 1796 | struct ifnet *ifp = ifq->ifq_if; | |||
| 1797 | struct rl_softc *sc = ifp->if_softc; | |||
| 1798 | struct mbuf *m; | |||
| 1799 | unsigned int idx; | |||
| 1800 | unsigned int free, used; | |||
| 1801 | int post = 0; | |||
| 1802 | ||||
| 1803 | if (!ISSET(sc->rl_flags, RL_FLAG_LINK)((sc->rl_flags) & (0x00002000))) { | |||
| 1804 | ifq_purge(ifq); | |||
| 1805 | return; | |||
| 1806 | } | |||
| 1807 | ||||
| 1808 | free = sc->rl_ldata.rl_txq_considx; | |||
| 1809 | idx = sc->rl_ldata.rl_txq_prodidx; | |||
| 1810 | if (free <= idx) | |||
| 1811 | free += sc->rl_ldata.rl_tx_desc_cnt; | |||
| 1812 | free -= idx; | |||
| 1813 | ||||
| 1814 | for (;;) { | |||
| 1815 | if (sc->rl_ldata.rl_tx_ndescs >= free + 2) { | |||
| 1816 | ifq_set_oactive(ifq); | |||
| 1817 | break; | |||
| 1818 | } | |||
| 1819 | ||||
| 1820 | m = ifq_dequeue(ifq); | |||
| 1821 | if (m == NULL((void *)0)) | |||
| 1822 | break; | |||
| 1823 | ||||
| 1824 | used = re_encap(sc, idx, m); | |||
| 1825 | if (used == 0) { | |||
| 1826 | m_freem(m); | |||
| 1827 | continue; | |||
| 1828 | } | |||
| 1829 | ||||
| 1830 | #if NBPFILTER1 > 0 | |||
| 1831 | if (ifp->if_bpf) | |||
| 1832 | bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT(1 << 1)); | |||
| 1833 | #endif | |||
| 1834 | ||||
| 1835 | KASSERT(used <= free)((used <= free) ? (void)0 : __assert("diagnostic ", "/usr/src/sys/dev/ic/re.c" , 1835, "used <= free")); | |||
| 1836 | free -= used; | |||
| 1837 | ||||
| 1838 | idx += used; | |||
| 1839 | if (idx >= sc->rl_ldata.rl_tx_desc_cnt) | |||
| 1840 | idx -= sc->rl_ldata.rl_tx_desc_cnt; | |||
| 1841 | ||||
| 1842 | post = 1; | |||
| 1843 | } | |||
| 1844 | ||||
| 1845 | if (post == 0) | |||
| 1846 | return; | |||
| 1847 | ||||
| 1848 | ifp->if_timer = 5; | |||
| 1849 | sc->rl_ldata.rl_txq_prodidx = idx; | |||
| 1850 | ifq_serialize(ifq, &sc->rl_start); | |||
| 1851 | } | |||
| 1852 | ||||
| 1853 | int | |||
| 1854 | re_init(struct ifnet *ifp) | |||
| 1855 | { | |||
| 1856 | struct rl_softc *sc = ifp->if_softc; | |||
| 1857 | u_int16_t cfg; | |||
| 1858 | uint32_t rxcfg; | |||
| 1859 | int s; | |||
| 1860 | union { | |||
| 1861 | u_int32_t align_dummy; | |||
| 1862 | u_char eaddr[ETHER_ADDR_LEN6]; | |||
| 1863 | } eaddr; | |||
| 1864 | ||||
| 1865 | s = splnet()splraise(0x4); | |||
| 1866 | ||||
| 1867 | /* | |||
| 1868 | * Cancel pending I/O and free all RX/TX buffers. | |||
| 1869 | */ | |||
| 1870 | re_stop(ifp); | |||
| 1871 | ||||
| 1872 | /* Put controller into known state. */ | |||
| 1873 | re_reset(sc); | |||
| 1874 | ||||
| 1875 | /* | |||
| 1876 | * Enable C+ RX and TX mode, as well as VLAN stripping and | |||
| 1877 | * RX checksum offload. We must configure the C+ register | |||
| 1878 | * before all others. | |||
| 1879 | */ | |||
| 1880 | cfg = RL_CPLUSCMD_TXENB0x0001 | RL_CPLUSCMD_PCI_MRW0x0008 | | |||
| 1881 | RL_CPLUSCMD_RXCSUM_ENB0x0020; | |||
| 1882 | ||||
| 1883 | if (ifp->if_capabilitiesif_data.ifi_capabilities & IFCAP_VLAN_HWTAGGING0x00000020) | |||
| 1884 | cfg |= RL_CPLUSCMD_VLANSTRIP0x0040; | |||
| 1885 | ||||
| 1886 | if (sc->rl_flags & RL_FLAG_MACSTAT0x00000040) | |||
| 1887 | cfg |= RL_CPLUSCMD_MACSTAT_DIS0x0080; | |||
| 1888 | else | |||
| 1889 | cfg |= RL_CPLUSCMD_RXENB0x0002; | |||
| 1890 | ||||
| 1891 | CSR_WRITE_2(sc, RL_CPLUS_CMD, cfg)((sc->rl_btag)->write_2((sc->rl_bhandle), (0x00E0), ( cfg))); | |||
| 1892 | ||||
| 1893 | /* | |||
| 1894 | * Init our MAC address. Even though the chipset | |||
| 1895 | * documentation doesn't mention it, we need to enter "Config | |||
| 1896 | * register write enable" mode to modify the ID registers. | |||
| 1897 | */ | |||
| 1898 | bcopy(sc->sc_arpcom.ac_enaddr, eaddr.eaddr, ETHER_ADDR_LEN6); | |||
| 1899 | CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG)((sc->rl_btag)->write_1((sc->rl_bhandle), (0x0050), ( (0x80|0x40)))); | |||
| 1900 | CSR_WRITE_4(sc, RL_IDR4,((sc->rl_btag)->write_4((sc->rl_bhandle), (0x0004), ( ((__uint32_t)(*(u_int32_t *)(&eaddr.eaddr[4])))))) | |||
| 1901 | htole32(*(u_int32_t *)(&eaddr.eaddr[4])))((sc->rl_btag)->write_4((sc->rl_bhandle), (0x0004), ( ((__uint32_t)(*(u_int32_t *)(&eaddr.eaddr[4])))))); | |||
| 1902 | CSR_WRITE_4(sc, RL_IDR0,((sc->rl_btag)->write_4((sc->rl_bhandle), (0x0000), ( ((__uint32_t)(*(u_int32_t *)(&eaddr.eaddr[0])))))) | |||
| 1903 | htole32(*(u_int32_t *)(&eaddr.eaddr[0])))((sc->rl_btag)->write_4((sc->rl_bhandle), (0x0000), ( ((__uint32_t)(*(u_int32_t *)(&eaddr.eaddr[0])))))); | |||
| 1904 | /* | |||
| 1905 | * Default on PC Engines APU1 is to have all LEDs off unless | |||
| 1906 | * there is network activity. Override to provide a link status | |||
| 1907 | * LED. | |||
| 1908 | */ | |||
| 1909 | if (sc->sc_hwrev == RL_HWREV_8168E0x2C000000 && | |||
| 1910 | hw_vendor != NULL((void *)0) && hw_prod != NULL((void *)0) && | |||
| 1911 | strcmp(hw_vendor, "PC Engines") == 0 && | |||
| 1912 | strcmp(hw_prod, "APU") == 0) { | |||
| 1913 | CSR_SETBIT_1(sc, RL_CFG4, RL_CFG4_CUSTOM_LED)((sc->rl_btag)->write_1((sc->rl_bhandle), (0x0055), ( ((sc->rl_btag)->read_1((sc->rl_bhandle), (0x0055))) | (0x40)))); | |||
| 1914 | CSR_WRITE_1(sc, RL_LEDSEL, RL_LED_LINK | RL_LED_ACT << 4)((sc->rl_btag)->write_1((sc->rl_bhandle), (0x0018), ( 0x7 | 0x8 << 4))); | |||
| 1915 | } | |||
| 1916 | /* | |||
| 1917 | * Protect config register again | |||
| 1918 | */ | |||
| 1919 | CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF)((sc->rl_btag)->write_1((sc->rl_bhandle), (0x0050), ( 0x00))); | |||
| 1920 | ||||
| 1921 | if ((sc->rl_flags & RL_FLAG_JUMBOV20x00100000) != 0) | |||
| 1922 | re_set_jumbo(sc); | |||
| 1923 | ||||
| 1924 | /* | |||
| 1925 | * For C+ mode, initialize the RX descriptors and mbufs. | |||
| 1926 | */ | |||
| 1927 | re_rx_list_init(sc); | |||
| 1928 | re_tx_list_init(sc); | |||
| 1929 | ||||
| 1930 | /* | |||
| 1931 | * Load the addresses of the RX and TX lists into the chip. | |||
| 1932 | */ | |||
| 1933 | CSR_WRITE_4(sc, RL_RXLIST_ADDR_HI,((sc->rl_btag)->write_4((sc->rl_bhandle), (0x00E8), ( ((u_int64_t) (sc->rl_ldata.rl_rx_list_map->dm_segs[0].ds_addr ) >> 32)))) | |||
| 1934 | RL_ADDR_HI(sc->rl_ldata.rl_rx_list_map->dm_segs[0].ds_addr))((sc->rl_btag)->write_4((sc->rl_bhandle), (0x00E8), ( ((u_int64_t) (sc->rl_ldata.rl_rx_list_map->dm_segs[0].ds_addr ) >> 32)))); | |||
| 1935 | CSR_WRITE_4(sc, RL_RXLIST_ADDR_LO,((sc->rl_btag)->write_4((sc->rl_bhandle), (0x00E4), ( ((u_int64_t) (sc->rl_ldata.rl_rx_list_map->dm_segs[0].ds_addr ) & 0xFFFFFFFF)))) | |||
| 1936 | RL_ADDR_LO(sc->rl_ldata.rl_rx_list_map->dm_segs[0].ds_addr))((sc->rl_btag)->write_4((sc->rl_bhandle), (0x00E4), ( ((u_int64_t) (sc->rl_ldata.rl_rx_list_map->dm_segs[0].ds_addr ) & 0xFFFFFFFF)))); | |||
| 1937 | ||||
| 1938 | CSR_WRITE_4(sc, RL_TXLIST_ADDR_HI,((sc->rl_btag)->write_4((sc->rl_bhandle), (0x0024), ( ((u_int64_t) (sc->rl_ldata.rl_tx_list_map->dm_segs[0].ds_addr ) >> 32)))) | |||
| 1939 | RL_ADDR_HI(sc->rl_ldata.rl_tx_list_map->dm_segs[0].ds_addr))((sc->rl_btag)->write_4((sc->rl_bhandle), (0x0024), ( ((u_int64_t) (sc->rl_ldata.rl_tx_list_map->dm_segs[0].ds_addr ) >> 32)))); | |||
| 1940 | CSR_WRITE_4(sc, RL_TXLIST_ADDR_LO,((sc->rl_btag)->write_4((sc->rl_bhandle), (0x0020), ( ((u_int64_t) (sc->rl_ldata.rl_tx_list_map->dm_segs[0].ds_addr ) & 0xFFFFFFFF)))) | |||
| 1941 | RL_ADDR_LO(sc->rl_ldata.rl_tx_list_map->dm_segs[0].ds_addr))((sc->rl_btag)->write_4((sc->rl_bhandle), (0x0020), ( ((u_int64_t) (sc->rl_ldata.rl_tx_list_map->dm_segs[0].ds_addr ) & 0xFFFFFFFF)))); | |||
| 1942 | ||||
| 1943 | if (sc->rl_flags & RL_FLAG_RXDV_GATED0x00020000) | |||
| 1944 | CSR_WRITE_4(sc, RL_MISC, CSR_READ_4(sc, RL_MISC) &((sc->rl_btag)->write_4((sc->rl_bhandle), (0x00F0), ( ((sc->rl_btag)->read_4((sc->rl_bhandle), (0x00F0))) & ~0x00080000))) | |||
| 1945 | ~0x00080000)((sc->rl_btag)->write_4((sc->rl_bhandle), (0x00F0), ( ((sc->rl_btag)->read_4((sc->rl_bhandle), (0x00F0))) & ~0x00080000))); | |||
| 1946 | ||||
| 1947 | /* | |||
| 1948 | * Set the initial TX and RX configuration. | |||
| 1949 | */ | |||
| 1950 | CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG)((sc->rl_btag)->write_4((sc->rl_bhandle), (0x0040), ( (0x03000000|0x00000700)))); | |||
| 1951 | ||||
| 1952 | CSR_WRITE_1(sc, RL_EARLY_TX_THRESH, 16)((sc->rl_btag)->write_1((sc->rl_bhandle), (0x00EC), ( 16))); | |||
| 1953 | ||||
| 1954 | rxcfg = RL_RXCFG_CONFIG(0x0000E000|0x00000700|0x00001800); | |||
| 1955 | if (sc->rl_flags & RL_FLAG_EARLYOFF0x00008000) | |||
| 1956 | rxcfg |= RL_RXCFG_EARLYOFF0x00003800; | |||
| 1957 | else if (sc->rl_flags & RL_FLAG_EARLYOFFV20x00010000) | |||
| 1958 | rxcfg |= RL_RXCFG_EARLYOFFV20x00000800; | |||
| 1959 | CSR_WRITE_4(sc, RL_RXCFG, rxcfg)((sc->rl_btag)->write_4((sc->rl_bhandle), (0x0044), ( rxcfg))); | |||
| 1960 | ||||
| 1961 | /* | |||
| 1962 | * Enable transmit and receive. | |||
| 1963 | */ | |||
| 1964 | CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB | RL_CMD_RX_ENB)((sc->rl_btag)->write_1((sc->rl_bhandle), (0x0037), ( 0x0004 | 0x0008))); | |||
| 1965 | ||||
| 1966 | /* Program promiscuous mode and multicast filters. */ | |||
| 1967 | re_iff(sc); | |||
| 1968 | ||||
| 1969 | /* | |||
| 1970 | * Enable interrupts. | |||
| 1971 | */ | |||
| 1972 | re_setup_intr(sc, 1, sc->rl_imtype); | |||
| 1973 | CSR_WRITE_2(sc, RL_ISR, sc->rl_intrs)((sc->rl_btag)->write_2((sc->rl_bhandle), (0x003E), ( sc->rl_intrs))); | |||
| 1974 | ||||
| 1975 | /* Start RX/TX process. */ | |||
| 1976 | CSR_WRITE_4(sc, RL_MISSEDPKT, 0)((sc->rl_btag)->write_4((sc->rl_bhandle), (0x004C), ( 0))); | |||
| 1977 | ||||
| 1978 | /* | |||
| 1979 | * For 8169 gigE NICs, set the max allowed RX packet | |||
| 1980 | * size so we can receive jumbo frames. | |||
| 1981 | */ | |||
| 1982 | if (sc->sc_hwrev != RL_HWREV_8139CPLUS0x74800000) { | |||
| 1983 | if (sc->rl_flags & RL_FLAG_PCIE0x00000004 && | |||
| 1984 | (sc->rl_flags & RL_FLAG_JUMBOV20x00100000) == 0) | |||
| 1985 | CSR_WRITE_2(sc, RL_MAXRXPKTLEN, RE_RX_DESC_BUFLEN)((sc->rl_btag)->write_2((sc->rl_bhandle), (0x00DA), ( (1 << 11)))); | |||
| 1986 | else | |||
| 1987 | CSR_WRITE_2(sc, RL_MAXRXPKTLEN, 16383)((sc->rl_btag)->write_2((sc->rl_bhandle), (0x00DA), ( 16383))); | |||
| 1988 | } | |||
| 1989 | ||||
| 1990 | CSR_WRITE_1(sc, sc->rl_cfg1, CSR_READ_1(sc, sc->rl_cfg1) |((sc->rl_btag)->write_1((sc->rl_bhandle), (sc->rl_cfg1 ), (((sc->rl_btag)->read_1((sc->rl_bhandle), (sc-> rl_cfg1))) | 0x20))) | |||
| 1991 | RL_CFG1_DRVLOAD)((sc->rl_btag)->write_1((sc->rl_bhandle), (sc->rl_cfg1 ), (((sc->rl_btag)->read_1((sc->rl_bhandle), (sc-> rl_cfg1))) | 0x20))); | |||
| 1992 | ||||
| 1993 | ifp->if_flags |= IFF_RUNNING0x40; | |||
| 1994 | ifq_clr_oactive(&ifp->if_snd); | |||
| 1995 | ||||
| 1996 | splx(s)spllower(s); | |||
| 1997 | ||||
| 1998 | sc->rl_flags &= ~RL_FLAG_LINK0x00002000; | |||
| 1999 | mii_mediachg(&sc->sc_mii); | |||
| 2000 | ||||
| 2001 | timeout_add_sec(&sc->timer_handle, 1); | |||
| 2002 | ||||
| 2003 | return (0); | |||
| 2004 | } | |||
| 2005 | ||||
| 2006 | /* | |||
| 2007 | * Set media options. | |||
| 2008 | */ | |||
| 2009 | int | |||
| 2010 | re_ifmedia_upd(struct ifnet *ifp) | |||
| 2011 | { | |||
| 2012 | struct rl_softc *sc; | |||
| 2013 | ||||
| 2014 | sc = ifp->if_softc; | |||
| 2015 | ||||
| 2016 | return (mii_mediachg(&sc->sc_mii)); | |||
| 2017 | } | |||
| 2018 | ||||
| 2019 | /* | |||
| 2020 | * Report current media status. | |||
| 2021 | */ | |||
| 2022 | void | |||
| 2023 | re_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) | |||
| 2024 | { | |||
| 2025 | struct rl_softc *sc; | |||
| 2026 | ||||
| 2027 | sc = ifp->if_softc; | |||
| 2028 | ||||
| 2029 | mii_pollstat(&sc->sc_mii); | |||
| 2030 | ifmr->ifm_active = sc->sc_mii.mii_media_active; | |||
| 2031 | ifmr->ifm_status = sc->sc_mii.mii_media_status; | |||
| 2032 | } | |||
| 2033 | ||||
| 2034 | int | |||
| 2035 | re_ioctl(struct ifnet *ifp, u_long command, caddr_t data) | |||
| 2036 | { | |||
| 2037 | struct rl_softc *sc = ifp->if_softc; | |||
| 2038 | struct ifreq *ifr = (struct ifreq *) data; | |||
| 2039 | int s, error = 0; | |||
| 2040 | ||||
| 2041 | s = splnet()splraise(0x4); | |||
| 2042 | ||||
| 2043 | switch(command) { | |||
| 2044 | case SIOCSIFADDR((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff ) << 16) | ((('i')) << 8) | ((12))): | |||
| 2045 | ifp->if_flags |= IFF_UP0x1; | |||
| 2046 | if (!(ifp->if_flags & IFF_RUNNING0x40)) | |||
| 2047 | re_init(ifp); | |||
| 2048 | break; | |||
| 2049 | case SIOCSIFFLAGS((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff ) << 16) | ((('i')) << 8) | ((16))): | |||
| 2050 | if (ifp->if_flags & IFF_UP0x1) { | |||
| 2051 | if (ifp->if_flags & IFF_RUNNING0x40) | |||
| 2052 | error = ENETRESET52; | |||
| 2053 | else | |||
| 2054 | re_init(ifp); | |||
| 2055 | } else { | |||
| 2056 | if (ifp->if_flags & IFF_RUNNING0x40) | |||
| 2057 | re_stop(ifp); | |||
| 2058 | } | |||
| 2059 | break; | |||
| 2060 | case SIOCGIFMEDIA(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof (struct ifmediareq) & 0x1fff) << 16) | ((('i')) << 8) | ((56))): | |||
| 2061 | case SIOCSIFMEDIA(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof (struct ifreq) & 0x1fff) << 16) | ((('i')) << 8) | ((55))): | |||
| 2062 | error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, command); | |||
| 2063 | break; | |||
| 2064 | case SIOCGIFRXR((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff ) << 16) | ((('i')) << 8) | ((170))): | |||
| 2065 | error = if_rxr_ioctl((struct if_rxrinfo *)ifr->ifr_dataifr_ifru.ifru_data, | |||
| 2066 | NULL((void *)0), RL_FRAMELEN(sc->rl_max_mtu)(sc->rl_max_mtu + ((6 * 2) + 2) + 4 + 4), &sc->rl_ldata.rl_rx_ring); | |||
| 2067 | break; | |||
| 2068 | default: | |||
| 2069 | error = ether_ioctl(ifp, &sc->sc_arpcom, command, data); | |||
| 2070 | } | |||
| 2071 | ||||
| 2072 | if (error == ENETRESET52) { | |||
| 2073 | if (ifp->if_flags & IFF_RUNNING0x40) | |||
| 2074 | re_iff(sc); | |||
| 2075 | error = 0; | |||
| 2076 | } | |||
| 2077 | ||||
| 2078 | splx(s)spllower(s); | |||
| 2079 | return (error); | |||
| 2080 | } | |||
| 2081 | ||||
| 2082 | void | |||
| 2083 | re_watchdog(struct ifnet *ifp) | |||
| 2084 | { | |||
| 2085 | struct rl_softc *sc; | |||
| 2086 | int s; | |||
| 2087 | ||||
| 2088 | sc = ifp->if_softc; | |||
| 2089 | s = splnet()splraise(0x4); | |||
| 2090 | printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname); | |||
| 2091 | ||||
| 2092 | re_init(ifp); | |||
| 2093 | ||||
| 2094 | splx(s)spllower(s); | |||
| 2095 | } | |||
| 2096 | ||||
| 2097 | /* | |||
| 2098 | * Stop the adapter and free any mbufs allocated to the | |||
| 2099 | * RX and TX lists. | |||
| 2100 | */ | |||
| 2101 | void | |||
| 2102 | re_stop(struct ifnet *ifp) | |||
| 2103 | { | |||
| 2104 | struct rl_softc *sc; | |||
| 2105 | int i; | |||
| 2106 | ||||
| 2107 | sc = ifp->if_softc; | |||
| 2108 | ||||
| 2109 | ifp->if_timer = 0; | |||
| 2110 | sc->rl_flags &= ~RL_FLAG_LINK0x00002000; | |||
| 2111 | sc->rl_timerintr = 0; | |||
| 2112 | ||||
| 2113 | timeout_del(&sc->timer_handle); | |||
| 2114 | ifp->if_flags &= ~IFF_RUNNING0x40; | |||
| 2115 | ||||
| 2116 | /* | |||
| 2117 | * Disable accepting frames to put RX MAC into idle state. | |||
| 2118 | * Otherwise it's possible to get frames while stop command | |||
| 2119 | * execution is in progress and controller can DMA the frame | |||
| 2120 | * to already freed RX buffer during that period. | |||
| 2121 | */ | |||
| 2122 | CSR_WRITE_4(sc, RL_RXCFG, CSR_READ_4(sc, RL_RXCFG) &((sc->rl_btag)->write_4((sc->rl_bhandle), (0x0044), ( ((sc->rl_btag)->read_4((sc->rl_bhandle), (0x0044))) & ~(0x00000001 | 0x00000008 | 0x00000002 | 0x00000004)))) | |||
| 2123 | ~(RL_RXCFG_RX_ALLPHYS | RL_RXCFG_RX_BROAD | RL_RXCFG_RX_INDIV |((sc->rl_btag)->write_4((sc->rl_bhandle), (0x0044), ( ((sc->rl_btag)->read_4((sc->rl_bhandle), (0x0044))) & ~(0x00000001 | 0x00000008 | 0x00000002 | 0x00000004)))) | |||
| 2124 | RL_RXCFG_RX_MULTI))((sc->rl_btag)->write_4((sc->rl_bhandle), (0x0044), ( ((sc->rl_btag)->read_4((sc->rl_bhandle), (0x0044))) & ~(0x00000001 | 0x00000008 | 0x00000002 | 0x00000004)))); | |||
| 2125 | ||||
| 2126 | if (sc->rl_flags & RL_FLAG_WAIT_TXPOLL0x00400000) { | |||
| 2127 | for (i = RL_TIMEOUT1000; i > 0; i--) { | |||
| 2128 | if ((CSR_READ_1(sc, sc->rl_txstart)((sc->rl_btag)->read_1((sc->rl_bhandle), (sc->rl_txstart ))) & | |||
| 2129 | RL_TXSTART_START0x40) == 0) | |||
| 2130 | break; | |||
| 2131 | DELAY(20)(*delay_func)(20); | |||
| 2132 | } | |||
| 2133 | if (i == 0) | |||
| 2134 | printf("%s: stopping TX poll timed out!\n", | |||
| 2135 | sc->sc_dev.dv_xname); | |||
| 2136 | CSR_WRITE_1(sc, RL_COMMAND, 0x00); | |||
| 2137 | } else if (sc->rl_flags & RL_FLAG_CMDSTOP) { | |||
| 2138 | CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_STOPREQ | RL_CMD_TX_ENB | | |||
| 2139 | RL_CMD_RX_ENB); | |||
| 2140 | if (sc->rl_flags & RL_FLAG_CMDSTOP_WAIT_TXQ) { | |||
| 2141 | for (i = RL_TIMEOUT; i > 0; i--) { | |||
| 2142 | if ((CSR_READ_4(sc, RL_TXCFG) & | |||
| 2143 | RL_TXCFG_QUEUE_EMPTY) != 0) | |||
| 2144 | break; | |||
| 2145 | DELAY(100); | |||
| 2146 | } | |||
| 2147 | if (i == 0) | |||
| 2148 | printf("%s: stopping TXQ timed out!\n", | |||
| 2149 | sc->sc_dev.dv_xname); | |||
| 2150 | } | |||
| 2151 | } else | |||
| 2152 | CSR_WRITE_1(sc, RL_COMMAND, 0x00); | |||
| 2153 | DELAY(1000); | |||
| 2154 | CSR_WRITE_2(sc, RL_IMR, 0x0000); | |||
| 2155 | CSR_WRITE_2(sc, RL_ISR, 0xFFFF); | |||
| 2156 | ||||
| 2157 | intr_barrier(sc->sc_ih); | |||
| 2158 | ifq_barrier(&ifp->if_snd); | |||
| 2159 | ||||
| 2160 | ifq_clr_oactive(&ifp->if_snd); | |||
| 2161 | mii_down(&sc->sc_mii); | |||
| 2162 | ||||
| 2163 | if (sc->rl_head != NULL) { | |||
| 2164 | m_freem(sc->rl_head); | |||
| 2165 | sc->rl_head = sc->rl_tail = NULL; | |||
| 2166 | } | |||
| 2167 | ||||
| 2168 | /* Free the TX list buffers. */ | |||
| 2169 | for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) { | |||
| 2170 | if (sc->rl_ldata.rl_txq[i].txq_mbuf != NULL) { | |||
| 2171 | bus_dmamap_unload(sc->sc_dmat, | |||
| 2172 | sc->rl_ldata.rl_txq[i].txq_dmamap); | |||
| 2173 | m_freem(sc->rl_ldata.rl_txq[i].txq_mbuf); | |||
| 2174 | sc->rl_ldata.rl_txq[i].txq_mbuf = NULL; | |||
| 2175 | } | |||
| 2176 | } | |||
| 2177 | ||||
| 2178 | /* Free the RX list buffers. */ | |||
| 2179 | for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) { | |||
| 2180 | if (sc->rl_ldata.rl_rxsoft[i].rxs_mbuf != NULL) { | |||
| 2181 | bus_dmamap_unload(sc->sc_dmat, | |||
| 2182 | sc->rl_ldata.rl_rxsoft[i].rxs_dmamap); | |||
| 2183 | m_freem(sc->rl_ldata.rl_rxsoft[i].rxs_mbuf); | |||
| 2184 | sc->rl_ldata.rl_rxsoft[i].rxs_mbuf = NULL; | |||
| 2185 | } | |||
| 2186 | } | |||
| 2187 | } | |||
| 2188 | ||||
| 2189 | void | |||
| 2190 | re_setup_hw_im(struct rl_softc *sc) | |||
| 2191 | { | |||
| 2192 | KASSERT(sc->rl_flags & RL_FLAG_HWIM); | |||
| 2193 | ||||
| 2194 | /* | |||
| 2195 | * Interrupt moderation | |||
| 2196 | * | |||
| 2197 | * 0xABCD | |||
| 2198 | * A - unknown (maybe TX related) | |||
| 2199 | * B - TX timer (unit: 25us) | |||
| 2200 | * C - unknown (maybe RX related) | |||
| 2201 | * D - RX timer (unit: 25us) | |||
| 2202 | * | |||
| 2203 | * | |||
| 2204 | * re(4)'s interrupt moderation is actually controlled by | |||
| 2205 | * two variables, like most other NICs (bge, bnx etc.) | |||
| 2206 | * o timer | |||
| 2207 | * o number of packets [P] | |||
| 2208 | * | |||
| 2209 | * The logic relationship between these two variables is | |||
| 2210 | * similar to other NICs too: | |||
| 2211 | * if (timer expire || packets > [P]) | |||
| 2212 | * Interrupt is delivered | |||
| 2213 | * | |||
| 2214 | * Currently we only know how to set 'timer', but not | |||
| 2215 | * 'number of packets', which should be ~30, as far as I | |||
| 2216 | * tested (sink ~900Kpps, interrupt rate is 30KHz) | |||
| 2217 | */ | |||
| 2218 | CSR_WRITE_2(sc, RL_IM, | |||
| 2219 | RL_IM_RXTIME(sc->rl_rx_time) | | |||
| 2220 | RL_IM_TXTIME(sc->rl_tx_time) | | |||
| 2221 | RL_IM_MAGIC); | |||
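| | /* | |||
| | * For illustration only (hypothetical values): with rl_rx_time = 1 and | |||
| | * rl_tx_time = 1 the write above programs RL_IM with | |||
| | * RL_IM_MAGIC | (1 << 8) | 1, i.e. 0x5151 for the 0x5050 magic, so the | |||
| | * RX and TX timer fields are each one 25us tick. | |||
| | */ | |||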
| 2222 | } | |||
| 2223 | ||||
| 2224 | void | |||
| 2225 | re_disable_hw_im(struct rl_softc *sc) | |||
| 2226 | { | |||
| 2227 | if (sc->rl_flags & RL_FLAG_HWIM) | |||
| 2228 | CSR_WRITE_2(sc, RL_IM, 0); | |||
| 2229 | } | |||
| 2230 | ||||
| 2231 | void | |||
| 2232 | re_setup_sim_im(struct rl_softc *sc) | |||
| 2233 | { | |||
| 2234 | if (sc->sc_hwrev == RL_HWREV_8139CPLUS) | |||
| 2235 | CSR_WRITE_4(sc, RL_TIMERINT, 0x400); /* XXX */ | |||
| 2236 | else { | |||
| 2237 | u_int32_t nticks; | |||
| 2238 | ||||
| 2239 | /* | |||
| 2240 | * Datasheet says tick decreases at bus speed, | |||
| 2241 | * but it seems the clock runs a little bit | |||
| 2242 | * faster, so we do some compensation here. | |||
| 2243 | */ | |||
| 2244 | nticks = (sc->rl_sim_time * sc->rl_bus_speed * 8) / 5; | |||
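| | /* | |||
| | * Illustration with hypothetical values: rl_sim_time = 150 on a | |||
| | * 125MHz bus gives nticks = 150 * 125 * 8 / 5 = 30000. | |||
| | */ | |||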
| 2245 | CSR_WRITE_4(sc, RL_TIMERINT_8169, nticks); | |||
| 2246 | } | |||
| 2247 | CSR_WRITE_4(sc, RL_TIMERCNT, 1); /* reload */ | |||
| 2248 | sc->rl_timerintr = 1; | |||
| 2249 | } | |||
| 2250 | ||||
| 2251 | void | |||
| 2252 | re_disable_sim_im(struct rl_softc *sc) | |||
| 2253 | { | |||
| 2254 | if (sc->sc_hwrev == RL_HWREV_8139CPLUS) | |||
| 2255 | CSR_WRITE_4(sc, RL_TIMERINT, 0); | |||
| 2256 | else | |||
| 2257 | CSR_WRITE_4(sc, RL_TIMERINT_8169, 0); | |||
| 2258 | sc->rl_timerintr = 0; | |||
| 2259 | } | |||
| 2260 | ||||
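| | /* | |||
| | * Select the interrupt sources and the RX/TX acknowledgement masks | |||
| | * that correspond to the requested interrupt moderation type. | |||
| | */ | |||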
| 2261 | void | |||
| 2262 | re_config_imtype(struct rl_softc *sc, int imtype) | |||
| 2263 | { | |||
| 2264 | switch (imtype) { | |||
| 2265 | case RL_IMTYPE_HW: | |||
| 2266 | KASSERT(sc->rl_flags & RL_FLAG_HWIM); | |||
| 2267 | /* FALLTHROUGH */ | |||
| 2268 | case RL_IMTYPE_NONE: | |||
| 2269 | sc->rl_intrs = RL_INTRS_CPLUS; | |||
| 2270 | sc->rl_rx_ack = RL_ISR_RX_OK | RL_ISR_FIFO_OFLOW | | |||
| 2271 | RL_ISR_RX_OVERRUN; | |||
| 2272 | sc->rl_tx_ack = RL_ISR_TX_OK; | |||
| 2273 | break; | |||
| 2274 | ||||
| 2275 | case RL_IMTYPE_SIM: | |||
| 2276 | sc->rl_intrs = RL_INTRS_TIMER; | |||
| 2277 | sc->rl_rx_ack = RL_ISR_TIMEOUT_EXPIRED; | |||
| 2278 | sc->rl_tx_ack = RL_ISR_TIMEOUT_EXPIRED; | |||
| 2279 | break; | |||
| 2280 | ||||
| 2281 | default: | |||
| 2282 | panic("%s: unknown imtype %d", | |||
| 2283 | sc->sc_dev.dv_xname, imtype); | |||
| 2284 | } | |||
| 2285 | } | |||
| 2286 | ||||
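| | /* | |||
| | * Enable jumbo frame support. The config registers are write | |||
| | * protected, so they are unlocked via RL_EECMD around the change. | |||
| | */ | |||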
| 2287 | void | |||
| 2288 | re_set_jumbo(struct rl_softc *sc) | |||
| 2289 | { | |||
| 2290 | CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG); | |||
| 2291 | CSR_WRITE_1(sc, RL_CFG3, CSR_READ_1(sc, RL_CFG3) | | |||
| 2292 | RL_CFG3_JUMBO_EN0); | |||
| 2293 | ||||
| 2294 | switch (sc->sc_hwrev) { | |||
| 2295 | case RL_HWREV_8168DP: | |||
| 2296 | break; | |||
| 2297 | case RL_HWREV_8168E: | |||
| 2298 | CSR_WRITE_1(sc, RL_CFG4, CSR_READ_1(sc, RL_CFG4) | | |||
| 2299 | RL_CFG4_8168E_JUMBO_EN1); | |||
| 2300 | break; | |||
| 2301 | default: | |||
| 2302 | CSR_WRITE_1(sc, RL_CFG4, CSR_READ_1(sc, RL_CFG4) | | |||
| 2303 | RL_CFG4_JUMBO_EN1); | |||
| 2304 | break; | |||
| 2305 | } | |||
| 2306 | ||||
| 2307 | CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF); | |||
| 2308 | } | |||
| 2309 | ||||
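| | /* | |||
| | * Program the interrupt mask and switch between no moderation, | |||
| | * hardware moderation and simulated (timer based) moderation. | |||
| | */ | |||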
| 2310 | void | |||
| 2311 | re_setup_intr(struct rl_softc *sc, int enable_intrs, int imtype) | |||
| 2312 | { | |||
| 2313 | re_config_imtype(sc, imtype); | |||
| 2314 | ||||
| 2315 | if (enable_intrs) | |||
| 2316 | CSR_WRITE_2(sc, RL_IMR, sc->rl_intrs); | |||
| 2317 | else | |||
| 2318 | CSR_WRITE_2(sc, RL_IMR, 0); | |||
| 2319 | ||||
| 2320 | switch (imtype) { | |||
| 2321 | case RL_IMTYPE_NONE: | |||
| 2322 | re_disable_sim_im(sc); | |||
| 2323 | re_disable_hw_im(sc); | |||
| 2324 | break; | |||
| 2325 | ||||
| 2326 | case RL_IMTYPE_HW: | |||
| 2327 | KASSERT(sc->rl_flags & RL_FLAG_HWIM); | |||
| 2328 | re_disable_sim_im(sc); | |||
| 2329 | re_setup_hw_im(sc); | |||
| 2330 | break; | |||
| 2331 | ||||
| 2332 | case RL_IMTYPE_SIM: | |||
| 2333 | re_disable_hw_im(sc); | |||
| 2334 | re_setup_sim_im(sc); | |||
| 2335 | break; | |||
| 2336 | ||||
| 2337 | default: | |||
| 2338 | panic("%s: unknown imtype %d", | |||
| 2339 | sc->sc_dev.dv_xname, imtype); | |||
| 2340 | } | |||
| 2341 | } | |||
| 2342 | ||||
| 2343 | #ifndef SMALL_KERNEL | |||
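| | /* | |||
| | * Configure Wake-on-LAN. Only magic packet wakeups are ever enabled; | |||
| | * unicast/multicast/broadcast and link change wake events are cleared. | |||
| | */ | |||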
| 2344 | int | |||
| 2345 | re_wol(struct ifnet *ifp, int enable) | |||
| 2346 | { | |||
| 2347 | struct rl_softc *sc = ifp->if_softc; | |||
| 2348 | u_int8_t val; | |||
| 2349 | ||||
| 2350 | if (enable) { | |||
| 2351 | if ((CSR_READ_1(sc, sc->rl_cfg1) & RL_CFG1_PME) == 0) { | |||
| 2352 | printf("%s: power management is disabled, " | |||
| 2353 | "cannot do WOL\n", sc->sc_dev.dv_xname); | |||
| 2354 | return (ENOTSUP); | |||
| 2355 | } | |||
| 2356 | if ((CSR_READ_1(sc, sc->rl_cfg2) & RL_CFG2_AUXPWR) == 0) | |||
| 2357 | printf("%s: no auxiliary power, cannot do WOL from D3 " | |||
| 2358 | "(power-off) state\n", sc->sc_dev.dv_xname); | |||
| 2359 | } | |||
| 2360 | ||||
| 2361 | re_iff(sc); | |||
| 2362 | ||||
| 2363 | /* Temporarily enable write to configuration registers. */ | |||
| 2364 | CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG); | |||
| 2365 | ||||
| 2366 | /* Always disable all wake events except magic packet. */ | |||
| 2367 | if (enable) { | |||
| 2368 | val = CSR_READ_1(sc, sc->rl_cfg5); | |||
| 2369 | val &= ~(RL_CFG5_WOL_UCAST | RL_CFG5_WOL_MCAST | | |||
| 2370 | RL_CFG5_WOL_BCAST); | |||
| 2371 | CSR_WRITE_1(sc, sc->rl_cfg5, val); | |||
| 2372 | ||||
| 2373 | val = CSR_READ_1(sc, sc->rl_cfg3); | |||
| 2374 | val |= RL_CFG3_WOL_MAGIC; | |||
| 2375 | val &= ~RL_CFG3_WOL_LINK; | |||
| 2376 | CSR_WRITE_1(sc, sc->rl_cfg3, val); | |||
| 2377 | } else { | |||
| 2378 | val = CSR_READ_1(sc, sc->rl_cfg5); | |||
| 2379 | val &= ~(RL_CFG5_WOL_UCAST | RL_CFG5_WOL_MCAST | | |||
| 2380 | RL_CFG5_WOL_BCAST); | |||
| 2381 | CSR_WRITE_1(sc, sc->rl_cfg5, val); | |||
| 2382 | ||||
| 2383 | val = CSR_READ_1(sc, sc->rl_cfg3); | |||
| 2384 | val &= ~(RL_CFG3_WOL_MAGIC | RL_CFG3_WOL_LINK); | |||
| 2385 | CSR_WRITE_1(sc, sc->rl_cfg3, val); | |||
| 2386 | } | |||
| 2387 | ||||
| 2388 | CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF); | |||
| 2389 | ||||
| 2390 | return (0); | |||
| 2391 | } | |||
| 2392 | #endif | |||
| 2393 | ||||
| 2394 | #if NKSTAT > 0 | |||
| 2395 | ||||
| 2396 | #define RE_DTCCR_CMD (1U << 3) | |||
| 2397 | #define RE_DTCCR_LO 0x10 | |||
| 2398 | #define RE_DTCCR_HI 0x14 | |||
| 2399 | ||||
| 2400 | struct re_kstats { | |||
| 2401 | struct kstat_kv tx_ok; | |||
| 2402 | struct kstat_kv rx_ok; | |||
| 2403 | struct kstat_kv tx_er; | |||
| 2404 | struct kstat_kv rx_er; | |||
| 2405 | struct kstat_kv miss_pkt; | |||
| 2406 | struct kstat_kv fae; | |||
| 2407 | struct kstat_kv tx_1col; | |||
| 2408 | struct kstat_kv tx_mcol; | |||
| 2409 | struct kstat_kv rx_ok_phy; | |||
| 2410 | struct kstat_kv rx_ok_brd; | |||
| 2411 | struct kstat_kv rx_ok_mul; | |||
| 2412 | struct kstat_kv tx_abt; | |||
| 2413 | struct kstat_kv tx_undrn; | |||
| 2414 | }; | |||
| 2415 | ||||
| 2416 | static const struct re_kstats re_kstats_tpl = { | |||
| 2417 | .tx_ok = KSTAT_KV_UNIT_INITIALIZER("TxOk", | |||
| 2418 | KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS), | |||
| 2419 | .rx_ok = KSTAT_KV_UNIT_INITIALIZER("RxOk", | |||
| 2420 | KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS), | |||
| 2421 | .tx_er = KSTAT_KV_UNIT_INITIALIZER("TxEr", | |||
| 2422 | KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS), | |||
| 2423 | .rx_er = KSTAT_KV_UNIT_INITIALIZER("RxEr", | |||
| 2424 | KSTAT_KV_T_COUNTER32, KSTAT_KV_U_PACKETS), | |||
| 2425 | .miss_pkt = KSTAT_KV_UNIT_INITIALIZER("MissPkt", | |||
| 2426 | KSTAT_KV_T_COUNTER16, KSTAT_KV_U_PACKETS), | |||
| 2427 | .fae = KSTAT_KV_UNIT_INITIALIZER("FAE", | |||
| 2428 | KSTAT_KV_T_COUNTER16, KSTAT_KV_U_PACKETS), | |||
| 2429 | .tx_1col = KSTAT_KV_UNIT_INITIALIZER("Tx1Col", | |||
| 2430 | KSTAT_KV_T_COUNTER32, KSTAT_KV_U_PACKETS), | |||
| 2431 | .tx_mcol = KSTAT_KV_UNIT_INITIALIZER("TxMCol", | |||
| 2432 | KSTAT_KV_T_COUNTER32, KSTAT_KV_U_PACKETS), | |||
| 2433 | .rx_ok_phy = KSTAT_KV_UNIT_INITIALIZER("RxOkPhy", | |||
| 2434 | KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS), | |||
| 2435 | .rx_ok_brd = KSTAT_KV_UNIT_INITIALIZER("RxOkBrd", | |||
| 2436 | KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS), | |||
| 2437 | .rx_ok_mul = KSTAT_KV_UNIT_INITIALIZER("RxOkMul", | |||
| 2438 | KSTAT_KV_T_COUNTER32, KSTAT_KV_U_PACKETS), | |||
| 2439 | .tx_abt = KSTAT_KV_UNIT_INITIALIZER("TxAbt", | |||
| 2440 | KSTAT_KV_T_COUNTER16, KSTAT_KV_U_PACKETS), | |||
| 2441 | .tx_undrn = KSTAT_KV_UNIT_INITIALIZER("TxUndrn", | |||
| 2442 | KSTAT_KV_T_COUNTER16, KSTAT_KV_U_PACKETS), | |||
| 2443 | }; | |||
| 2444 | ||||
| 2445 | struct re_kstat_softc { | |||
| 2446 | struct re_stats *re_ks_sc_stats; | |||
| 2447 | ||||
| 2448 | bus_dmamap_t re_ks_sc_map; | |||
| 2449 | bus_dma_segment_t re_ks_sc_seg; | |||
| 2450 | int re_ks_sc_nsegs; | |||
| 2451 | ||||
| 2452 | struct rwlock re_ks_sc_rwl; | |||
| 2453 | }; | |||
| 2454 | ||||
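| | /* | |||
| | * Ask the chip to DMA its tally counters into the kstat buffer: the | |||
| | * buffer's DMA address is written to DTCCR with the command bit set | |||
| | * and the bit is polled until the chip clears it (or the poll times | |||
| | * out). | |||
| | */ | |||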
| 2455 | static int | |||
| 2456 | re_kstat_read(struct kstat *ks) | |||
| 2457 | { | |||
| 2458 | struct rl_softc *sc = ks->ks_softc; | |||
| 2459 | struct re_kstat_softc *re_ks_sc = ks->ks_ptr; | |||
| 2460 | bus_dmamap_t map; | |||
| 2461 | uint64_t cmd; | |||
| 2462 | uint32_t reg; | |||
| 2463 | uint8_t command; | |||
| 2464 | int tmo; | |||
| 2465 | ||||
| 2466 | command = CSR_READ_1(sc, RL_COMMAND); | |||
| 2467 | if (!ISSET(command, RL_CMD_RX_ENB) || command == 0xff) | |||
| 2468 | return (ENETDOWN); | |||
| 2469 | ||||
| 2470 | map = re_ks_sc->re_ks_sc_map; | |||
| 2471 | cmd = map->dm_segs[0].ds_addr | RE_DTCCR_CMD; | |||
| 2472 | ||||
| 2473 | bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, | |||
| 2474 | BUS_DMASYNC_PREREAD); | |||
| 2475 | ||||
| 2476 | CSR_WRITE_4(sc, RE_DTCCR_HI, cmd >> 32); | |||
| 2477 | bus_space_barrier(sc->rl_btag, sc->rl_bhandle, RE_DTCCR_HI, 8, | |||
| 2478 | BUS_SPACE_BARRIER_WRITE); | |||
| 2479 | CSR_WRITE_4(sc, RE_DTCCR_LO, cmd); | |||
| 2480 | bus_space_barrier(sc->rl_btag, sc->rl_bhandle, RE_DTCCR_LO, 4, | |||
| 2481 | BUS_SPACE_BARRIER_READ|BUS_SPACE_BARRIER_WRITE); | |||
| 2482 | ||||
| 2483 | tmo = 1000; | |||
| 2484 | do { | |||
| 2485 | reg = CSR_READ_4(sc, RE_DTCCR_LO); | |||
| 2486 | if (!ISSET(reg, RE_DTCCR_CMD)) | |||
| 2487 | break; | |||
| 2488 | ||||
| 2489 | delay(10); | |||
| 2490 | bus_space_barrier(sc->rl_btag, sc->rl_bhandle, RE_DTCCR_LO, 4, | |||
| 2491 | BUS_SPACE_BARRIER_READ); | |||
| 2492 | } while (--tmo); | |||
| 2493 | ||||
| 2494 | bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, | |||
| 2495 | BUS_DMASYNC_POSTREAD); | |||
| 2496 | ||||
| 2497 | if (ISSET(reg, RE_DTCCR_CMD)) | |||
| 2498 | return (EIO); | |||
| 2499 | ||||
| 2500 | nanouptime(&ks->ks_updated); | |||
| 2501 | ||||
| 2502 | return (0); | |||
| 2503 | } | |||
| 2504 | ||||
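| | /* | |||
| | * Copy the little-endian counters DMAed by the chip into the kstat | |||
| | * key/value template. | |||
| | */ | |||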
| 2505 | static int | |||
| 2506 | re_kstat_copy(struct kstat *ks, void *dst) | |||
| 2507 | { | |||
| 2508 | struct re_kstat_softc *re_ks_sc = ks->ks_ptr; | |||
| 2509 | struct re_stats *rs = re_ks_sc->re_ks_sc_stats; | |||
| 2510 | struct re_kstats *kvs = dst; | |||
| 2511 | ||||
| 2512 | *kvs = re_kstats_tpl; | |||
| 2513 | kstat_kv_u64(&kvs->tx_ok) = lemtoh64(&rs->re_tx_ok); | |||
| 2514 | kstat_kv_u64(&kvs->rx_ok) = lemtoh64(&rs->re_rx_ok); | |||
| 2515 | kstat_kv_u64(&kvs->tx_er) = lemtoh64(&rs->re_tx_er); | |||
| 2516 | kstat_kv_u32(&kvs->rx_er) = lemtoh32(&rs->re_rx_er); | |||
| 2517 | kstat_kv_u16(&kvs->miss_pkt) = lemtoh16(&rs->re_miss_pkt); | |||
| 2518 | kstat_kv_u16(&kvs->fae) = lemtoh16(&rs->re_fae); | |||
| 2519 | kstat_kv_u32(&kvs->tx_1col) = lemtoh32(&rs->re_tx_1col); | |||
| 2520 | kstat_kv_u32(&kvs->tx_mcol) = lemtoh32(&rs->re_tx_mcol); | |||
| 2521 | kstat_kv_u64(&kvs->rx_ok_phy) = lemtoh64(&rs->re_rx_ok_phy); | |||
| 2522 | kstat_kv_u64(&kvs->rx_ok_brd) = lemtoh64(&rs->re_rx_ok_brd); | |||
| 2523 | kstat_kv_u32(&kvs->rx_ok_mul) = lemtoh32(&rs->re_rx_ok_mul); | |||
| 2524 | kstat_kv_u16(&kvs->tx_abt) = lemtoh16(&rs->re_tx_abt); | |||
| 2525 | kstat_kv_u16(&kvs->tx_undrn) = lemtoh16(&rs->re_tx_undrn); | |||
| 2526 | ||||
| 2527 | return (0); | |||
| 2528 | } | |||
| 2529 | ||||
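| | /* | |||
| | * Allocate DMA-able memory for the hardware tally counters and | |||
| | * register a "re-stats" kstat that exports them. | |||
| | */ | |||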
| 2530 | void | |||
| 2531 | re_kstat_attach(struct rl_softc *sc) | |||
| 2532 | { | |||
| 2533 | struct re_kstat_softc *re_ks_sc; | |||
| 2534 | struct kstat *ks; | |||
| 2535 | ||||
| 2536 | re_ks_sc = malloc(sizeof(*re_ks_sc), M_DEVBUF, M_NOWAIT); | |||
| 2537 | if (re_ks_sc == NULL) { | |||
| 2538 | printf("%s: cannot allocate kstat softc\n", | |||
| 2539 | sc->sc_dev.dv_xname); | |||
| 2540 | return; | |||
| 2541 | } | |||
| 2542 | ||||
| 2543 | if (bus_dmamap_create(sc->sc_dmat, | |||
| 2544 | sizeof(struct re_stats), 1, sizeof(struct re_stats), 0, | |||
| 2545 | BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT, | |||
| 2546 | &re_ks_sc->re_ks_sc_map) != 0) { | |||
| 2547 | printf("%s: cannot create counter dma memory map\n", | |||
| 2548 | sc->sc_dev.dv_xname); | |||
| 2549 | goto free; | |||
| 2550 | } | |||
| 2551 | ||||
| 2552 | if (bus_dmamem_alloc(sc->sc_dmat, | |||
| 2553 | sizeof(struct re_stats), RE_STATS_ALIGNMENT, 0, | |||
| 2554 | &re_ks_sc->re_ks_sc_seg, 1, &re_ks_sc->re_ks_sc_nsegs, | |||
| 2555 | BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0) { | |||
| 2556 | printf("%s: cannot allocate counter dma memory\n", | |||
| 2557 | sc->sc_dev.dv_xname); | |||
| 2558 | goto destroy; | |||
| 2559 | } | |||
| 2560 | ||||
| 2561 | if (bus_dmamem_map(sc->sc_dmat, | |||
| 2562 | &re_ks_sc->re_ks_sc_seg, re_ks_sc->re_ks_sc_nsegs, | |||
| 2563 | sizeof(struct re_stats), (caddr_t *)&re_ks_sc->re_ks_sc_stats, | |||
| 2564 | BUS_DMA_NOWAIT) != 0) { | |||
| 2565 | printf("%s: cannot map counter dma memory\n", | |||
| 2566 | sc->sc_dev.dv_xname); | |||
| 2567 | goto freedma; | |||
| 2568 | } | |||
| 2569 | ||||
| 2570 | if (bus_dmamap_load(sc->sc_dmat, re_ks_sc->re_ks_sc_map, | |||
| 2571 | (caddr_t)re_ks_sc->re_ks_sc_stats, sizeof(struct re_stats), | |||
| 2572 | NULL, BUS_DMA_NOWAIT) != 0) { | |||
| 2573 | printf("%s: cannot load counter dma memory\n", | |||
| 2574 | sc->sc_dev.dv_xname); | |||
| 2575 | goto unmap; | |||
| 2576 | } | |||
| 2577 | ||||
| 2578 | ks = kstat_create(sc->sc_dev.dv_xname, 0, "re-stats", 0, | |||
| 2579 | KSTAT_T_KV, 0); | |||
| 2580 | if (ks == NULL) { | |||
| 2581 | printf("%s: cannot create re-stats kstat\n", | |||
| 2582 | sc->sc_dev.dv_xname); | |||
| 2583 | goto unload; | |||
| 2584 | } | |||
| 2585 | ||||
| 2586 | ks->ks_datalen = sizeof(re_kstats_tpl); | |||
| 2587 | ||||
| 2588 | rw_init(&re_ks_sc->re_ks_sc_rwl, "restats"); | |||
| 2589 | kstat_set_wlock(ks, &re_ks_sc->re_ks_sc_rwl); | |||
| 2590 | ks->ks_softc = sc; | |||
| 2591 | ks->ks_ptr = re_ks_sc; | |||
| 2592 | ks->ks_read = re_kstat_read; | |||
| 2593 | ks->ks_copy = re_kstat_copy; | |||
| 2594 | ||||
| 2595 | kstat_install(ks); | |||
| 2596 | ||||
| 2597 | sc->rl_kstat = ks; | |||
| 2598 | ||||
| 2599 | return; | |||
| 2600 | ||||
| 2601 | unload: | |||
| 2602 | bus_dmamap_unload(sc->sc_dmat, re_ks_sc->re_ks_sc_map); | |||
| 2603 | unmap: | |||
| 2604 | bus_dmamem_unmap(sc->sc_dmat, | |||
| 2605 | (caddr_t)re_ks_sc->re_ks_sc_stats, sizeof(struct re_stats)); | |||
| 2606 | freedma: | |||
| 2607 | bus_dmamem_free(sc->sc_dmat, &re_ks_sc->re_ks_sc_seg, 1); | |||
| 2608 | destroy: | |||
| 2609 | bus_dmamap_destroy(sc->sc_dmat, re_ks_sc->re_ks_sc_map); | |||
| 2610 | free: | |||
| 2611 | free(re_ks_sc, M_DEVBUF, sizeof(*re_ks_sc)); | |||
| 2612 | } | |||
| 2613 | #endif /* NKSTAT > 0 */ |