File: dev/ic/gem.c
Warning: line 1779, column 3: The right operand of '|' is a garbage value
/*	$OpenBSD: gem.c,v 1.128 2023/11/10 15:51:20 bluhm Exp $	*/
/*	$NetBSD: gem.c,v 1.1 2001/09/16 00:11:43 eeh Exp $ */

/*
 *
 * Copyright (C) 2001 Eduardo Horvath.
 * All rights reserved.
 *
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

/*
 * Driver for Sun GEM ethernet controllers.
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/timeout.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/endian.h>
#include <sys/atomic.h>

#include <net/if.h>
#include <net/if_media.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <machine/bus.h>
#include <machine/intr.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/ic/gemreg.h>
#include <dev/ic/gemvar.h>

#define	TRIES	10000

struct cfdriver gem_cd = {
	NULL, "gem", DV_IFNET
};

void		gem_start(struct ifqueue *);
void		gem_stop(struct ifnet *, int);
int		gem_ioctl(struct ifnet *, u_long, caddr_t);
void		gem_tick(void *);
void		gem_watchdog(struct ifnet *);
int		gem_init(struct ifnet *);
void		gem_init_regs(struct gem_softc *);
int		gem_ringsize(int);
int		gem_meminit(struct gem_softc *);
void		gem_mifinit(struct gem_softc *);
int		gem_bitwait(struct gem_softc *, bus_space_handle_t, int,
		    u_int32_t, u_int32_t);
void		gem_reset(struct gem_softc *);
int		gem_reset_rx(struct gem_softc *);
int		gem_reset_tx(struct gem_softc *);
int		gem_disable_rx(struct gem_softc *);
int		gem_disable_tx(struct gem_softc *);
void		gem_rx_watchdog(void *);
void		gem_rxdrain(struct gem_softc *);
void		gem_fill_rx_ring(struct gem_softc *);
int		gem_add_rxbuf(struct gem_softc *, int idx);
int		gem_load_mbuf(struct gem_softc *, struct gem_sxd *,
		    struct mbuf *);
void		gem_iff(struct gem_softc *);

/* MII methods & callbacks */
int		gem_mii_readreg(struct device *, int, int);
void		gem_mii_writereg(struct device *, int, int, int);
void		gem_mii_statchg(struct device *);
int		gem_pcs_readreg(struct device *, int, int);
void		gem_pcs_writereg(struct device *, int, int, int);

int		gem_mediachange(struct ifnet *);
void		gem_mediastatus(struct ifnet *, struct ifmediareq *);

int		gem_eint(struct gem_softc *, u_int);
int		gem_rint(struct gem_softc *);
int		gem_tint(struct gem_softc *, u_int32_t);
int		gem_pint(struct gem_softc *);

#ifdef GEM_DEBUG
#define	DPRINTF(sc, x)	if ((sc)->sc_arpcom.ac_if.if_flags & IFF_DEBUG) \
				printf x
#else
#define	DPRINTF(sc, x)	/* nothing */
#endif

/*
 * Attach a Gem interface to the system.
 */
void
gem_config(struct gem_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mii_data *mii = &sc->sc_mii;
	struct mii_softc *child;
	int i, error, mii_flags, phyad;
	struct ifmedia_entry *ifm;

	/* Make sure the chip is stopped. */
	ifp->if_softc = sc;
	gem_reset(sc);

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmatag,
	    sizeof(struct gem_control_data), PAGE_SIZE, 0, &sc->sc_cdseg,
	    1, &sc->sc_cdnseg, 0)) != 0) {
		printf("\n%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	/* XXX should map this in with correct endianness */
	if ((error = bus_dmamem_map(sc->sc_dmatag, &sc->sc_cdseg, sc->sc_cdnseg,
	    sizeof(struct gem_control_data), (caddr_t *)&sc->sc_control_data,
	    BUS_DMA_COHERENT)) != 0) {
		printf("\n%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmatag,
	    sizeof(struct gem_control_data), 1,
	    sizeof(struct gem_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
		printf("\n%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmatag, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct gem_control_data), NULL,
	    0)) != 0) {
		printf("\n%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < GEM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmatag, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			printf("\n%s: unable to create rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}
	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < GEM_NTXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmatag, MCLBYTES,
		    GEM_NTXSEGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_txd[i].sd_map)) != 0) {
			printf("\n%s: unable to create tx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_6;
		}
		sc->sc_txd[i].sd_mbuf = NULL;
	}

	/*
	 * From this point forward, the attachment cannot fail. A failure
	 * before this point releases all resources that may have been
	 * allocated.
	 */

	/* Announce ourselves. */
	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	/* Get RX FIFO size */
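	/*
	 * The GEM_RX_FIFO_SIZE register counts 64-byte units, hence the
	 * multiplication below; e.g. a (hypothetical) reading of 0x70
	 * would mean 0x70 * 64 = 7168 bytes of RX FIFO.
	 */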
	sc->sc_rxfifosize = 64 *
	    bus_space_read_4(sc->sc_bustag, sc->sc_h1, GEM_RX_FIFO_SIZE);

	/* Initialize ifnet structure. */
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, sizeof ifp->if_xname);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_xflags = IFXF_MPSAFE;
	ifp->if_qstart = gem_start;
	ifp->if_ioctl = gem_ioctl;
	ifp->if_watchdog = gem_watchdog;
	ifq_init_maxlen(&ifp->if_snd, GEM_NTXDESC - 1);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	/* Initialize ifmedia structures and MII info */
	mii->mii_ifp = ifp;
	mii->mii_readreg = gem_mii_readreg;
	mii->mii_writereg = gem_mii_writereg;
	mii->mii_statchg = gem_mii_statchg;

	ifmedia_init(&mii->mii_media, 0, gem_mediachange, gem_mediastatus);

	/* Bad things will happen if we touch this register on ERI. */
	if (sc->sc_variant != GEM_SUN_ERI)
		bus_space_write_4(sc->sc_bustag, sc->sc_h1,
		    GEM_MII_DATAPATH_MODE, 0);

	gem_mifinit(sc);

	mii_flags = MIIF_DOPAUSE;

	/*
	 * Look for an external PHY.
	 */
	if (sc->sc_mif_config & GEM_MIF_CONFIG_MDI1) {
		sc->sc_mif_config |= GEM_MIF_CONFIG_PHY_SEL;
		bus_space_write_4(sc->sc_bustag, sc->sc_h1,
		    GEM_MIF_CONFIG, sc->sc_mif_config);

		switch (sc->sc_variant) {
		case GEM_SUN_ERI:
			phyad = GEM_PHYAD_EXTERNAL;
			break;
		default:
			phyad = MII_PHY_ANY;
			break;
		}

		mii_attach(&sc->sc_dev, mii, 0xffffffff, phyad,
		    MII_OFFSET_ANY, mii_flags);
	}

	/*
	 * Fall back on an internal PHY if no external PHY was found.
	 * Note that with Apple (K2) GMACs GEM_MIF_CONFIG_MDI0 can't be
	 * trusted when the firmware has powered down the chip
	 */
	child = LIST_FIRST(&mii->mii_phys);
	if (child == NULL &&
	    (sc->sc_mif_config & GEM_MIF_CONFIG_MDI0 || GEM_IS_APPLE(sc))) {
		sc->sc_mif_config &= ~GEM_MIF_CONFIG_PHY_SEL;
		bus_space_write_4(sc->sc_bustag, sc->sc_h1,
		    GEM_MIF_CONFIG, sc->sc_mif_config);

		switch (sc->sc_variant) {
		case GEM_SUN_ERI:
		case GEM_APPLE_K2_GMAC:
			phyad = GEM_PHYAD_INTERNAL;
			break;
		case GEM_APPLE_GMAC:
			phyad = GEM_PHYAD_EXTERNAL;
			break;
		default:
			phyad = MII_PHY_ANY;
			break;
		}

		mii_attach(&sc->sc_dev, mii, 0xffffffff, phyad,
		    MII_OFFSET_ANY, mii_flags);
	}

	/*
	 * Try the external PCS SERDES if we didn't find any MII
	 * devices.
	 */
	child = LIST_FIRST(&mii->mii_phys);
	if (child == NULL && sc->sc_variant != GEM_SUN_ERI) {
		bus_space_write_4(sc->sc_bustag, sc->sc_h1,
		    GEM_MII_DATAPATH_MODE, GEM_MII_DATAPATH_SERDES);

		bus_space_write_4(sc->sc_bustag, sc->sc_h1,
		    GEM_MII_SLINK_CONTROL,
		    GEM_MII_SLINK_LOOPBACK|GEM_MII_SLINK_EN_SYNC_D);

		bus_space_write_4(sc->sc_bustag, sc->sc_h1,
		    GEM_MII_CONFIG, GEM_MII_CONFIG_ENABLE);

		mii->mii_readreg = gem_pcs_readreg;
		mii->mii_writereg = gem_pcs_writereg;

		mii_flags |= MIIF_NOISOLATE;

		mii_attach(&sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY,
		    MII_OFFSET_ANY, mii_flags);
	}

	child = LIST_FIRST(&mii->mii_phys);
	if (child == NULL) {
		/* No PHY attached */
		ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_MANUAL);
	} else {
		/*
		 * XXX - we can really do the following ONLY if the
		 * phy indeed has the auto negotiation capability!!
		 */
		ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_AUTO);
	}

	/* Check if we support GigE media. */
	mtx_enter(&ifmedia_mtx);
	TAILQ_FOREACH(ifm, &sc->sc_media.ifm_list, ifm_list) {
		if (IFM_SUBTYPE(ifm->ifm_media) == IFM_1000_T ||
		    IFM_SUBTYPE(ifm->ifm_media) == IFM_1000_SX ||
		    IFM_SUBTYPE(ifm->ifm_media) == IFM_1000_LX ||
		    IFM_SUBTYPE(ifm->ifm_media) == IFM_1000_CX) {
			sc->sc_flags |= GEM_GIGABIT;
			break;
		}
	}
	mtx_leave(&ifmedia_mtx);

	/* Attach the interface. */
	if_attach(ifp);
	ether_ifattach(ifp);

	timeout_set(&sc->sc_tick_ch, gem_tick, sc);
	timeout_set(&sc->sc_rx_watchdog, gem_rx_watchdog, sc);
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_6:
	for (i = 0; i < GEM_NTXDESC; i++) {
		if (sc->sc_txd[i].sd_map != NULL)
			bus_dmamap_destroy(sc->sc_dmatag,
			    sc->sc_txd[i].sd_map);
	}
 fail_5:
	for (i = 0; i < GEM_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmatag,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmatag, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmatag, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmatag, (caddr_t)sc->sc_control_data,
	    sizeof(struct gem_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmatag, &sc->sc_cdseg, sc->sc_cdnseg);
 fail_0:
	return;
}

void
gem_unconfig(struct gem_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int i;

	gem_stop(ifp, 1);

	for (i = 0; i < GEM_NTXDESC; i++) {
		if (sc->sc_txd[i].sd_map != NULL)
			bus_dmamap_destroy(sc->sc_dmatag,
			    sc->sc_txd[i].sd_map);
	}
	for (i = 0; i < GEM_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmatag,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmatag, sc->sc_cddmamap);
	bus_dmamap_destroy(sc->sc_dmatag, sc->sc_cddmamap);
	bus_dmamem_unmap(sc->sc_dmatag, (caddr_t)sc->sc_control_data,
	    sizeof(struct gem_control_data));
	bus_dmamem_free(sc->sc_dmatag, &sc->sc_cdseg, sc->sc_cdnseg);

	/* Detach all PHYs */
	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Delete all remaining media. */
	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);

	ether_ifdetach(ifp);
	if_detach(ifp);
}


void
gem_tick(void *arg)
{
	struct gem_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mac = sc->sc_h1;
	int s;
	u_int32_t v;

	s = splnet();
	/* unload collisions counters */
	v = bus_space_read_4(t, mac, GEM_MAC_EXCESS_COLL_CNT) +
	    bus_space_read_4(t, mac, GEM_MAC_LATE_COLL_CNT);
	ifp->if_collisions += v +
	    bus_space_read_4(t, mac, GEM_MAC_NORM_COLL_CNT) +
	    bus_space_read_4(t, mac, GEM_MAC_FIRST_COLL_CNT);
	ifp->if_oerrors += v;

	/* read error counters */
	ifp->if_ierrors +=
	    bus_space_read_4(t, mac, GEM_MAC_RX_LEN_ERR_CNT) +
	    bus_space_read_4(t, mac, GEM_MAC_RX_ALIGN_ERR) +
	    bus_space_read_4(t, mac, GEM_MAC_RX_CRC_ERR_CNT) +
	    bus_space_read_4(t, mac, GEM_MAC_RX_CODE_VIOL);

	/* clear the hardware counters */
	bus_space_write_4(t, mac, GEM_MAC_NORM_COLL_CNT, 0);
	bus_space_write_4(t, mac, GEM_MAC_FIRST_COLL_CNT, 0);
	bus_space_write_4(t, mac, GEM_MAC_EXCESS_COLL_CNT, 0);
	bus_space_write_4(t, mac, GEM_MAC_LATE_COLL_CNT, 0);
	bus_space_write_4(t, mac, GEM_MAC_RX_LEN_ERR_CNT, 0);
	bus_space_write_4(t, mac, GEM_MAC_RX_ALIGN_ERR, 0);
	bus_space_write_4(t, mac, GEM_MAC_RX_CRC_ERR_CNT, 0);
	bus_space_write_4(t, mac, GEM_MAC_RX_CODE_VIOL, 0);

	/*
	 * If buffer allocation fails, the receive ring may become
	 * empty. There is no receive interrupt to recover from that.
	 */
	if (if_rxr_inuse(&sc->sc_rx_ring) == 0) {
		gem_fill_rx_ring(sc);
		bus_space_write_4(t, mac, GEM_RX_KICK, sc->sc_rx_prod);
	}

	mii_tick(&sc->sc_mii);
	splx(s);

	timeout_add_sec(&sc->sc_tick_ch, 1);
}

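/*
 * Poll a register until the bits in "clr" read back as zero and the
 * bits in "set" read back as one.  With TRIES at 10000 and a 100us
 * DELAY() per iteration this gives up after roughly one second.
 */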
int
gem_bitwait(struct gem_softc *sc, bus_space_handle_t h, int r,
    u_int32_t clr, u_int32_t set)
{
	int i;
	u_int32_t reg;

	for (i = TRIES; i--; DELAY(100)) {
		reg = bus_space_read_4(sc->sc_bustag, h, r);
		if ((reg & clr) == 0 && (reg & set) == set)
			return (1);
	}

	return (0);
}

void
gem_reset(struct gem_softc *sc)
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h2;
	int s;

	s = splnet();
	DPRINTF(sc, ("%s: gem_reset\n", sc->sc_dev.dv_xname));
	gem_reset_rx(sc);
	gem_reset_tx(sc);

	/* Do a full reset */
	bus_space_write_4(t, h, GEM_RESET, GEM_RESET_RX|GEM_RESET_TX);
	if (!gem_bitwait(sc, h, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX, 0))
		printf("%s: cannot reset device\n", sc->sc_dev.dv_xname);
	splx(s);
}


/*
 * Drain the receive queue.
 */
void
gem_rxdrain(struct gem_softc *sc)
{
	struct gem_rxsoft *rxs;
	int i;

	for (i = 0; i < GEM_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf != NULL) {
			bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmatag, rxs->rxs_dmamap);
			m_freem(rxs->rxs_mbuf);
			rxs->rxs_mbuf = NULL;
		}
	}
	sc->sc_rx_prod = sc->sc_rx_cons = 0;
}

/*
 * Reset the whole thing.
 */
void
gem_stop(struct ifnet *ifp, int softonly)
{
	struct gem_softc *sc = (struct gem_softc *)ifp->if_softc;
	struct gem_sxd *sd;
	u_int32_t i;

	DPRINTF(sc, ("%s: gem_stop\n", sc->sc_dev.dv_xname));

	timeout_del(&sc->sc_tick_ch);

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_timer = 0;

	if (!softonly) {
		mii_down(&sc->sc_mii);

		gem_reset_rx(sc);
		gem_reset_tx(sc);
	}

	intr_barrier(sc->sc_ih);
	ifq_barrier(&ifp->if_snd);

	KASSERT((ifp->if_flags & IFF_RUNNING) == 0);

	/*
	 * Release any queued transmit buffers.
	 */
	for (i = 0; i < GEM_NTXDESC; i++) {
		sd = &sc->sc_txd[i];
		if (sd->sd_mbuf != NULL) {
			bus_dmamap_sync(sc->sc_dmatag, sd->sd_map, 0,
			    sd->sd_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmatag, sd->sd_map);
			m_freem(sd->sd_mbuf);
			sd->sd_mbuf = NULL;
		}
	}
	sc->sc_tx_cnt = sc->sc_tx_prod = sc->sc_tx_cons = 0;

	gem_rxdrain(sc);
}


/*
 * Reset the receiver
 */
int
gem_reset_rx(struct gem_softc *sc)
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h1, h2 = sc->sc_h2;

	/*
	 * Resetting while DMA is in progress can cause a bus hang, so we
	 * disable DMA first.
	 */
	gem_disable_rx(sc);
	bus_space_write_4(t, h, GEM_RX_CONFIG, 0);
	/* Wait till it finishes */
	if (!gem_bitwait(sc, h, GEM_RX_CONFIG, 1, 0))
		printf("%s: cannot disable rx dma\n", sc->sc_dev.dv_xname);
	/* Wait 5ms extra. */
	delay(5000);

	/* Finally, reset the ERX */
	bus_space_write_4(t, h2, GEM_RESET, GEM_RESET_RX);
	/* Wait till it finishes */
	if (!gem_bitwait(sc, h2, GEM_RESET, GEM_RESET_RX, 0)) {
		printf("%s: cannot reset receiver\n", sc->sc_dev.dv_xname);
		return (1);
	}
	return (0);
}


/*
 * Reset the transmitter
 */
int
gem_reset_tx(struct gem_softc *sc)
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h1, h2 = sc->sc_h2;

	/*
	 * Resetting while DMA is in progress can cause a bus hang, so we
	 * disable DMA first.
	 */
	gem_disable_tx(sc);
	bus_space_write_4(t, h, GEM_TX_CONFIG, 0);
	/* Wait till it finishes */
	if (!gem_bitwait(sc, h, GEM_TX_CONFIG, 1, 0))
		printf("%s: cannot disable tx dma\n", sc->sc_dev.dv_xname);
	/* Wait 5ms extra. */
	delay(5000);

	/* Finally, reset the ETX */
	bus_space_write_4(t, h2, GEM_RESET, GEM_RESET_TX);
	/* Wait till it finishes */
	if (!gem_bitwait(sc, h2, GEM_RESET, GEM_RESET_TX, 0)) {
		printf("%s: cannot reset transmitter\n",
		    sc->sc_dev.dv_xname);
		return (1);
	}
	return (0);
}

/*
 * Disable receiver.
 */
int
gem_disable_rx(struct gem_softc *sc)
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h1;
	u_int32_t cfg;

	/* Flip the enable bit */
	cfg = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG);
	cfg &= ~GEM_MAC_RX_ENABLE;
	bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, cfg);

	/* Wait for it to finish */
	return (gem_bitwait(sc, h, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 0));
}

/*
 * Disable transmitter.
 */
int
gem_disable_tx(struct gem_softc *sc)
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h1;
	u_int32_t cfg;

	/* Flip the enable bit */
	cfg = bus_space_read_4(t, h, GEM_MAC_TX_CONFIG);
	cfg &= ~GEM_MAC_TX_ENABLE;
	bus_space_write_4(t, h, GEM_MAC_TX_CONFIG, cfg);

	/* Wait for it to finish */
	return (gem_bitwait(sc, h, GEM_MAC_TX_CONFIG, GEM_MAC_TX_ENABLE, 0));
}

/*
 * Initialize interface.
 */
int
gem_meminit(struct gem_softc *sc)
{
	int i;

	/*
	 * Initialize the transmit descriptor ring.
	 */
	for (i = 0; i < GEM_NTXDESC; i++) {
		sc->sc_txdescs[i].gd_flags = 0;
		sc->sc_txdescs[i].gd_addr = 0;
	}
	GEM_CDTXSYNC(sc, 0, GEM_NTXDESC,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	/*
	 * Initialize the receive descriptor and receive job
	 * descriptor rings.
	 */
	for (i = 0; i < GEM_NRXDESC; i++) {
		sc->sc_rxdescs[i].gd_flags = 0;
		sc->sc_rxdescs[i].gd_addr = 0;
	}
	/* Hardware reads RX descriptors in multiples of four. */
	if_rxr_init(&sc->sc_rx_ring, 4, GEM_NRXDESC - 4);
	gem_fill_rx_ring(sc);

	return (0);
}

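/*
 * Translate a descriptor ring size into the selector value the TX/RX
 * config registers expect; the GEM_RING_SZ_* constants encode
 * log2(size / 32) in bits [4:1].
 */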
int
gem_ringsize(int sz)
{
	switch (sz) {
	case 32:
		return GEM_RING_SZ_32;
	case 64:
		return GEM_RING_SZ_64;
	case 128:
		return GEM_RING_SZ_128;
	case 256:
		return GEM_RING_SZ_256;
	case 512:
		return GEM_RING_SZ_512;
	case 1024:
		return GEM_RING_SZ_1024;
	case 2048:
		return GEM_RING_SZ_2048;
	case 4096:
		return GEM_RING_SZ_4096;
	case 8192:
		return GEM_RING_SZ_8192;
	default:
		printf("gem: invalid Receive Descriptor ring size %d\n", sz);
		return GEM_RING_SZ_32;
	}
}

/*
 * Initialization of interface; set up initialization block
 * and transmit/receive descriptor rings.
 */
int
gem_init(struct ifnet *ifp)
{

	struct gem_softc *sc = (struct gem_softc *)ifp->if_softc;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h1;
	int s;
	u_int32_t v;

	s = splnet();

	DPRINTF(sc, ("%s: gem_init: calling stop\n", sc->sc_dev.dv_xname));
	/*
	 * Initialization sequence. The numbered steps below correspond
	 * to the sequence outlined in section 6.3.5.1 in the Ethernet
	 * Channel Engine manual (part of the PCIO manual).
	 * See also the STP2002-STQ document from Sun Microsystems.
	 */

	/* step 1 & 2. Reset the Ethernet Channel */
	gem_stop(ifp, 0);
	gem_reset(sc);
	DPRINTF(sc, ("%s: gem_init: restarting\n", sc->sc_dev.dv_xname));

	/* Re-initialize the MIF */
	gem_mifinit(sc);

	/* Call MI reset function if any */
	if (sc->sc_hwreset)
		(*sc->sc_hwreset)(sc);

	/* step 3. Setup data structures in host memory */
	gem_meminit(sc);

	/* step 4. TX MAC registers & counters */
	gem_init_regs(sc);

	/* step 5. RX MAC registers & counters */
	gem_iff(sc);

	/* step 6 & 7. Program Descriptor Ring Base Addresses */
	bus_space_write_4(t, h, GEM_TX_RING_PTR_HI,
	    (((uint64_t)GEM_CDTXADDR(sc,0)) >> 32));
	bus_space_write_4(t, h, GEM_TX_RING_PTR_LO, GEM_CDTXADDR(sc, 0));

	bus_space_write_4(t, h, GEM_RX_RING_PTR_HI,
	    (((uint64_t)GEM_CDRXADDR(sc,0)) >> 32));
	bus_space_write_4(t, h, GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0));

	/* step 8. Global Configuration & Interrupt Mask */
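	/*
	 * The interrupt mask register disables the sources whose bits
	 * are set, so the complement is written here: only the events
	 * listed below are left unmasked.
	 */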
	bus_space_write_4(t, h, GEM_INTMASK,
	    ~(GEM_INTR_TX_INTME|
	      GEM_INTR_TX_EMPTY|
	      GEM_INTR_RX_DONE|GEM_INTR_RX_NOBUF|
	      GEM_INTR_RX_TAG_ERR|GEM_INTR_PCS|
	      GEM_INTR_MAC_CONTROL|GEM_INTR_MIF|
	      GEM_INTR_BERR));
	bus_space_write_4(t, h, GEM_MAC_RX_MASK,
	    GEM_MAC_RX_DONE|GEM_MAC_RX_FRAME_CNT);
	bus_space_write_4(t, h, GEM_MAC_TX_MASK, 0xffff); /* XXXX */
	bus_space_write_4(t, h, GEM_MAC_CONTROL_MASK, 0); /* XXXX */

	/* step 9. ETX Configuration: use mostly default values */

	/* Enable DMA */
	v = gem_ringsize(GEM_NTXDESC /*XXX*/);
	v |= ((sc->sc_variant == GEM_SUN_ERI ? 0x100 : 0x04ff) << 10) &
	    GEM_TX_CONFIG_TXFIFO_TH;
	bus_space_write_4(t, h, GEM_TX_CONFIG, v | GEM_TX_CONFIG_TXDMA_EN);
	bus_space_write_4(t, h, GEM_TX_KICK, 0);

	/* step 10. ERX Configuration */

	/* Encode Receive Descriptor ring size: four possible values */
	v = gem_ringsize(GEM_NRXDESC /*XXX*/);
	/* Enable DMA */
	bus_space_write_4(t, h, GEM_RX_CONFIG,
	    v|(GEM_THRSH_1024<<GEM_RX_CONFIG_FIFO_THRS_SHIFT)|
	    (2<<GEM_RX_CONFIG_FBOFF_SHFT)|GEM_RX_CONFIG_RXDMA_EN|
	    (0<<GEM_RX_CONFIG_CXM_START_SHFT));
	/*
	 * The following value is for an OFF Threshold of about 3/4 full
	 * and an ON Threshold of 1/4 full.
	 */
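	/*
	 * In 64-byte register units that is OFF = 3 * sc_rxfifosize / 256
	 * and ON = sc_rxfifosize / 256; e.g. a 16kB FIFO would give an
	 * OFF threshold of 192 (12kB, 3/4 full) and an ON threshold of
	 * 64 (4kB, 1/4 full).
	 */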
	bus_space_write_4(t, h, GEM_RX_PAUSE_THRESH,
	    (3 * sc->sc_rxfifosize / 256) |
	    ((sc->sc_rxfifosize / 256) << 12));
	bus_space_write_4(t, h, GEM_RX_BLANKING, (6 << 12) | 6);

	/* step 11. Configure Media */
	mii_mediachg(&sc->sc_mii);

	/* step 12. RX_MAC Configuration Register */
	v = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG);
	v |= GEM_MAC_RX_ENABLE | GEM_MAC_RX_STRIP_CRC;
	bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, v);

	/* step 14. Issue Transmit Pending command */

	/* Call MI initialization function if any */
	if (sc->sc_hwinit)
		(*sc->sc_hwinit)(sc);

	/* step 15. Give the receiver a swift kick */
	bus_space_write_4(t, h, GEM_RX_KICK, sc->sc_rx_prod);

	/* Start the one second timer. */
	timeout_add_sec(&sc->sc_tick_ch, 1);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	splx(s);

	return (0);
}

void
gem_init_regs(struct gem_softc *sc)
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h1;
	u_int32_t v;

	/* These regs are not cleared on reset */
	sc->sc_inited = 0;
	if (!sc->sc_inited) {
		/* Load recommended values */
		bus_space_write_4(t, h, GEM_MAC_IPG0, 0x00);
		bus_space_write_4(t, h, GEM_MAC_IPG1, 0x08);
		bus_space_write_4(t, h, GEM_MAC_IPG2, 0x04);

		bus_space_write_4(t, h, GEM_MAC_MAC_MIN_FRAME, ETHER_MIN_LEN);
		/* Max frame and max burst size */
		bus_space_write_4(t, h, GEM_MAC_MAC_MAX_FRAME,
		    (ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN) | (0x2000 << 16));

		bus_space_write_4(t, h, GEM_MAC_PREAMBLE_LEN, 0x07);
		bus_space_write_4(t, h, GEM_MAC_JAM_SIZE, 0x04);
		bus_space_write_4(t, h, GEM_MAC_ATTEMPT_LIMIT, 0x10);
		bus_space_write_4(t, h, GEM_MAC_CONTROL_TYPE, 0x8088);
		bus_space_write_4(t, h, GEM_MAC_RANDOM_SEED,
		    ((sc->sc_arpcom.ac_enaddr[5]<<8)|sc->sc_arpcom.ac_enaddr[4])&0x3ff);

		/* Secondary MAC addr set to 0:0:0:0:0:0 */
		bus_space_write_4(t, h, GEM_MAC_ADDR3, 0);
		bus_space_write_4(t, h, GEM_MAC_ADDR4, 0);
		bus_space_write_4(t, h, GEM_MAC_ADDR5, 0);

		/* MAC control addr set to 0:1:c2:0:1:80 */
		bus_space_write_4(t, h, GEM_MAC_ADDR6, 0x0001);
		bus_space_write_4(t, h, GEM_MAC_ADDR7, 0xc200);
		bus_space_write_4(t, h, GEM_MAC_ADDR8, 0x0180);

		/* MAC filter addr set to 0:0:0:0:0:0 */
		bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER0, 0);
		bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER1, 0);
		bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER2, 0);

		bus_space_write_4(t, h, GEM_MAC_ADR_FLT_MASK1_2, 0);
		bus_space_write_4(t, h, GEM_MAC_ADR_FLT_MASK0, 0);

		sc->sc_inited = 1;
	}

	/* Counters need to be zeroed */
	bus_space_write_4(t, h, GEM_MAC_NORM_COLL_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_FIRST_COLL_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_EXCESS_COLL_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_LATE_COLL_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_DEFER_TMR_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_PEAK_ATTEMPTS, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_FRAME_COUNT, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_LEN_ERR_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_ALIGN_ERR, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_CRC_ERR_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_CODE_VIOL, 0);

	/* Set XOFF PAUSE time */
	bus_space_write_4(t, h, GEM_MAC_SEND_PAUSE_CMD, 0x1bf0);

	/*
	 * Set the internal arbitration to "infinite" bursts of the
	 * maximum length of 31 * 64 bytes so DMA transfers aren't
	 * split up in cache line size chunks. This greatly improves
	 * especially RX performance.
	 * Enable silicon bug workarounds for the Apple variants.
	 */
	v = GEM_CONFIG_TXDMA_LIMIT | GEM_CONFIG_RXDMA_LIMIT;
	if (sc->sc_pci)
		v |= GEM_CONFIG_BURST_INF;
	else
		v |= GEM_CONFIG_BURST_64;
	if (sc->sc_variant != GEM_SUN_GEM && sc->sc_variant != GEM_SUN_ERI)
		v |= GEM_CONFIG_RONPAULBIT | GEM_CONFIG_BUG2FIX;
	bus_space_write_4(t, h, GEM_CONFIG, v);

	/*
	 * Set the station address.
	 */
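	/*
	 * The 48-bit address is written as three 16-bit words, least
	 * significant word in GEM_MAC_ADDR0; e.g. 00:01:02:03:04:05
	 * becomes ADDR0 = 0x0405, ADDR1 = 0x0203, ADDR2 = 0x0001.
	 */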
| 947 | bus_space_write_4(t, h, GEM_MAC_ADDR0,((t)->write_4((h), (0x6080), ((sc->sc_arpcom.ac_enaddr[ 4]<<8) | sc->sc_arpcom.ac_enaddr[5]))) | |||
| 948 | (sc->sc_arpcom.ac_enaddr[4]<<8) | sc->sc_arpcom.ac_enaddr[5])((t)->write_4((h), (0x6080), ((sc->sc_arpcom.ac_enaddr[ 4]<<8) | sc->sc_arpcom.ac_enaddr[5]))); | |||
| 949 | bus_space_write_4(t, h, GEM_MAC_ADDR1,((t)->write_4((h), (0x6084), ((sc->sc_arpcom.ac_enaddr[ 2]<<8) | sc->sc_arpcom.ac_enaddr[3]))) | |||
| 950 | (sc->sc_arpcom.ac_enaddr[2]<<8) | sc->sc_arpcom.ac_enaddr[3])((t)->write_4((h), (0x6084), ((sc->sc_arpcom.ac_enaddr[ 2]<<8) | sc->sc_arpcom.ac_enaddr[3]))); | |||
| 951 | bus_space_write_4(t, h, GEM_MAC_ADDR2,((t)->write_4((h), (0x6088), ((sc->sc_arpcom.ac_enaddr[ 0]<<8) | sc->sc_arpcom.ac_enaddr[1]))) | |||
| 952 | (sc->sc_arpcom.ac_enaddr[0]<<8) | sc->sc_arpcom.ac_enaddr[1])((t)->write_4((h), (0x6088), ((sc->sc_arpcom.ac_enaddr[ 0]<<8) | sc->sc_arpcom.ac_enaddr[1]))); | |||
| 953 | } | |||
| 954 | ||||
| 955 | /* | |||
| 956 | * Receive interrupt. | |||
| 957 | */ | |||
| 958 | int | |||
| 959 | gem_rint(struct gem_softc *sc) | |||
| 960 | { | |||
| 961 | struct ifnet *ifp = &sc->sc_arpcom.ac_if; | |||
| 962 | bus_space_tag_t t = sc->sc_bustag; | |||
| 963 | bus_space_handle_t h = sc->sc_h1; | |||
| 964 | struct gem_rxsoft *rxs; | |||
| 965 | struct mbuf_list ml = MBUF_LIST_INITIALIZER(){ ((void *)0), ((void *)0), 0 }; | |||
| 966 | struct mbuf *m; | |||
| 967 | u_int64_t rxstat; | |||
| 968 | int i, len; | |||
| 969 | ||||
| 970 | if (if_rxr_inuse(&sc->sc_rx_ring)((&sc->sc_rx_ring)->rxr_alive) == 0) | |||
| 971 | return (0); | |||
| 972 | ||||
| 973 | for (i = sc->sc_rx_cons; if_rxr_inuse(&sc->sc_rx_ring)((&sc->sc_rx_ring)->rxr_alive) > 0; | |||
| 974 | i = GEM_NEXTRX(i)((i + 1) & (128 - 1))) { | |||
| 975 | rxs = &sc->sc_rxsoft[i]; | |||
| 976 | ||||
| 977 | GEM_CDRXSYNC(sc, i,(*((sc)->sc_dmatag)->_dmamap_sync)(((sc)->sc_dmatag) , ((sc)->sc_cddmamap), (__builtin_offsetof(struct gem_control_data , gcd_rxdescs[((i))])), (sizeof(struct gem_desc)), ((0x02|0x08 ))) | |||
| 978 | BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)(*((sc)->sc_dmatag)->_dmamap_sync)(((sc)->sc_dmatag) , ((sc)->sc_cddmamap), (__builtin_offsetof(struct gem_control_data , gcd_rxdescs[((i))])), (sizeof(struct gem_desc)), ((0x02|0x08 ))); | |||
| 979 | ||||
| 980 | rxstat = GEM_DMA_READ(sc, &sc->sc_rxdescs[i].gd_flags)(((sc)->sc_pci) ? ((__uint64_t)(*(__uint64_t *)(&sc-> sc_control_data->gcd_rxdescs[i].gd_flags))) : (__uint64_t) (__builtin_constant_p(*(__uint64_t *)(&sc->sc_control_data ->gcd_rxdescs[i].gd_flags)) ? (__uint64_t)((((__uint64_t)( *(__uint64_t *)(&sc->sc_control_data->gcd_rxdescs[i ].gd_flags)) & 0xff) << 56) | ((__uint64_t)(*(__uint64_t *)(&sc->sc_control_data->gcd_rxdescs[i].gd_flags)) & 0xff00ULL) << 40 | ((__uint64_t)(*(__uint64_t *) (&sc->sc_control_data->gcd_rxdescs[i].gd_flags)) & 0xff0000ULL) << 24 | ((__uint64_t)(*(__uint64_t *)(& sc->sc_control_data->gcd_rxdescs[i].gd_flags)) & 0xff000000ULL ) << 8 | ((__uint64_t)(*(__uint64_t *)(&sc->sc_control_data ->gcd_rxdescs[i].gd_flags)) & 0xff00000000ULL) >> 8 | ((__uint64_t)(*(__uint64_t *)(&sc->sc_control_data ->gcd_rxdescs[i].gd_flags)) & 0xff0000000000ULL) >> 24 | ((__uint64_t)(*(__uint64_t *)(&sc->sc_control_data ->gcd_rxdescs[i].gd_flags)) & 0xff000000000000ULL) >> 40 | ((__uint64_t)(*(__uint64_t *)(&sc->sc_control_data ->gcd_rxdescs[i].gd_flags)) & 0xff00000000000000ULL) >> 56) : __swap64md(*(__uint64_t *)(&sc->sc_control_data ->gcd_rxdescs[i].gd_flags)))); | |||
| 981 | ||||
| 982 | if (rxstat & GEM_RD_OWN0x0000000080000000LL) { | |||
| 983 | /* We have processed all of the receive buffers. */ | |||
| 984 | break; | |||
| 985 | } | |||
| 986 | ||||
| 987 | bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0,(*(sc->sc_dmatag)->_dmamap_sync)((sc->sc_dmatag), (rxs ->rxs_dmamap), (0), (rxs->rxs_dmamap->dm_mapsize), ( 0x02)) | |||
| 988 | rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD)(*(sc->sc_dmatag)->_dmamap_sync)((sc->sc_dmatag), (rxs ->rxs_dmamap), (0), (rxs->rxs_dmamap->dm_mapsize), ( 0x02)); | |||
| 989 | bus_dmamap_unload(sc->sc_dmatag, rxs->rxs_dmamap)(*(sc->sc_dmatag)->_dmamap_unload)((sc->sc_dmatag), ( rxs->rxs_dmamap)); | |||
| 990 | ||||
| 991 | m = rxs->rxs_mbuf; | |||
| 992 | rxs->rxs_mbuf = NULL((void *)0); | |||
| 993 | ||||
| 994 | if_rxr_put(&sc->sc_rx_ring, 1)do { (&sc->sc_rx_ring)->rxr_alive -= (1); } while ( 0); | |||
| 995 | ||||
| 996 | if (rxstat & GEM_RD_BAD_CRC0x4000000000000000LL) { | |||
| 997 | ifp->if_ierrorsif_data.ifi_ierrors++; | |||
| 998 | #ifdef GEM_DEBUG | |||
| 999 | printf("%s: receive error: CRC error\n", | |||
| 1000 | sc->sc_dev.dv_xname); | |||
| 1001 | #endif | |||
| 1002 | m_freem(m); | |||
| 1003 | continue; | |||
| 1004 | } | |||
| 1005 | ||||
| 1006 | #ifdef GEM_DEBUG | |||
| 1007 | if (ifp->if_flags & IFF_DEBUG0x4) { | |||
| 1008 | printf(" rxsoft %p descriptor %d: ", rxs, i); | |||
| 1009 | printf("gd_flags: 0x%016llx\t", (long long) | |||
| 1010 | GEM_DMA_READ(sc, &sc->sc_rxdescs[i].gd_flags)(((sc)->sc_pci) ? ((__uint64_t)(*(__uint64_t *)(&sc-> sc_control_data->gcd_rxdescs[i].gd_flags))) : (__uint64_t) (__builtin_constant_p(*(__uint64_t *)(&sc->sc_control_data ->gcd_rxdescs[i].gd_flags)) ? (__uint64_t)((((__uint64_t)( *(__uint64_t *)(&sc->sc_control_data->gcd_rxdescs[i ].gd_flags)) & 0xff) << 56) | ((__uint64_t)(*(__uint64_t *)(&sc->sc_control_data->gcd_rxdescs[i].gd_flags)) & 0xff00ULL) << 40 | ((__uint64_t)(*(__uint64_t *) (&sc->sc_control_data->gcd_rxdescs[i].gd_flags)) & 0xff0000ULL) << 24 | ((__uint64_t)(*(__uint64_t *)(& sc->sc_control_data->gcd_rxdescs[i].gd_flags)) & 0xff000000ULL ) << 8 | ((__uint64_t)(*(__uint64_t *)(&sc->sc_control_data ->gcd_rxdescs[i].gd_flags)) & 0xff00000000ULL) >> 8 | ((__uint64_t)(*(__uint64_t *)(&sc->sc_control_data ->gcd_rxdescs[i].gd_flags)) & 0xff0000000000ULL) >> 24 | ((__uint64_t)(*(__uint64_t *)(&sc->sc_control_data ->gcd_rxdescs[i].gd_flags)) & 0xff000000000000ULL) >> 40 | ((__uint64_t)(*(__uint64_t *)(&sc->sc_control_data ->gcd_rxdescs[i].gd_flags)) & 0xff00000000000000ULL) >> 56) : __swap64md(*(__uint64_t *)(&sc->sc_control_data ->gcd_rxdescs[i].gd_flags))))); | |||
| 1011 | printf("gd_addr: 0x%016llx\n", (long long) | |||
| 1012 | GEM_DMA_READ(sc, &sc->sc_rxdescs[i].gd_addr)); | |||
| 1013 | } | |||
| 1014 | #endif | |||
| 1015 | ||||
| 1016 | /* No errors; receive the packet. */ | |||
| 1017 | len = GEM_RD_BUFLEN(rxstat); | |||
| 1018 | ||||
| 1019 | m->m_data += 2; /* We're already off by two */ | |||
| 1020 | m->m_pkthdr.len = m->m_len = len; | |||
| 1021 | ||||
| 1022 | ml_enqueue(&ml, m); | |||
| 1023 | } | |||
| 1024 | ||||
| 1025 | if (ifiq_input(&ifp->if_rcv, &ml)) | |||
| 1026 | if_rxr_livelocked(&sc->sc_rx_ring); | |||
| 1027 | ||||
| 1028 | /* Update the receive pointer. */ | |||
| 1029 | sc->sc_rx_cons = i; | |||
| 1030 | gem_fill_rx_ring(sc); | |||
| 1031 | bus_space_write_4(t, h, GEM_RX_KICK, sc->sc_rx_prod); | |||
| 1032 | ||||
| 1033 | DPRINTF(sc, ("gem_rint: done sc->sc_rx_cons %d, complete %d\n", | |||
| 1034 | sc->sc_rx_cons, bus_space_read_4(t, h, GEM_RX_COMPLETION))); | |||
| 1035 | ||||
| 1036 | return (1); | |||
| 1037 | } | |||
| 1038 | ||||
| 1039 | void | |||
| 1040 | gem_fill_rx_ring(struct gem_softc *sc) | |||
| 1041 | { | |||
| 1042 | u_int slots; | |||
| 1043 | ||||
| 1044 | for (slots = if_rxr_get(&sc->sc_rx_ring, GEM_NRXDESC128 - 4); | |||
| 1045 | slots > 0; slots--) { | |||
| 1046 | if (gem_add_rxbuf(sc, sc->sc_rx_prod)) | |||
| 1047 | break; | |||
| 1048 | } | |||
| 1049 | if_rxr_put(&sc->sc_rx_ring, slots); | |||
| 1050 | } | |||
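The loop above follows a reserve/fill/return pattern: if_rxr_get() hands back a batch of ring slots, gem_add_rxbuf() is attempted for each one, and whatever could not be filled is returned with if_rxr_put(). A minimal, self-contained sketch of that pattern, using a hypothetical toy_rxring in place of the kernel's rx ring accounting (only the shape of the loop mirrors the driver), is:

    /*
     * Sketch of the reserve/fill/return pattern used by gem_fill_rx_ring().
     * "toy_rxring" is a hypothetical stand-in for the kernel's rx ring
     * accounting; only the shape of the loop matches the driver.
     */
    #include <stdio.h>

    struct toy_rxring {
        unsigned int alive;    /* buffers currently posted to the ring */
        unsigned int max;      /* capacity we allow ourselves to use */
    };

    static unsigned int
    toy_rxr_get(struct toy_rxring *r, unsigned int wanted)
    {
        unsigned int slots = r->max - r->alive;

        if (slots > wanted)
            slots = wanted;
        r->alive += slots;     /* optimistically claim the whole batch */
        return (slots);
    }

    static void
    toy_rxr_put(struct toy_rxring *r, unsigned int unused)
    {
        r->alive -= unused;    /* give back what was not filled */
    }

    /* Pretend buffer allocation starts failing after 100 buffers. */
    static int
    toy_add_rxbuf(unsigned int posted)
    {
        return (posted >= 100);    /* nonzero means failure */
    }

    int
    main(void)
    {
        struct toy_rxring ring = { 0, 124 };    /* GEM_NRXDESC - 4 style limit */
        unsigned int slots, posted = 0;

        for (slots = toy_rxr_get(&ring, 124); slots > 0; slots--) {
            if (toy_add_rxbuf(posted))
                break;
            posted++;
        }
        toy_rxr_put(&ring, slots);    /* return the slots we did not fill */
        printf("posted %u buffers, %u alive\n", posted, ring.alive);
        return (0);
    }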
| 1051 | ||||
| 1052 | /* | |||
| 1053 | * Add a receive buffer to the indicated descriptor. | |||
| 1054 | */ | |||
| 1055 | int | |||
| 1056 | gem_add_rxbuf(struct gem_softc *sc, int idx) | |||
| 1057 | { | |||
| 1058 | struct gem_rxsoft *rxs = &sc->sc_rxsoft[idx]; | |||
| 1059 | struct mbuf *m; | |||
| 1060 | int error; | |||
| 1061 | ||||
| 1062 | m = MCLGETL(NULL, M_DONTWAIT, MCLBYTES); | |||
| 1063 | if (!m) | |||
| 1064 | return (ENOBUFS55); | |||
| 1065 | m->m_len = m->m_pkthdr.len = MCLBYTES; | |||
| 1066 | ||||
| 1067 | #ifdef GEM_DEBUG | |||
| 1068 | /* bzero the packet to check dma */ | |||
| 1069 | memset(m->m_ext.ext_buf, 0, m->m_ext.ext_size); | |||
| 1070 | #endif | |||
| 1071 | ||||
| 1072 | rxs->rxs_mbuf = m; | |||
| 1073 | ||||
| 1074 | error = bus_dmamap_load_mbuf(sc->sc_dmatag, rxs->rxs_dmamap, m, | |||
| 1075 | BUS_DMA_READ|BUS_DMA_NOWAIT); | |||
| 1076 | if (error) { | |||
| 1077 | printf("%s: can't load rx DMA map %d, error = %d\n", | |||
| 1078 | sc->sc_dev.dv_xname, idx, error); | |||
| 1079 | panic("gem_add_rxbuf"); /* XXX */ | |||
| 1080 | } | |||
| 1081 | ||||
| 1082 | bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0, | |||
| 1083 | rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD); | |||
| 1084 | ||||
| 1085 | GEM_INIT_RXDESC(sc, idx); | |||
| 1086 | ||||
| 1087 | sc->sc_rx_prod = GEM_NEXTRX(sc->sc_rx_prod); | |||
| 1088 | ||||
| 1089 | return (0); | |||
| 1090 | } | |||
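GEM_INIT_RXDESC() hands the descriptor back to the chip by writing a flags word that packs the buffer size into bits 16-30 and sets the ownership bit; gem_rint() later unpacks the received length with GEM_RD_BUFLEN(). A small sketch of that packing and unpacking, using the bit values visible in the expanded macros above (assumed to match gemreg.h, and simplified to plain host-endian integers), is:

    /*
     * Sketch of how the rx descriptor flags word is packed and unpacked.
     * The constants mirror the values shown in the macro expansions above;
     * treat them as illustrative, not as a substitute for gemreg.h.
     */
    #include <inttypes.h>
    #include <stdio.h>

    #define RD_OWN          0x0000000080000000ULL  /* chip owns the descriptor */
    #define RD_BUFLEN_MASK  0x000000007fff0000ULL
    #define RD_BUFLEN_SHIFT 16

    static uint64_t
    rxdesc_pack(uint32_t buflen)
    {
        return ((((uint64_t)buflen << RD_BUFLEN_SHIFT) & RD_BUFLEN_MASK) |
            RD_OWN);
    }

    static uint32_t
    rxdesc_buflen(uint64_t flags)
    {
        return ((flags & RD_BUFLEN_MASK) >> RD_BUFLEN_SHIFT);
    }

    int
    main(void)
    {
        uint64_t flags = rxdesc_pack(2048);    /* an MCLBYTES-sized cluster */

        printf("flags 0x%016" PRIx64 "\n", flags);
        printf("owned by chip: %d, buflen %u\n",
            (flags & RD_OWN) != 0, (unsigned)rxdesc_buflen(flags));
        return (0);
    }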
| 1091 | ||||
| 1092 | int | |||
| 1093 | gem_eint(struct gem_softc *sc, u_int status) | |||
| 1094 | { | |||
| 1095 | if ((status & GEM_INTR_MIF0x000020000) != 0) { | |||
| 1096 | #ifdef GEM_DEBUG | |||
| 1097 | printf("%s: link status changed\n", sc->sc_dev.dv_xname); | |||
| 1098 | #endif | |||
| 1099 | return (1); | |||
| 1100 | } | |||
| 1101 | ||||
| 1102 | printf("%s: status=%b\n", sc->sc_dev.dv_xname, status, GEM_INTR_BITS"\020" "\1INTME\2TXEMPTY\3TXDONE" "\5RXDONE\6RXNOBUF\7RX_TAG_ERR" "\16PCS\17TXMAC\20RXMAC" "\21MACCONTROL\22MIF\23BERR"); | |||
| 1103 | return (1); | |||
| 1104 | } | |||
| 1105 | ||||
| 1106 | int | |||
| 1107 | gem_pint(struct gem_softc *sc) | |||
| 1108 | { | |||
| 1109 | bus_space_tag_t t = sc->sc_bustag; | |||
| 1110 | bus_space_handle_t seb = sc->sc_h1; | |||
| 1111 | u_int32_t status; | |||
| 1112 | ||||
| 1113 | status = bus_space_read_4(t, seb, GEM_MII_INTERRUP_STATUS)((t)->read_4((seb), (0x9018))); | |||
| 1114 | status |= bus_space_read_4(t, seb, GEM_MII_INTERRUP_STATUS)((t)->read_4((seb), (0x9018))); | |||
| 1115 | #ifdef GEM_DEBUG | |||
| 1116 | if (status) | |||
| 1117 | printf("%s: link status changed\n", sc->sc_dev.dv_xname); | |||
| 1118 | #endif | |||
| 1119 | return (1); | |||
| 1120 | } | |||
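gem_pint() reads GEM_MII_INTERRUP_STATUS twice and ORs the results; the register appears to be a read-to-clear latch, so the second read both picks up any event that latched after the first read and leaves the latch clear. A toy sketch of that pattern, with a plain variable standing in for the device register, is:

    /*
     * Toy model of polling a read-to-clear status latch, the pattern
     * gem_pint() uses on GEM_MII_INTERRUP_STATUS.  A plain variable stands
     * in for the device register.
     */
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t mif_status_latch = 0x3;    /* pretend two events are pending */

    static uint32_t
    read_clear_status(void)
    {
        uint32_t v = mif_status_latch;

        mif_status_latch = 0;    /* reading clears the latch */
        return (v);
    }

    int
    main(void)
    {
        uint32_t status;

        status = read_clear_status();
        status |= read_clear_status();    /* pick up anything newly latched */
        if (status)
            printf("link status changed (0x%x)\n", (unsigned)status);
        return (0);
    }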
| 1121 | ||||
| 1122 | int | |||
| 1123 | gem_intr(void *v) | |||
| 1124 | { | |||
| 1125 | struct gem_softc *sc = (struct gem_softc *)v; | |||
| 1126 | struct ifnet *ifp = &sc->sc_arpcom.ac_if; | |||
| 1127 | bus_space_tag_t t = sc->sc_bustag; | |||
| 1128 | bus_space_handle_t seb = sc->sc_h1; | |||
| 1129 | u_int32_t status; | |||
| 1130 | int r = 0; | |||
| 1131 | ||||
| 1132 | status = bus_space_read_4(t, seb, GEM_STATUS)((t)->read_4((seb), (0x000c))); | |||
| 1133 | DPRINTF(sc, ("%s: gem_intr: cplt %xstatus %b\n", | |||
| 1134 | sc->sc_dev.dv_xname, (status>>19), status, GEM_INTR_BITS)); | |||
| 1135 | ||||
| 1136 | if (status == 0xffffffff) | |||
| 1137 | return (0); | |||
| 1138 | ||||
| 1139 | if ((status & GEM_INTR_PCS0x000002000) != 0) | |||
| 1140 | r |= gem_pint(sc); | |||
| 1141 | ||||
| 1142 | if ((status & (GEM_INTR_RX_TAG_ERR0x000000040 | GEM_INTR_BERR0x000040000)) != 0) | |||
| 1143 | r |= gem_eint(sc, status); | |||
| 1144 | ||||
| 1145 | if ((status & (GEM_INTR_TX_EMPTY0x000000002 | GEM_INTR_TX_INTME0x000000001)) != 0) | |||
| 1146 | r |= gem_tint(sc, status); | |||
| 1147 | ||||
| 1148 | if ((status & (GEM_INTR_RX_DONE0x000000010 | GEM_INTR_RX_NOBUF0x000000020)) != 0) | |||
| 1149 | r |= gem_rint(sc); | |||
| 1150 | ||||
| 1151 | /* We should eventually do more than just print out error stats. */ | |||
| 1152 | if (status & GEM_INTR_TX_MAC0x000004000) { | |||
| 1153 | int txstat = bus_space_read_4(t, seb, GEM_MAC_TX_STATUS)((t)->read_4((seb), (0x6010))); | |||
| 1154 | #ifdef GEM_DEBUG | |||
| 1155 | if (txstat & ~GEM_MAC_TX_XMIT_DONE0x00000001) | |||
| 1156 | printf("%s: MAC tx fault, status %x\n", | |||
| 1157 | sc->sc_dev.dv_xname, txstat); | |||
| 1158 | #endif | |||
| 1159 | if (txstat & (GEM_MAC_TX_UNDERRUN0x00000002 | GEM_MAC_TX_PKT_TOO_LONG0x00000004)) { | |||
| 1160 | KERNEL_LOCK()_kernel_lock(); | |||
| 1161 | gem_init(ifp); | |||
| 1162 | KERNEL_UNLOCK()_kernel_unlock(); | |||
| 1163 | } | |||
| 1164 | } | |||
| 1165 | if (status & GEM_INTR_RX_MAC0x000008000) { | |||
| 1166 | int rxstat = bus_space_read_4(t, seb, GEM_MAC_RX_STATUS)((t)->read_4((seb), (0x6014))); | |||
| 1167 | #ifdef GEM_DEBUG | |||
| 1168 | if (rxstat & ~GEM_MAC_RX_DONE0x00000001) | |||
| 1169 | printf("%s: MAC rx fault, status %x\n", | |||
| 1170 | sc->sc_dev.dv_xname, rxstat); | |||
| 1171 | #endif | |||
| 1172 | if (rxstat & GEM_MAC_RX_OVERFLOW0x00000002) { | |||
| 1173 | ifp->if_ierrorsif_data.ifi_ierrors++; | |||
| 1174 | ||||
| 1175 | /* | |||
| 1176 | * Apparently a silicon bug causes ERI to hang | |||
| 1177 | * from time to time. So if we detect an RX | |||
| 1178 | * FIFO overflow, we fire off a timer, and | |||
| 1179 | * check whether we're still making progress | |||
| 1180 | * by looking at the RX FIFO write and read | |||
| 1181 | * pointers. | |||
| 1182 | */ | |||
| 1183 | sc->sc_rx_fifo_wr_ptr = | |||
| 1184 | bus_space_read_4(t, seb, GEM_RX_FIFO_WR_PTR)((t)->read_4((seb), (0x400c))); | |||
| 1185 | sc->sc_rx_fifo_rd_ptr = | |||
| 1186 | bus_space_read_4(t, seb, GEM_RX_FIFO_RD_PTR)((t)->read_4((seb), (0x4014))); | |||
| 1187 | timeout_add_msec(&sc->sc_rx_watchdog, 400); | |||
| 1188 | } | |||
| 1189 | #ifdef GEM_DEBUG | |||
| 1190 | else if (rxstat & ~(GEM_MAC_RX_DONE0x00000001 | GEM_MAC_RX_FRAME_CNT0x00000004)) | |||
| 1191 | printf("%s: MAC rx fault, status %x\n", | |||
| 1192 | sc->sc_dev.dv_xname, rxstat); | |||
| 1193 | #endif | |||
| 1194 | } | |||
| 1195 | return (r); | |||
| 1196 | } | |||
| 1197 | ||||
| 1198 | void | |||
| 1199 | gem_rx_watchdog(void *arg) | |||
| 1200 | { | |||
| 1201 | struct gem_softc *sc = arg; | |||
| 1202 | struct ifnet *ifp = &sc->sc_arpcom.ac_if; | |||
| 1203 | bus_space_tag_t t = sc->sc_bustag; | |||
| 1204 | bus_space_handle_t h = sc->sc_h1; | |||
| 1205 | u_int32_t rx_fifo_wr_ptr; | |||
| 1206 | u_int32_t rx_fifo_rd_ptr; | |||
| 1207 | u_int32_t state; | |||
| 1208 | ||||
| 1209 | if ((ifp->if_flags & IFF_RUNNING0x40) == 0) | |||
| 1210 | return; | |||
| 1211 | ||||
| 1212 | rx_fifo_wr_ptr = bus_space_read_4(t, h, GEM_RX_FIFO_WR_PTR)((t)->read_4((h), (0x400c))); | |||
| 1213 | rx_fifo_rd_ptr = bus_space_read_4(t, h, GEM_RX_FIFO_RD_PTR)((t)->read_4((h), (0x4014))); | |||
| 1214 | state = bus_space_read_4(t, h, GEM_MAC_MAC_STATE)((t)->read_4((h), (0x6134))); | |||
| 1215 | if ((state & GEM_MAC_STATE_OVERFLOW0x03800000) == GEM_MAC_STATE_OVERFLOW0x03800000) { | |||
| 1216 | if ((rx_fifo_wr_ptr == rx_fifo_rd_ptr) || | |||
| 1217 | ((sc->sc_rx_fifo_wr_ptr == rx_fifo_wr_ptr) && | |||
| 1218 | (sc->sc_rx_fifo_rd_ptr == rx_fifo_rd_ptr))) { | |||
| 1219 | /* | |||
| 1220 | * The RX state machine is still in overflow state and | |||
| 1221 | * the RX FIFO write and read pointers seem to be | |||
| 1222 | * stuck. Whack the chip over the head to get things | |||
| 1223 | * going again. | |||
| 1224 | */ | |||
| 1225 | gem_init(ifp); | |||
| 1226 | } else { | |||
| 1227 | /* | |||
| 1228 | * We made some progress, but it is not certain that the | |||
| 1229 | * overflow condition has been resolved. Check again. | |||
| 1230 | */ | |||
| 1231 | sc->sc_rx_fifo_wr_ptr = rx_fifo_wr_ptr; | |||
| 1232 | sc->sc_rx_fifo_rd_ptr = rx_fifo_rd_ptr; | |||
| 1233 | timeout_add_msec(&sc->sc_rx_watchdog, 400); | |||
| 1234 | } | |||
| 1235 | } | |||
| 1236 | } | |||
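The watchdog chooses between a full gem_init() and another 400 ms check by comparing the RX FIFO write/read pointers saved at overflow time with their current values: if the pointers have collapsed onto each other, or have not moved at all, the FIFO is considered stuck. A condensed sketch of just that decision, with hypothetical names, is:

    /*
     * Sketch of the progress test in gem_rx_watchdog().  The names are
     * hypothetical; only the comparison logic mirrors the driver.
     */
    #include <stdint.h>

    enum rx_action { RX_REINIT, RX_CHECK_AGAIN, RX_OK };

    static enum rx_action
    rx_overflow_progress(int still_overflowed, uint32_t saved_wr,
        uint32_t saved_rd, uint32_t cur_wr, uint32_t cur_rd)
    {
        if (!still_overflowed)
            return (RX_OK);    /* overflow state already cleared itself */

        /*
         * No progress: the write pointer has caught up with the read
         * pointer, or neither pointer has moved since the overflow.
         */
        if (cur_wr == cur_rd || (saved_wr == cur_wr && saved_rd == cur_rd))
            return (RX_REINIT);

        return (RX_CHECK_AGAIN);    /* moving, but re-arm the timeout */
    }

    int
    main(void)
    {
        /* Pointers unchanged since the overflow: expect a reinit. */
        return (rx_overflow_progress(1, 10, 4, 10, 4) == RX_REINIT ? 0 : 1);
    }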
| 1237 | ||||
| 1238 | void | |||
| 1239 | gem_watchdog(struct ifnet *ifp) | |||
| 1240 | { | |||
| 1241 | struct gem_softc *sc = ifp->if_softc; | |||
| 1242 | ||||
| 1243 | DPRINTF(sc, ("gem_watchdog: GEM_RX_CONFIG %x GEM_MAC_RX_STATUS %x " | |||
| 1244 | "GEM_MAC_RX_CONFIG %x\n", | |||
| 1245 | bus_space_read_4(sc->sc_bustag, sc->sc_h1, GEM_RX_CONFIG), | |||
| 1246 | bus_space_read_4(sc->sc_bustag, sc->sc_h1, GEM_MAC_RX_STATUS), | |||
| 1247 | bus_space_read_4(sc->sc_bustag, sc->sc_h1, GEM_MAC_RX_CONFIG))); | |||
| 1248 | ||||
| 1249 | log(LOG_ERR3, "%s: device timeout\n", sc->sc_dev.dv_xname); | |||
| 1250 | ++ifp->if_oerrorsif_data.ifi_oerrors; | |||
| 1251 | ||||
| 1252 | /* Try to get more packets going. */ | |||
| 1253 | gem_init(ifp); | |||
| 1254 | } | |||
| 1255 | ||||
| 1256 | /* | |||
| 1257 | * Initialize the MII Management Interface | |||
| 1258 | */ | |||
| 1259 | void | |||
| 1260 | gem_mifinit(struct gem_softc *sc) | |||
| 1261 | { | |||
| 1262 | bus_space_tag_t t = sc->sc_bustag; | |||
| 1263 | bus_space_handle_t mif = sc->sc_h1; | |||
| 1264 | ||||
| 1265 | /* Configure the MIF in frame mode */ | |||
| 1266 | sc->sc_mif_config = bus_space_read_4(t, mif, GEM_MIF_CONFIG)((t)->read_4((mif), (0x6210))); | |||
| 1267 | sc->sc_mif_config &= ~GEM_MIF_CONFIG_BB_ENA0x00000004; | |||
| 1268 | bus_space_write_4(t, mif, GEM_MIF_CONFIG, sc->sc_mif_config)((t)->write_4((mif), (0x6210), (sc->sc_mif_config))); | |||
| 1269 | } | |||
| 1270 | ||||
| 1271 | /* | |||
| 1272 | * MII interface | |||
| 1273 | * | |||
| 1274 | * The GEM MII interface supports at least three different operating modes: | |||
| 1275 | * | |||
| 1276 | * Bitbang mode is implemented using data, clock and output enable registers. | |||
| 1277 | * | |||
| 1278 | * Frame mode is implemented by loading a complete frame into the frame | |||
| 1279 | * register and polling the valid bit for completion. | |||
| 1280 | * | |||
| 1281 | * Polling mode uses the frame register but completion is indicated by | |||
| 1282 | * an interrupt. | |||
| 1283 | * | |||
| 1284 | */ | |||
| 1285 | int | |||
| 1286 | gem_mii_readreg(struct device *self, int phy, int reg) | |||
| 1287 | { | |||
| 1288 | struct gem_softc *sc = (void *)self; | |||
| 1289 | bus_space_tag_t t = sc->sc_bustag; | |||
| 1290 | bus_space_handle_t mif = sc->sc_h1; | |||
| 1291 | int n; | |||
| 1292 | u_int32_t v; | |||
| 1293 | ||||
| 1294 | #ifdef GEM_DEBUG | |||
| 1295 | if (sc->sc_debug) | |||
| 1296 | printf("gem_mii_readreg: phy %d reg %d\n", phy, reg); | |||
| 1297 | #endif | |||
| 1298 | ||||
| 1299 | /* Construct the frame command */ | |||
| 1300 | v = (reg << GEM_MIF_REG_SHIFT18) | (phy << GEM_MIF_PHY_SHIFT23) | | |||
| 1301 | GEM_MIF_FRAME_READ0x60020000; | |||
| 1302 | ||||
| 1303 | bus_space_write_4(t, mif, GEM_MIF_FRAME, v)((t)->write_4((mif), (0x620c), (v))); | |||
| 1304 | for (n = 0; n < 100; n++) { | |||
| 1305 | DELAY(1)(*delay_func)(1); | |||
| 1306 | v = bus_space_read_4(t, mif, GEM_MIF_FRAME)((t)->read_4((mif), (0x620c))); | |||
| 1307 | if (v & GEM_MIF_FRAME_TA00x00010000) | |||
| 1308 | return (v & GEM_MIF_FRAME_DATA0x0000ffff); | |||
| 1309 | } | |||
| 1310 | ||||
| 1311 | printf("%s: mii_read timeout\n", sc->sc_dev.dv_xname); | |||
| 1312 | return (0); | |||
| 1313 | } | |||
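In frame mode the MIF is driven by composing a single 32-bit frame word: a read or write opcode, the PHY address at bit 23, the register number at bit 18 and, for writes, the data in the low 16 bits; completion is signalled when the chip sets the turnaround bit TA0, after which the low 16 bits hold the result. A small sketch of building and decoding such frame words, reusing the shift and opcode values shown in the expansions above (assumed to match gemreg.h), is:

    /*
     * Sketch of MIF frame-mode access words, after gem_mii_readreg() and
     * gem_mii_writereg().  Constants mirror the expansions shown above.
     */
    #include <stdint.h>
    #include <stdio.h>

    #define MIF_FRAME_READ   0x60020000u
    #define MIF_FRAME_WRITE  0x50020000u
    #define MIF_PHY_SHIFT    23
    #define MIF_REG_SHIFT    18
    #define MIF_FRAME_TA0    0x00010000u    /* turnaround: set when complete */
    #define MIF_FRAME_DATA   0x0000ffffu

    static uint32_t
    mif_read_frame(int phy, int reg)
    {
        return (MIF_FRAME_READ | ((uint32_t)phy << MIF_PHY_SHIFT) |
            ((uint32_t)reg << MIF_REG_SHIFT));
    }

    static uint32_t
    mif_write_frame(int phy, int reg, uint16_t val)
    {
        return (MIF_FRAME_WRITE | ((uint32_t)phy << MIF_PHY_SHIFT) |
            ((uint32_t)reg << MIF_REG_SHIFT) | val);
    }

    int
    main(void)
    {
        uint32_t frame, done;

        /* Frame that asks for register 1 (BMSR) of the PHY at address 0. */
        frame = mif_read_frame(0, 1);
        printf("read frame  0x%08x\n", (unsigned)frame);
        printf("write frame 0x%08x\n", (unsigned)mif_write_frame(0, 0, 0x1200));

        /* Once the chip is done, TA0 is set and the data sits in the low bits. */
        done = frame | MIF_FRAME_TA0 | 0x796d;
        if (done & MIF_FRAME_TA0)
            printf("result 0x%04x\n", (unsigned)(done & MIF_FRAME_DATA));
        return (0);
    }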
| 1314 | ||||
| 1315 | void | |||
| 1316 | gem_mii_writereg(struct device *self, int phy, int reg, int val) | |||
| 1317 | { | |||
| 1318 | struct gem_softc *sc = (void *)self; | |||
| 1319 | bus_space_tag_t t = sc->sc_bustag; | |||
| 1320 | bus_space_handle_t mif = sc->sc_h1; | |||
| 1321 | int n; | |||
| 1322 | u_int32_t v; | |||
| 1323 | ||||
| 1324 | #ifdef GEM_DEBUG | |||
| 1325 | if (sc->sc_debug) | |||
| 1326 | printf("gem_mii_writereg: phy %d reg %d val %x\n", | |||
| 1327 | phy, reg, val); | |||
| 1328 | #endif | |||
| 1329 | ||||
| 1330 | /* Construct the frame command */ | |||
| 1331 | v = GEM_MIF_FRAME_WRITE0x50020000 | | |||
| 1332 | (phy << GEM_MIF_PHY_SHIFT23) | | |||
| 1333 | (reg << GEM_MIF_REG_SHIFT18) | | |||
| 1334 | (val & GEM_MIF_FRAME_DATA0x0000ffff); | |||
| 1335 | ||||
| 1336 | bus_space_write_4(t, mif, GEM_MIF_FRAME, v)((t)->write_4((mif), (0x620c), (v))); | |||
| 1337 | for (n = 0; n < 100; n++) { | |||
| 1338 | DELAY(1)(*delay_func)(1); | |||
| 1339 | v = bus_space_read_4(t, mif, GEM_MIF_FRAME)((t)->read_4((mif), (0x620c))); | |||
| 1340 | if (v & GEM_MIF_FRAME_TA00x00010000) | |||
| 1341 | return; | |||
| 1342 | } | |||
| 1343 | ||||
| 1344 | printf("%s: mii_write timeout\n", sc->sc_dev.dv_xname); | |||
| 1345 | } | |||
| 1346 | ||||
| 1347 | void | |||
| 1348 | gem_mii_statchg(struct device *dev) | |||
| 1349 | { | |||
| 1350 | struct gem_softc *sc = (void *)dev; | |||
| 1351 | #ifdef GEM_DEBUG | |||
| 1352 | uint64_t instance = IFM_INST(sc->sc_mii.mii_media.ifm_cur->ifm_media)(((sc->sc_mii.mii_media.ifm_cur->ifm_media) & 0xff00000000000000ULL ) >> 56); | |||
| 1353 | #endif | |||
| 1354 | bus_space_tag_t t = sc->sc_bustag; | |||
| 1355 | bus_space_handle_t mac = sc->sc_h1; | |||
| 1356 | u_int32_t v; | |||
| 1357 | ||||
| 1358 | #ifdef GEM_DEBUG | |||
| 1359 | if (sc->sc_debug) | |||
| 1360 | printf("gem_mii_statchg: status change: phy = %lld\n", instance); | |||
| 1361 | #endif | |||
| 1362 | ||||
| 1363 | /* Set tx full duplex options */ | |||
| 1364 | bus_space_write_4(t, mac, GEM_MAC_TX_CONFIG, 0)((t)->write_4((mac), (0x6030), (0))); | |||
| 1365 | delay(10000)(*delay_func)(10000); /* reg must be cleared and delay before changing. */ | |||
| 1366 | v = GEM_MAC_TX_ENA_IPG00x00000008|GEM_MAC_TX_NGU0x00000010|GEM_MAC_TX_NGU_LIMIT0x00000020| | |||
| 1367 | GEM_MAC_TX_ENABLE0x00000001; | |||
| 1368 | if ((IFM_OPTIONS(sc->sc_mii.mii_media_active)((sc->sc_mii.mii_media_active) & (0x00000000ffff0000ULL |0x00ffff0000000000ULL)) & IFM_FDX0x0000010000000000ULL) != 0) { | |||
| 1369 | v |= GEM_MAC_TX_IGN_CARRIER0x00000002|GEM_MAC_TX_IGN_COLLIS0x00000004; | |||
| 1370 | } | |||
| 1371 | bus_space_write_4(t, mac, GEM_MAC_TX_CONFIG, v)((t)->write_4((mac), (0x6030), (v))); | |||
| 1372 | ||||
| 1373 | /* XIF Configuration */ | |||
| 1374 | v = GEM_MAC_XIF_TX_MII_ENA0x00000001; | |||
| 1375 | v |= GEM_MAC_XIF_LINK_LED0x00000020; | |||
| 1376 | ||||
| 1377 | /* External MII needs echo disable if half duplex. */ | |||
| 1378 | if ((IFM_OPTIONS(sc->sc_mii.mii_media_active)((sc->sc_mii.mii_media_active) & (0x00000000ffff0000ULL |0x00ffff0000000000ULL)) & IFM_FDX0x0000010000000000ULL) != 0) | |||
| 1379 | /* turn on full duplex LED */ | |||
| 1380 | v |= GEM_MAC_XIF_FDPLX_LED0x00000040; | |||
| 1381 | else | |||
| 1382 | /* half duplex -- disable echo */ | |||
| 1383 | v |= GEM_MAC_XIF_ECHO_DISABL0x00000004; | |||
| 1384 | ||||
| 1385 | switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)((sc->sc_mii.mii_media_active) & 0x00000000000000ffULL )) { | |||
| 1386 | case IFM_1000_T16: /* Gigabit using GMII interface */ | |||
| 1387 | case IFM_1000_SX11: | |||
| 1388 | v |= GEM_MAC_XIF_GMII_MODE0x00000008; | |||
| 1389 | break; | |||
| 1390 | default: | |||
| 1391 | v &= ~GEM_MAC_XIF_GMII_MODE0x00000008; | |||
| 1392 | } | |||
| 1393 | bus_space_write_4(t, mac, GEM_MAC_XIF_CONFIG, v)((t)->write_4((mac), (0x603c), (v))); | |||
| 1394 | ||||
| 1395 | /* | |||
| 1396 | * 802.3x flow control | |||
| 1397 | */ | |||
| 1398 | v = bus_space_read_4(t, mac, GEM_MAC_CONTROL_CONFIG)((t)->read_4((mac), (0x6038))); | |||
| 1399 | v &= ~(GEM_MAC_CC_RX_PAUSE0x00000002 | GEM_MAC_CC_TX_PAUSE0x00000001); | |||
| 1400 | if ((IFM_OPTIONS(sc->sc_mii.mii_media_active)((sc->sc_mii.mii_media_active) & (0x00000000ffff0000ULL |0x00ffff0000000000ULL)) & IFM_ETH_RXPAUSE0x0000000000020000ULL) != 0) | |||
| 1401 | v |= GEM_MAC_CC_RX_PAUSE0x00000002; | |||
| 1402 | if ((IFM_OPTIONS(sc->sc_mii.mii_media_active)((sc->sc_mii.mii_media_active) & (0x00000000ffff0000ULL |0x00ffff0000000000ULL)) & IFM_ETH_TXPAUSE0x0000000000040000ULL) != 0) | |||
| 1403 | v |= GEM_MAC_CC_TX_PAUSE0x00000001; | |||
| 1404 | bus_space_write_4(t, mac, GEM_MAC_CONTROL_CONFIG, v)((t)->write_4((mac), (0x6038), (v))); | |||
| 1405 | } | |||
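The XIF configuration written above is derived entirely from the negotiated media word: MII output and the link LED are always enabled, full duplex lights the duplex LED while half duplex sets echo disable, and the gigabit subtypes switch the pins into GMII mode. A compressed sketch of that mapping, with plain full_duplex/gigabit flags standing in for the IFM_* tests, is:

    /*
     * Sketch of the media -> GEM_MAC_XIF_CONFIG mapping in gem_mii_statchg().
     * Constants mirror the expansions above; the full_duplex/gigabit inputs
     * stand in for the IFM_* tests on mii_media_active.
     */
    #include <stdint.h>
    #include <stdio.h>

    #define XIF_TX_MII_ENA  0x00000001u
    #define XIF_ECHO_DISABL 0x00000004u
    #define XIF_GMII_MODE   0x00000008u
    #define XIF_LINK_LED    0x00000020u
    #define XIF_FDPLX_LED   0x00000040u

    static uint32_t
    xif_config(int full_duplex, int gigabit)
    {
        uint32_t v = XIF_TX_MII_ENA | XIF_LINK_LED;

        if (full_duplex)
            v |= XIF_FDPLX_LED;     /* light the full duplex LED */
        else
            v |= XIF_ECHO_DISABL;   /* half duplex: disable echo */

        if (gigabit)
            v |= XIF_GMII_MODE;     /* 1000baseT / 1000baseSX */

        return (v);
    }

    int
    main(void)
    {
        printf("1000baseT-FDX xif 0x%08x\n", (unsigned)xif_config(1, 1));
        printf("100baseTX-HDX xif 0x%08x\n", (unsigned)xif_config(0, 0));
        return (0);
    }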
| 1406 | ||||
| 1407 | int | |||
| 1408 | gem_pcs_readreg(struct device *self, int phy, int reg) | |||
| 1409 | { | |||
| 1410 | struct gem_softc *sc = (void *)self; | |||
| 1411 | bus_space_tag_t t = sc->sc_bustag; | |||
| 1412 | bus_space_handle_t pcs = sc->sc_h1; | |||
| 1413 | ||||
| 1414 | #ifdef GEM_DEBUG | |||
| 1415 | if (sc->sc_debug) | |||
| 1416 | printf("gem_pcs_readreg: phy %d reg %d\n", phy, reg); | |||
| 1417 | #endif | |||
| 1418 | ||||
| 1419 | if (phy != GEM_PHYAD_EXTERNAL0) | |||
| 1420 | return (0); | |||
| 1421 | ||||
| 1422 | switch (reg) { | |||
| 1423 | case MII_BMCR0x00: | |||
| 1424 | reg = GEM_MII_CONTROL0x9000; | |||
| 1425 | break; | |||
| 1426 | case MII_BMSR0x01: | |||
| 1427 | reg = GEM_MII_STATUS0x9004; | |||
| 1428 | break; | |||
| 1429 | case MII_ANAR0x04: | |||
| 1430 | reg = GEM_MII_ANAR0x9008; | |||
| 1431 | break; | |||
| 1432 | case MII_ANLPAR0x05: | |||
| 1433 | reg = GEM_MII_ANLPAR0x900c; | |||
| 1434 | break; | |||
| 1435 | case MII_EXTSR0x0f: | |||
| 1436 | return (EXTSR_1000XFDX0x8000|EXTSR_1000XHDX0x4000); | |||
| 1437 | default: | |||
| 1438 | return (0); | |||
| 1439 | } | |||
| 1440 | ||||
| 1441 | return bus_space_read_4(t, pcs, reg)((t)->read_4((pcs), (reg))); | |||
| 1442 | } | |||
| 1443 | ||||
| 1444 | void | |||
| 1445 | gem_pcs_writereg(struct device *self, int phy, int reg, int val) | |||
| 1446 | { | |||
| 1447 | struct gem_softc *sc = (void *)self; | |||
| 1448 | bus_space_tag_t t = sc->sc_bustag; | |||
| 1449 | bus_space_handle_t pcs = sc->sc_h1; | |||
| 1450 | int reset = 0; | |||
| 1451 | ||||
| 1452 | #ifdef GEM_DEBUG | |||
| 1453 | if (sc->sc_debug) | |||
| 1454 | printf("gem_pcs_writereg: phy %d reg %d val %x\n", | |||
| 1455 | phy, reg, val); | |||
| 1456 | #endif | |||
| 1457 | ||||
| 1458 | if (phy != GEM_PHYAD_EXTERNAL0) | |||
| 1459 | return; | |||
| 1460 | ||||
| 1461 | if (reg == MII_ANAR0x04) | |||
| 1462 | bus_space_write_4(t, pcs, GEM_MII_CONFIG, 0)((t)->write_4((pcs), (0x9010), (0))); | |||
| 1463 | ||||
| 1464 | switch (reg) { | |||
| 1465 | case MII_BMCR0x00: | |||
| 1466 | reset = (val & GEM_MII_CONTROL_RESET0x00008000); | |||
| 1467 | reg = GEM_MII_CONTROL0x9000; | |||
| 1468 | break; | |||
| 1469 | case MII_BMSR0x01: | |||
| 1470 | reg = GEM_MII_STATUS0x9004; | |||
| 1471 | break; | |||
| 1472 | case MII_ANAR0x04: | |||
| 1473 | reg = GEM_MII_ANAR0x9008; | |||
| 1474 | break; | |||
| 1475 | case MII_ANLPAR0x05: | |||
| 1476 | reg = GEM_MII_ANLPAR0x900c; | |||
| 1477 | break; | |||
| 1478 | default: | |||
| 1479 | return; | |||
| 1480 | } | |||
| 1481 | ||||
| 1482 | bus_space_write_4(t, pcs, reg, val)((t)->write_4((pcs), (reg), (val))); | |||
| 1483 | ||||
| 1484 | if (reset) | |||
| 1485 | gem_bitwait(sc, pcs, GEM_MII_CONTROL0x9000, GEM_MII_CONTROL_RESET0x00008000, 0); | |||
| 1486 | ||||
| 1487 | if (reg == GEM_MII_ANAR0x9008 || reset) { | |||
| 1488 | bus_space_write_4(t, pcs, GEM_MII_SLINK_CONTROL, | |||
| 1489 | GEM_MII_SLINK_LOOPBACK|GEM_MII_SLINK_EN_SYNC_D); | |||
| 1490 | bus_space_write_4(t, pcs, GEM_MII_CONFIG, | |||
| 1491 | GEM_MII_CONFIG_ENABLE); | |||
| 1492 | } | |||
| 1493 | } | |||
| 1494 | ||||
| 1495 | int | |||
| 1496 | gem_mediachange(struct ifnet *ifp) | |||
| 1497 | { | |||
| 1498 | struct gem_softc *sc = ifp->if_softc; | |||
| 1499 | struct mii_data *mii = &sc->sc_mii; | |||
| 1500 | ||||
| 1501 | if (mii->mii_instance) { | |||
| 1502 | struct mii_softc *miisc; | |||
| 1503 | LIST_FOREACH(miisc, &mii->mii_phys, mii_list)for((miisc) = ((&mii->mii_phys)->lh_first); (miisc) != ((void *)0); (miisc) = ((miisc)->mii_list.le_next)) | |||
| 1504 | mii_phy_reset(miisc); | |||
| 1505 | } | |||
| 1506 | ||||
| 1507 | return (mii_mediachg(&sc->sc_mii)); | |||
| 1508 | } | |||
| 1509 | ||||
| 1510 | void | |||
| 1511 | gem_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) | |||
| 1512 | { | |||
| 1513 | struct gem_softc *sc = ifp->if_softc; | |||
| 1514 | ||||
| 1515 | mii_pollstat(&sc->sc_mii); | |||
| 1516 | ifmr->ifm_active = sc->sc_mii.mii_media_active; | |||
| 1517 | ifmr->ifm_status = sc->sc_mii.mii_media_status; | |||
| 1518 | } | |||
| 1519 | ||||
| 1520 | /* | |||
| 1521 | * Process an ioctl request. | |||
| 1522 | */ | |||
| 1523 | int | |||
| 1524 | gem_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) | |||
| 1525 | { | |||
| 1526 | struct gem_softc *sc = ifp->if_softc; | |||
| 1527 | struct ifreq *ifr = (struct ifreq *)data; | |||
| 1528 | int s, error = 0; | |||
| 1529 | ||||
| 1530 | s = splnet()splraise(0x4); | |||
| 1531 | ||||
| 1532 | switch (cmd) { | |||
| 1533 | case SIOCSIFADDR: | |||
| 1534 | ifp->if_flags |= IFF_UP0x1; | |||
| 1535 | if ((ifp->if_flags & IFF_RUNNING0x40) == 0) | |||
| 1536 | gem_init(ifp); | |||
| 1537 | break; | |||
| 1538 | ||||
| 1539 | case SIOCSIFFLAGS: | |||
| 1540 | if (ifp->if_flags & IFF_UP0x1) { | |||
| 1541 | if (ifp->if_flags & IFF_RUNNING0x40) | |||
| 1542 | error = ENETRESET52; | |||
| 1543 | else | |||
| 1544 | gem_init(ifp); | |||
| 1545 | } else { | |||
| 1546 | if (ifp->if_flags & IFF_RUNNING0x40) | |||
| 1547 | gem_stop(ifp, 0); | |||
| 1548 | } | |||
| 1549 | #ifdef GEM_DEBUG | |||
| 1550 | sc->sc_debug = (ifp->if_flags & IFF_DEBUG0x4) != 0 ? 1 : 0; | |||
| 1551 | #endif | |||
| 1552 | break; | |||
| 1553 | ||||
| 1554 | case SIOCGIFMEDIA: | |||
| 1555 | case SIOCSIFMEDIA: | |||
| 1556 | error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd); | |||
| 1557 | break; | |||
| 1558 | ||||
| 1559 | case SIOCGIFRXR: | |||
| 1560 | error = if_rxr_ioctl((struct if_rxrinfo *)ifr->ifr_data, | |||
| 1561 | NULL, MCLBYTES, &sc->sc_rx_ring); | |||
| 1562 | break; | |||
| 1563 | ||||
| 1564 | default: | |||
| 1565 | error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data); | |||
| 1566 | } | |||
| 1567 | ||||
| 1568 | if (error == ENETRESET52) { | |||
| 1569 | if (ifp->if_flags & IFF_RUNNING0x40) | |||
| 1570 | gem_iff(sc); | |||
| 1571 | error = 0; | |||
| 1572 | } | |||
| 1573 | ||||
| 1574 | splx(s)spllower(s); | |||
| 1575 | return (error); | |||
| 1576 | } | |||
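Several cases above return ENETRESET rather than touching the hardware themselves; the common tail then converts that into a single gem_iff() call when the interface is running, so the receive filter is rebuilt exactly once per ioctl. A skeleton of that convention, with a hypothetical reprogram_rx_filter() standing in for gem_iff(), is:

    /*
     * Sketch of the ENETRESET convention used at the end of gem_ioctl().
     * reprogram_rx_filter() is a hypothetical stand-in for gem_iff().
     */
    #include <errno.h>
    #include <stdio.h>

    #define FLAG_RUNNING 0x40

    static void
    reprogram_rx_filter(void)
    {
        printf("rx filter reprogrammed\n");
    }

    static int
    ioctl_tail(int error, int if_flags)
    {
        if (error == ENETRESET) {
            /* Only touch the hardware if the interface is running. */
            if (if_flags & FLAG_RUNNING)
                reprogram_rx_filter();
            error = 0;
        }
        return (error);
    }

    int
    main(void)
    {
        /* e.g. an "add multicast address" handler returned ENETRESET */
        return (ioctl_tail(ENETRESET, FLAG_RUNNING));
    }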
| 1577 | ||||
| 1578 | void | |||
| 1579 | gem_iff(struct gem_softc *sc) | |||
| 1580 | { | |||
| 1581 | struct ifnet *ifp = &sc->sc_arpcom.ac_if; | |||
| 1582 | struct arpcom *ac = &sc->sc_arpcom; | |||
| 1583 | struct ether_multi *enm; | |||
| 1584 | struct ether_multistep step; | |||
| 1585 | bus_space_tag_t t = sc->sc_bustag; | |||
| 1586 | bus_space_handle_t h = sc->sc_h1; | |||
| 1587 | u_int32_t crc, hash[16], rxcfg; | |||
| 1588 | int i; | |||
| 1589 | ||||
| 1590 | rxcfg = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG)((t)->read_4((h), (0x6034))); | |||
| 1591 | rxcfg &= ~(GEM_MAC_RX_HASH_FILTER0x00000020 | GEM_MAC_RX_PROMISCUOUS0x00000008 | | |||
| 1592 | GEM_MAC_RX_PROMISC_GRP0x00000010); | |||
| 1593 | ifp->if_flags &= ~IFF_ALLMULTI0x200; | |||
| 1594 | ||||
| 1595 | if (ifp->if_flags & IFF_PROMISC0x100 || ac->ac_multirangecnt > 0) { | |||
| 1596 | ifp->if_flags |= IFF_ALLMULTI0x200; | |||
| 1597 | if (ifp->if_flags & IFF_PROMISC0x100) | |||
| 1598 | rxcfg |= GEM_MAC_RX_PROMISCUOUS0x00000008; | |||
| 1599 | else | |||
| 1600 | rxcfg |= GEM_MAC_RX_PROMISC_GRP0x00000010; | |||
| 1601 | } else { | |||
| 1602 | /* | |||
| 1603 | * Set up multicast address filter by passing all multicast | |||
| 1604 | * addresses through a crc generator, and then using the | |||
| 1605 | * high order 8 bits as an index into the 256 bit logical | |||
| 1606 | * address filter. The high order 4 bits select the word, | |||
| 1607 | * while the other 4 bits select the bit within the word | |||
| 1608 | * (where bit 0 is the MSB). | |||
| 1609 | */ | |||
| 1610 | ||||
| 1611 | rxcfg |= GEM_MAC_RX_HASH_FILTER0x00000020; | |||
| 1612 | ||||
| 1613 | /* Clear hash table */ | |||
| 1614 | for (i = 0; i < 16; i++) | |||
| 1615 | hash[i] = 0; | |||
| 1616 | ||||
| 1617 | ETHER_FIRST_MULTI(step, ac, enm); | |||
| 1618 | while (enm != NULL((void *)0)) { | |||
| 1619 | crc = ether_crc32_le(enm->enm_addrlo, | |||
| 1620 | ETHER_ADDR_LEN6); | |||
| 1621 | ||||
| 1622 | /* Just want the 8 most significant bits. */ | |||
| 1623 | crc >>= 24; | |||
| 1624 | ||||
| 1625 | /* Set the corresponding bit in the filter. */ | |||
| 1626 | hash[crc >> 4] |= 1 << (15 - (crc & 15)); | |||
| 1627 | ||||
| 1628 | ETHER_NEXT_MULTI(step, enm); | |||
| 1629 | } | |||
| 1630 | ||||
| 1631 | /* Now load the hash table into the chip (if we are using it) */ | |||
| 1632 | for (i = 0; i < 16; i++) { | |||
| 1633 | bus_space_write_4(t, h, | |||
| 1634 | GEM_MAC_HASH0 + i * (GEM_MAC_HASH1 - GEM_MAC_HASH0), | |||
| 1635 | hash[i]); | |||
| 1636 | } | |||
| 1637 | } | |||
| 1638 | ||||
| 1639 | bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, rxcfg)((t)->write_4((h), (0x6034), (rxcfg))); | |||
| 1640 | } | |||
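The hash-filter path condenses each multicast address to one bit in a 256-bit table: take the little-endian CRC-32 of the address, keep the top 8 bits, use the upper 4 of those to select one of the 16 hash registers and the lower 4 to select a bit, counting from the MSB. A standalone sketch of that index computation follows; crc32_le() here is a generic reflected CRC-32 (polynomial 0xedb88320) used as a stand-in for the kernel's ether_crc32_le(), which is believed to compute the same value without a final inversion:

    /*
     * Sketch of the multicast hash computation in gem_iff().  crc32_le() is
     * a plain reflected CRC-32 (poly 0xedb88320) standing in for the
     * kernel's ether_crc32_le().
     */
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t
    crc32_le(const uint8_t *buf, size_t len)
    {
        uint32_t crc = 0xffffffffu;
        size_t i;
        int bit;

        for (i = 0; i < len; i++) {
            crc ^= buf[i];
            for (bit = 0; bit < 8; bit++)
                crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320u : 0);
        }
        return (crc);    /* no final inversion, like ether_crc32_le() */
    }

    int
    main(void)
    {
        /* 33:33:00:00:00:01 is the all-nodes IPv6 multicast MAC address. */
        const uint8_t addr[6] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x01 };
        uint32_t hash[16] = { 0 };
        uint32_t crc;

        crc = crc32_le(addr, sizeof(addr));
        crc >>= 24;                                 /* keep the 8 MSBs */
        hash[crc >> 4] |= 1 << (15 - (crc & 15));   /* bit 0 is the MSB */

        printf("crc index %u -> word %u, bit %u, hash word 0x%04x\n",
            (unsigned)crc, (unsigned)(crc >> 4), (unsigned)(15 - (crc & 15)),
            (unsigned)hash[crc >> 4]);
        return (0);
    }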
| 1641 | ||||
| 1642 | /* | |||
| 1643 | * Transmit interrupt. | |||
| 1644 | */ | |||
| 1645 | int | |||
| 1646 | gem_tint(struct gem_softc *sc, u_int32_t status) | |||
| 1647 | { | |||
| 1648 | struct ifnet *ifp = &sc->sc_arpcom.ac_if; | |||
| 1649 | struct gem_sxd *sd; | |||
| 1650 | u_int32_t cons, prod; | |||
| 1651 | int free = 0; | |||
| 1652 | ||||
| 1653 | prod = status >> 19; | |||
| 1654 | cons = sc->sc_tx_cons; | |||
| 1655 | while (cons != prod) { | |||
| 1656 | sd = &sc->sc_txd[cons]; | |||
| 1657 | if (sd->sd_mbuf != NULL((void *)0)) { | |||
| 1658 | bus_dmamap_sync(sc->sc_dmatag, sd->sd_map, 0, | |||
| 1659 | sd->sd_map->dm_mapsize, BUS_DMASYNC_POSTWRITE); | |||
| 1660 | bus_dmamap_unload(sc->sc_dmatag, sd->sd_map); | |||
| 1661 | m_freem(sd->sd_mbuf); | |||
| 1662 | sd->sd_mbuf = NULL((void *)0); | |||
| 1663 | } | |||
| 1664 | ||||
| 1665 | free = 1; | |||
| 1666 | ||||
| 1667 | cons++; | |||
| 1668 | cons &= GEM_NTXDESC(64 * 16) - 1; | |||
| 1669 | } | |||
| 1670 | ||||
| 1671 | if (free == 0) | |||
| 1672 | return (0); | |||
| 1673 | ||||
| 1674 | sc->sc_tx_cons = cons; | |||
| 1675 | ||||
| 1676 | if (sc->sc_tx_prod == cons) | |||
| 1677 | ifp->if_timer = 0; | |||
| 1678 | ||||
| 1679 | if (ifq_is_oactive(&ifp->if_snd)) | |||
| 1680 | ifq_restart(&ifp->if_snd); | |||
| 1681 | ||||
| 1682 | return (1); | |||
| 1683 | } | |||
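Both gem_tint() and gem_start() treat the transmit ring as a power-of-two circular buffer: the consumer advances with cons = (cons + 1) & (GEM_NTXDESC - 1), and gem_start() estimates free space as the distance from producer to consumer modulo the ring size. A tiny sketch of that arithmetic is:

    /*
     * Sketch of the power-of-two tx ring index arithmetic used by gem_tint()
     * and gem_start().  NTXDESC mirrors GEM_NTXDESC (64 * 16).
     */
    #include <stdint.h>
    #include <stdio.h>

    #define NTXDESC (64 * 16)    /* must be a power of two */

    static uint32_t
    tx_ring_free(uint32_t prod, uint32_t cons)
    {
        uint32_t free = cons;

        if (free <= prod)
            free += NTXDESC;    /* consumer is "behind": unwrap it */
        return (free - prod);
    }

    int
    main(void)
    {
        uint32_t cons = 1000, prod = 20;    /* producer has already wrapped */

        printf("free slots %u\n", (unsigned)tx_ring_free(prod, cons));
        cons = (cons + 1) & (NTXDESC - 1);  /* advance the consumer */
        printf("next cons  %u\n", (unsigned)cons);
        return (0);
    }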
| 1684 | ||||
| 1685 | int | |||
| 1686 | gem_load_mbuf(struct gem_softc *sc, struct gem_sxd *sd, struct mbuf *m) | |||
| 1687 | { | |||
| 1688 | int error; | |||
| 1689 | ||||
| 1690 | error = bus_dmamap_load_mbuf(sc->sc_dmatag, sd->sd_map, m, | |||
| 1691 | BUS_DMA_NOWAIT); | |||
| 1692 | switch (error) { | |||
| 1693 | case 0: | |||
| 1694 | break; | |||
| 1695 | ||||
| 1696 | case EFBIG: /* mbuf chain is too fragmented */ | |||
| 1697 | if (m_defrag(m, M_DONTWAIT) == 0 && | |||
| 1698 | bus_dmamap_load_mbuf(sc->sc_dmatag, sd->sd_map, m, | |||
| 1699 | BUS_DMA_NOWAIT) == 0) | |||
| 1700 | break; | |||
| 1701 | /* FALLTHROUGH */ | |||
| 1702 | default: | |||
| 1703 | return (1); | |||
| 1704 | } | |||
| 1705 | ||||
| 1706 | return (0); | |||
| 1707 | } | |||
| 1708 | ||||
| 1709 | void | |||
| 1710 | gem_start(struct ifqueue *ifq) | |||
| 1711 | { | |||
| 1712 | struct ifnet *ifp = ifq->ifq_if; | |||
| 1713 | struct gem_softc *sc = ifp->if_softc; | |||
| 1714 | struct gem_sxd *sd; | |||
| 1715 | struct mbuf *m; | |||
| 1716 | uint64_t flags, nflags; | |||
| 1717 | bus_dmamap_t map; | |||
| 1718 | uint32_t prod; | |||
| 1719 | uint32_t free, used = 0; | |||
| 1720 | uint32_t first, last; | |||
| 1721 | int i; | |||
| 1722 | ||||
| 1723 | prod = sc->sc_tx_prod; | |||
| 1724 | ||||
| 1725 | /* figure out space */ | |||
| 1726 | free = sc->sc_tx_cons; | |||
| 1727 | if (free <= prod) | |||
| 1728 | free += GEM_NTXDESC(64 * 16); | |||
| 1729 | free -= prod; | |||
| 1730 | ||||
| 1731 | bus_dmamap_sync(sc->sc_dmatag, sc->sc_cddmamap, | |||
| 1732 | 0, sizeof(struct gem_desc) * GEM_NTXDESC, | |||
| 1733 | BUS_DMASYNC_PREWRITE); | |||
| 1734 | ||||
| 1735 | for (;;) { | |||
| 1736 | if (used + GEM_NTXSEGS16 + 1 > free) { | |||
| 1737 | ifq_set_oactive(&ifp->if_snd); | |||
| 1738 | break; | |||
| 1739 | } | |||
| 1740 | ||||
| 1741 | m = ifq_dequeue(ifq); | |||
| 1742 | if (m == NULL((void *)0)) | |||
| 1743 | break; | |||
| 1744 | ||||
| 1745 | first = prod; | |||
| 1746 | sd = &sc->sc_txd[first]; | |||
| 1747 | map = sd->sd_map; | |||
| 1748 | ||||
| 1749 | if (gem_load_mbuf(sc, sd, m)) { | |||
| 1750 | m_freem(m); | |||
| 1751 | ifp->if_oerrorsif_data.ifi_oerrors++; | |||
| 1752 | continue; | |||
| 1753 | } | |||
| 1754 | ||||
| 1755 | #if NBPFILTER1 > 0 | |||
| 1756 | if (ifp->if_bpf) | |||
| 1757 | bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT(1 << 1)); | |||
| 1758 | #endif | |||
| 1759 | ||||
| 1760 | bus_dmamap_sync(sc->sc_dmatag, map, 0, map->dm_mapsize, | |||
| 1761 | BUS_DMASYNC_PREWRITE); | |||
| 1762 | ||||
| 1763 | nflags = GEM_TD_START_OF_PACKET0x0000000080000000LL; | |||
| 1764 | for (i = 0; i < map->dm_nsegs; i++) { | |||
| 1765 | flags = nflags | | |||
| 1766 | (map->dm_segs[i].ds_len & GEM_TD_BUFSIZE0x0000000000007fffLL); | |||
| 1767 | ||||
| 1768 | GEM_DMA_WRITE(sc, &sc->sc_txdescs[prod].gd_addr, | |||
| 1769 | map->dm_segs[i].ds_addr); | |||
| 1770 | GEM_DMA_WRITE(sc, &sc->sc_txdescs[prod].gd_flags, | |||
| 1771 | flags); | |||
| 1772 | ||||
| 1773 | last = prod; | |||
| 1774 | prod++; | |||
| 1775 | prod &= GEM_NTXDESC(64 * 16) - 1; | |||
| 1776 | ||||
| 1777 | nflags = 0; | |||
| 1778 | } | |||
| 1779 | GEM_DMA_WRITE(sc, &sc->sc_txdescs[last].gd_flags, | |||
| | The right operand of '|' is a garbage value | |||
| 1780 | GEM_TD_END_OF_PACKET | flags); | |||
| 1781 | ||||
| 1782 | used += map->dm_nsegs; | |||
| 1783 | sc->sc_txd[last].sd_mbuf = m; | |||
| 1784 | sc->sc_txd[first].sd_map = sc->sc_txd[last].sd_map; | |||
| 1785 | sc->sc_txd[last].sd_map = map; | |||
| 1786 | } | |||
| 1787 | ||||
| 1788 | bus_dmamap_sync(sc->sc_dmatag, sc->sc_cddmamap, | |||
| 1789 | 0, sizeof(struct gem_desc) * GEM_NTXDESC, | |||
| 1790 | BUS_DMASYNC_POSTWRITE); | |||
| 1791 | ||||
| 1792 | if (used == 0) | |||
| 1793 | return; | |||
| 1794 | ||||
| 1795 | /* Commit. */ | |||
| 1796 | sc->sc_tx_prod = prod; | |||
| 1797 | ||||
| 1798 | /* Transmit. */ | |||
| 1799 | bus_space_write_4(sc->sc_bustag, sc->sc_h1, GEM_TX_KICK, prod)((sc->sc_bustag)->write_4((sc->sc_h1), (0x2000), (prod ))); | |||
| 1800 | ||||
| 1801 | /* Set timeout in case hardware has problems transmitting. */ | |||
| 1802 | ifp->if_timer = 5; | |||
| 1803 | } |
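The analyzer warning this report was generated for points at the GEM_TD_END_OF_PACKET | flags store at line 1779: flags is assigned only inside the per-segment loop, so on a path where map->dm_nsegs is 0 the OR would read an uninitialized value. In practice a successfully loaded mbuf map presumably always carries at least one segment, so that path may be impossible; still, a common way to make the value unconditionally defined (and to silence the diagnostic) is to initialize flags before the loop or to reject empty maps explicitly. The following is an illustrative sketch of that shape only, not the driver's actual code:

    /*
     * Illustrative sketch only: making a value like "flags" in gem_start()
     * defined even when the per-segment loop never runs.  Names and types
     * are simplified stand-ins, not the driver's real code.
     */
    #include <stdint.h>
    #include <stdio.h>

    #define TD_START_OF_PACKET 0x0000000080000000ULL
    #define TD_END_OF_PACKET   0x0000000040000000ULL
    #define TD_BUFSIZE         0x0000000000007fffULL

    static uint64_t
    last_segment_flags(const uint32_t *seg_len, int nsegs)
    {
        uint64_t flags = 0;    /* defined even if the loop never runs */
        uint64_t nflags = TD_START_OF_PACKET;
        int i;

        for (i = 0; i < nsegs; i++) {
            flags = nflags | (seg_len[i] & TD_BUFSIZE);
            nflags = 0;
        }
        return (TD_END_OF_PACKET | flags);    /* never reads garbage */
    }

    int
    main(void)
    {
        uint32_t segs[2] = { 1500, 60 };

        printf("last flags 0x%016llx\n",
            (unsigned long long)last_segment_flags(segs, 2));
        printf("empty map  0x%016llx\n",
            (unsigned long long)last_segment_flags(segs, 0));
        return (0);
    }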