File: dev/pci/if_ix.c
Warning: line 1493, column 25: Dereference of null pointer
/*	$OpenBSD: if_ix.c,v 1.206 2023/11/10 15:51:20 bluhm Exp $	*/

/******************************************************************************

  Copyright (c) 2001-2013, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/* FreeBSD: src/sys/dev/ixgbe/ixgbe.c 251964 Jun 18 21:28:19 2013 UTC */

#include <dev/pci/if_ix.h>
#include <dev/pci/ixgbe_type.h>

/*
 * Our TCP/IP Stack is unable to handle packets greater than MAXMCLBYTES.
 * This interface is unable to handle packets greater than IXGBE_TSO_SIZE.
 */
CTASSERT(MAXMCLBYTES <= IXGBE_TSO_SIZE);
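/*
 * CTASSERT turns its condition into an extern array declaration whose size
 * is -1 when the condition is false, so a violation of the limits above
 * fails at compile time rather than at runtime.
 */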

/*********************************************************************
 *  Driver version
 *********************************************************************/
/* char ixgbe_driver_version[] = "2.5.13"; */

/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select devices to load on
 *********************************************************************/

const struct pci_matchid ixgbe_devices[] = {
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598_BX },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598AF_DUAL },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598AF },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598AT },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598AT2 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598AT_DUAL },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598EB_CX4 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598EB_CX4_DUAL },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598EB_XF_LR },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598EB_SFP },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598_SR_DUAL_EM },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598_DA_DUAL },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_KX4 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_KX4_MEZZ },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_XAUI },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_COMBO_BP },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_BPLANE_FCOE },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_CX4 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_T3_LOM },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_SFP },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_SFP_EM },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_SFP_SF_QP },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_SFP_SF2 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_SFP_FCOE },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599EN_SFP },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_QSFP_SF_QP },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X540T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X540T1 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550T1 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_X_KX4 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_X_KR },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_X_SFP },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_X_10G_T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_X_1G_T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_A_KR },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_A_KR_L },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_A_SFP_N },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_A_SFP },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_A_SGMII },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_A_SGMII_L },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_A_10G_T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_A_1G_T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_A_1G_T_L }
};

/*********************************************************************
 *  Function prototypes
 *********************************************************************/
int	ixgbe_probe(struct device *, void *, void *);
void	ixgbe_attach(struct device *, struct device *, void *);
int	ixgbe_detach(struct device *, int);
int	ixgbe_activate(struct device *, int);
void	ixgbe_start(struct ifqueue *);
int	ixgbe_ioctl(struct ifnet *, u_long, caddr_t);
int	ixgbe_rxrinfo(struct ix_softc *, struct if_rxrinfo *);
int	ixgbe_get_sffpage(struct ix_softc *, struct if_sffpage *);
void	ixgbe_watchdog(struct ifnet *);
void	ixgbe_init(void *);
void	ixgbe_stop(void *);
void	ixgbe_media_status(struct ifnet *, struct ifmediareq *);
int	ixgbe_media_change(struct ifnet *);
void	ixgbe_identify_hardware(struct ix_softc *);
int	ixgbe_allocate_pci_resources(struct ix_softc *);
int	ixgbe_allocate_legacy(struct ix_softc *);
int	ixgbe_allocate_msix(struct ix_softc *);
void	ixgbe_setup_msix(struct ix_softc *);
int	ixgbe_allocate_queues(struct ix_softc *);
void	ixgbe_free_pci_resources(struct ix_softc *);
void	ixgbe_local_timer(void *);
void	ixgbe_setup_interface(struct ix_softc *);
void	ixgbe_config_gpie(struct ix_softc *);
void	ixgbe_config_delay_values(struct ix_softc *);
void	ixgbe_add_media_types(struct ix_softc *);
void	ixgbe_config_link(struct ix_softc *);

int	ixgbe_allocate_transmit_buffers(struct tx_ring *);
int	ixgbe_setup_transmit_structures(struct ix_softc *);
int	ixgbe_setup_transmit_ring(struct tx_ring *);
void	ixgbe_initialize_transmit_units(struct ix_softc *);
void	ixgbe_free_transmit_structures(struct ix_softc *);
void	ixgbe_free_transmit_buffers(struct tx_ring *);

int	ixgbe_allocate_receive_buffers(struct rx_ring *);
int	ixgbe_setup_receive_structures(struct ix_softc *);
int	ixgbe_setup_receive_ring(struct rx_ring *);
void	ixgbe_initialize_receive_units(struct ix_softc *);
void	ixgbe_free_receive_structures(struct ix_softc *);
void	ixgbe_free_receive_buffers(struct rx_ring *);
void	ixgbe_initialize_rss_mapping(struct ix_softc *);
int	ixgbe_rxfill(struct rx_ring *);
void	ixgbe_rxrefill(void *);

int	ixgbe_intr(struct ix_softc *sc);
void	ixgbe_enable_intr(struct ix_softc *);
void	ixgbe_disable_intr(struct ix_softc *);
int	ixgbe_txeof(struct tx_ring *);
int	ixgbe_rxeof(struct rx_ring *);
void	ixgbe_rx_checksum(uint32_t, struct mbuf *);
void	ixgbe_iff(struct ix_softc *);
void	ixgbe_map_queue_statistics(struct ix_softc *);
void	ixgbe_update_link_status(struct ix_softc *);
int	ixgbe_get_buf(struct rx_ring *, int);
int	ixgbe_encap(struct tx_ring *, struct mbuf *);
int	ixgbe_dma_malloc(struct ix_softc *, bus_size_t,
	    struct ixgbe_dma_alloc *, int);
void	ixgbe_dma_free(struct ix_softc *, struct ixgbe_dma_alloc *);
static int
	ixgbe_tx_ctx_setup(struct tx_ring *, struct mbuf *, uint32_t *,
	    uint32_t *);
void	ixgbe_set_ivar(struct ix_softc *, uint8_t, uint8_t, int8_t);
void	ixgbe_configure_ivars(struct ix_softc *);
uint8_t	*ixgbe_mc_array_itr(struct ixgbe_hw *, uint8_t **, uint32_t *);

void	ixgbe_setup_vlan_hw_support(struct ix_softc *);

/* Support for pluggable optic modules */
void	ixgbe_handle_mod(struct ix_softc *);
void	ixgbe_handle_msf(struct ix_softc *);
void	ixgbe_handle_phy(struct ix_softc *);

/* Legacy (single vector) interrupt handler */
int	ixgbe_legacy_intr(void *);
void	ixgbe_enable_queue(struct ix_softc *, uint32_t);
void	ixgbe_enable_queues(struct ix_softc *);
void	ixgbe_disable_queue(struct ix_softc *, uint32_t);
void	ixgbe_rearm_queue(struct ix_softc *, uint32_t);

/* MSI-X (multiple vector) interrupt handlers */
int	ixgbe_link_intr(void *);
int	ixgbe_queue_intr(void *);

#if NKSTAT > 0
static void	ix_kstats(struct ix_softc *);
static void	ix_rxq_kstats(struct ix_softc *, struct rx_ring *);
static void	ix_txq_kstats(struct ix_softc *, struct tx_ring *);
static void	ix_kstats_tick(void *);
#endif

/*********************************************************************
 *  OpenBSD Device Interface Entry Points
 *********************************************************************/

struct cfdriver ix_cd = {
	NULL, "ix", DV_IFNET
};

const struct cfattach ix_ca = {
	sizeof(struct ix_softc), ixgbe_probe, ixgbe_attach, ixgbe_detach,
	ixgbe_activate
};

int ixgbe_smart_speed = ixgbe_smart_speed_on;
int ixgbe_enable_msix = 1;

/*********************************************************************
 *  Device identification routine
 *
 *  ixgbe_probe determines if the driver should be loaded on an
 *  adapter, based on the PCI vendor/device id of the adapter.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

int
ixgbe_probe(struct device *parent, void *match, void *aux)
{
	INIT_DEBUGOUT("ixgbe_probe: begin");

	return (pci_matchbyid((struct pci_attach_args *)aux, ixgbe_devices,
	    nitems(ixgbe_devices)));
}

/*********************************************************************
 *  Device initialization routine
 *
 *  The attach entry point is called when the driver is being loaded.
 *  This routine identifies the type of hardware, allocates all resources
 *  and initializes the hardware.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

void
ixgbe_attach(struct device *parent, struct device *self, void *aux)
{
	struct pci_attach_args	*pa = (struct pci_attach_args *)aux;
	struct ix_softc		*sc = (struct ix_softc *)self;
	int			 error = 0;
	uint16_t		 csum;
	uint32_t		 ctrl_ext;
	struct ixgbe_hw		*hw = &sc->hw;

	INIT_DEBUGOUT("ixgbe_attach: begin");

	sc->osdep.os_sc = sc;
	sc->osdep.os_pa = *pa;

	rw_init(&sc->sfflock, "ixsff");

#if NKSTAT > 0
	ix_kstats(sc);
#endif

	/* Determine hardware revision */
	ixgbe_identify_hardware(sc);

	/* Indicate to RX setup to use Jumbo Clusters */
	sc->num_tx_desc = DEFAULT_TXD;
	sc->num_rx_desc = DEFAULT_RXD;

	/* Do base PCI setup - map BAR0 */
	if (ixgbe_allocate_pci_resources(sc))
		goto err_out;

	/* Allocate our TX/RX Queues */
	if (ixgbe_allocate_queues(sc))
		goto err_out;

	/* Allocate multicast array memory. */
	sc->mta = mallocarray(IXGBE_ETH_LENGTH_OF_ADDRESS,
	    MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
	if (sc->mta == NULL) {
		printf(": Can not allocate multicast setup array\n");
		goto err_late;
	}

	/* Initialize the shared code */
	error = ixgbe_init_shared_code(hw);
	if (error) {
		printf(": Unable to initialize the shared code\n");
		goto err_late;
	}

	/* Make sure we have a good EEPROM before we read from it */
	if (sc->hw.eeprom.ops.validate_checksum(&sc->hw, &csum) < 0) {
		printf(": The EEPROM Checksum Is Not Valid\n");
		goto err_late;
	}

	error = ixgbe_init_hw(hw);
	if (error == IXGBE_ERR_EEPROM_VERSION) {
		printf(": This device is a pre-production adapter/"
		    "LOM. Please be aware there may be issues associated "
		    "with your hardware.\nIf you are experiencing problems "
		    "please contact your Intel or hardware representative "
		    "who provided you with this hardware.\n");
	} else if (error && (error != IXGBE_ERR_SFP_NOT_PRESENT &&
	    error != IXGBE_ERR_SFP_NOT_SUPPORTED)) {
		printf(": Hardware Initialization Failure\n");
		goto err_late;
	}

	bcopy(sc->hw.mac.addr, sc->arpcom.ac_enaddr,
	    IXGBE_ETH_LENGTH_OF_ADDRESS);

	if (sc->sc_intrmap)
		error = ixgbe_allocate_msix(sc);
	else
		error = ixgbe_allocate_legacy(sc);
	if (error)
		goto err_late;

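	/*
	 * Some shared-code handlers are optional and left NULL when the
	 * hardware has no such feature, hence the function pointer checks
	 * before the calls below.
	 */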
	/* Enable the optics for 82599 SFP+ fiber */
	if (sc->hw.mac.ops.enable_tx_laser)
		sc->hw.mac.ops.enable_tx_laser(&sc->hw);

	/* Enable power to the phy */
	if (hw->phy.ops.set_phy_power)
		hw->phy.ops.set_phy_power(&sc->hw, TRUE);

	/* Setup OS specific network interface */
	ixgbe_setup_interface(sc);

	/* Get the PCI-E bus info and determine LAN ID */
	hw->mac.ops.get_bus_info(hw);

	/* Set an initial default flow control value */
	sc->fc = ixgbe_fc_full;

	/* let hardware know driver is loaded */
	ctrl_ext = IXGBE_READ_REG(&sc->hw, IXGBE_CTRL_EXT);
	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(&sc->hw, IXGBE_CTRL_EXT, ctrl_ext);

	printf(", address %s\n", ether_sprintf(sc->hw.mac.addr));

	INIT_DEBUGOUT("ixgbe_attach: end");
	return;

err_late:
	ixgbe_free_transmit_structures(sc);
	ixgbe_free_receive_structures(sc);
err_out:
	ixgbe_free_pci_resources(sc);
	free(sc->mta, M_DEVBUF, IXGBE_ETH_LENGTH_OF_ADDRESS *
	    MAX_NUM_MULTICAST_ADDRESSES);
}

/*********************************************************************
 *  Device removal routine
 *
 *  The detach entry point is called when the driver is being removed.
 *  This routine stops the adapter and deallocates all the resources
 *  that were allocated for driver operation.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

int
ixgbe_detach(struct device *self, int flags)
{
	struct ix_softc *sc = (struct ix_softc *)self;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t	ctrl_ext;

	INIT_DEBUGOUT("ixgbe_detach: begin");

	ixgbe_stop(sc);

	/* let hardware know driver is unloading */
	ctrl_ext = IXGBE_READ_REG(&sc->hw, IXGBE_CTRL_EXT);
	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(&sc->hw, IXGBE_CTRL_EXT, ctrl_ext);

	ether_ifdetach(ifp);
	if_detach(ifp);

	ixgbe_free_pci_resources(sc);

	ixgbe_free_transmit_structures(sc);
	ixgbe_free_receive_structures(sc);
	free(sc->mta, M_DEVBUF, IXGBE_ETH_LENGTH_OF_ADDRESS *
	    MAX_NUM_MULTICAST_ADDRESSES);

	/* XXX kstat */

	return (0);
}

int
ixgbe_activate(struct device *self, int act)
{
	struct ix_softc *sc = (struct ix_softc *)self;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct ixgbe_hw	*hw = &sc->hw;
	uint32_t	 ctrl_ext;
	int rv = 0;

	switch (act) {
	case DVACT_QUIESCE:
		if (ifp->if_flags & IFF_RUNNING)
			ixgbe_stop(sc);
		break;
	case DVACT_RESUME:
		ixgbe_init_hw(hw);

		/* Enable the optics for 82599 SFP+ fiber */
		if (sc->hw.mac.ops.enable_tx_laser)
			sc->hw.mac.ops.enable_tx_laser(&sc->hw);

		/* Enable power to the phy */
		if (hw->phy.ops.set_phy_power)
			hw->phy.ops.set_phy_power(&sc->hw, TRUE);

		/* Get the PCI-E bus info and determine LAN ID */
		hw->mac.ops.get_bus_info(hw);

		/* let hardware know driver is loaded */
		ctrl_ext = IXGBE_READ_REG(&sc->hw, IXGBE_CTRL_EXT);
		ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
		IXGBE_WRITE_REG(&sc->hw, IXGBE_CTRL_EXT, ctrl_ext);

		if (ifp->if_flags & IFF_UP)
			ixgbe_init(sc);
		break;
	default:
		break;
	}
	return (rv);
}

/*********************************************************************
 *  Transmit entry point
 *
 *  ixgbe_start is called by the stack to initiate a transmit.
 *  The driver will remain in this routine as long as there are
 *  packets to transmit and transmit resources are available.
 *  In case resources are not available, the stack is notified
 *  and the packet is requeued.
 **********************************************************************/

void
ixgbe_start(struct ifqueue *ifq)
{
	struct ifnet		*ifp = ifq->ifq_if;
	struct ix_softc		*sc = ifp->if_softc;
	struct tx_ring		*txr = ifq->ifq_softc;
	struct mbuf		*m_head;
	unsigned int		 head, free, used;
	int			 post = 0;

	if (!sc->link_up)
		return;

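	/*
	 * Free descriptor count: the consumer index (next_to_clean)
	 * chases the producer (next_avail_desc) around the ring, so the
	 * distance wraps by num_tx_desc when the consumer index is at or
	 * behind the producer.
	 */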
	head = txr->next_avail_desc;
	free = txr->next_to_clean;
	if (free <= head)
		free += sc->num_tx_desc;
	free -= head;

	membar_consumer();

	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
	    0, txr->txdma.dma_map->dm_mapsize,
	    BUS_DMASYNC_POSTWRITE);

	for (;;) {
		/* Check that we have the minimal number of TX descriptors. */
		if (free <= IXGBE_TX_OP_THRESHOLD) {
			ifq_set_oactive(ifq);
			break;
		}

		m_head = ifq_dequeue(ifq);
		if (m_head == NULL)
			break;

		used = ixgbe_encap(txr, m_head);
		if (used == 0) {
			m_freem(m_head);
			continue;
		}

		free -= used;

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap_ether(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
#endif

		/* Set timeout in case hardware has problems transmitting */
		txr->watchdog_timer = IXGBE_TX_TIMEOUT;
		ifp->if_timer = IXGBE_TX_TIMEOUT;

		post = 1;
	}

	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
	    0, txr->txdma.dma_map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/*
	 * Advance the Transmit Descriptor Tail (Tdt), this tells the
	 * hardware that this frame is available to transmit.
	 */
	if (post)
		IXGBE_WRITE_REG(&sc->hw, IXGBE_TDT(txr->me),
		    txr->next_avail_desc);
}

/*********************************************************************
 *  Ioctl entry point
 *
 *  ixgbe_ioctl is called when the user wants to configure the
 *  interface.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

int
ixgbe_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct ix_softc	*sc = ifp->if_softc;
	struct ifreq	*ifr = (struct ifreq *)data;
	int		 s, error = 0;

	s = splnet();

	switch (command) {
	case SIOCSIFADDR:
		IOCTL_DEBUGOUT("ioctl: SIOCxIFADDR (Get/Set Interface Addr)");
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			ixgbe_init(sc);
		break;

	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				ixgbe_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				ixgbe_stop(sc);
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
		error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
		break;

	case SIOCGIFRXR:
		error = ixgbe_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_data);
		break;

	case SIOCGIFSFFPAGE:
		error = rw_enter(&sc->sfflock, RW_WRITE|RW_INTR);
		if (error != 0)
			break;

		error = ixgbe_get_sffpage(sc, (struct if_sffpage *)data);
		rw_exit(&sc->sfflock);
		break;

	default:
		error = ether_ioctl(ifp, &sc->arpcom, command, data);
	}

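	/*
	 * ENETRESET is returned above when a filter-related change was
	 * made while the interface is running: reprogram the RX filter
	 * via ixgbe_iff() instead of doing a full reinit.
	 */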
	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING) {
			ixgbe_disable_intr(sc);
			ixgbe_iff(sc);
			ixgbe_enable_intr(sc);
			ixgbe_enable_queues(sc);
		}
		error = 0;
	}

	splx(s);
	return (error);
}

int
ixgbe_get_sffpage(struct ix_softc *sc, struct if_sffpage *sff)
{
	struct ixgbe_hw *hw = &sc->hw;
	uint32_t swfw_mask = hw->phy.phy_semaphore_mask;
	uint8_t page;
	size_t i;
	int error = EIO;

	if (hw->phy.type == ixgbe_phy_fw)
		return (ENODEV);

	if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
		return (EBUSY); /* XXX */

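	/*
	 * The module's page-select byte lives at offset 127 of the 0xa0
	 * EEPROM address space: remember the current page, switch to the
	 * requested one for the dump below, and restore it afterwards.
	 */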
	if (sff->sff_addr == IFSFF_ADDR_EEPROM) {
		if (hw->phy.ops.read_i2c_byte_unlocked(hw, 127,
		    IFSFF_ADDR_EEPROM, &page))
			goto error;
		if (page != sff->sff_page &&
		    hw->phy.ops.write_i2c_byte_unlocked(hw, 127,
		    IFSFF_ADDR_EEPROM, sff->sff_page))
			goto error;
	}

	for (i = 0; i < sizeof(sff->sff_data); i++) {
		if (hw->phy.ops.read_i2c_byte_unlocked(hw, i,
		    sff->sff_addr, &sff->sff_data[i]))
			goto error;
	}

	if (sff->sff_addr == IFSFF_ADDR_EEPROM) {
		if (page != sff->sff_page &&
		    hw->phy.ops.write_i2c_byte_unlocked(hw, 127,
		    IFSFF_ADDR_EEPROM, page))
			goto error;
	}

	error = 0;
error:
	hw->mac.ops.release_swfw_sync(hw, swfw_mask);
	return (error);
}

int
ixgbe_rxrinfo(struct ix_softc *sc, struct if_rxrinfo *ifri)
{
	struct if_rxring_info *ifr, ifr1;
	struct rx_ring *rxr;
	int error, i;
	u_int n = 0;

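	/* With a single queue the on-stack ifr1 avoids an allocation. */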
	if (sc->num_queues > 1) {
		ifr = mallocarray(sc->num_queues, sizeof(*ifr), M_DEVBUF,
		    M_WAITOK | M_ZERO);
	} else
		ifr = &ifr1;

	for (i = 0; i < sc->num_queues; i++) {
		rxr = &sc->rx_rings[i];
		ifr[n].ifr_size = MCLBYTES;
		snprintf(ifr[n].ifr_name, sizeof(ifr[n].ifr_name), "%d", i);
		ifr[n].ifr_info = rxr->rx_ring;
		n++;
	}

	error = if_rxr_info_ioctl(ifri, sc->num_queues, ifr);

	if (sc->num_queues > 1)
		free(ifr, M_DEVBUF, sc->num_queues * sizeof(*ifr));
	return (error);
}

/*********************************************************************
 *  Watchdog entry point
 *
 **********************************************************************/

void
ixgbe_watchdog(struct ifnet *ifp)
{
	struct ix_softc *sc = (struct ix_softc *)ifp->if_softc;
	struct tx_ring *txr = sc->tx_rings;
	struct ixgbe_hw *hw = &sc->hw;
	int		tx_hang = FALSE;
	int		i;

	/*
	 * The timer is set to 5 every time ixgbe_start() queues a packet.
	 * Anytime all descriptors are clean the timer is set to 0.
	 */
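	/*
	 * So a queue counts as hung only when its timer decrements from
	 * 1 to 0 here; queues whose timer is already 0 are idle and are
	 * skipped.
	 */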
	for (i = 0; i < sc->num_queues; i++, txr++) {
		if (txr->watchdog_timer == 0 || --txr->watchdog_timer)
			continue;
		else {
			tx_hang = TRUE;
			break;
		}
	}
	if (tx_hang == FALSE)
		return;

	/*
	 * If we are in this routine because of pause frames, then don't
	 * reset the hardware.
	 */
	if (!(IXGBE_READ_REG(hw, IXGBE_TFCS) & IXGBE_TFCS_TXON)) {
		for (i = 0; i < sc->num_queues; i++, txr++)
			txr->watchdog_timer = IXGBE_TX_TIMEOUT;
		ifp->if_timer = IXGBE_TX_TIMEOUT;
		return;
	}

	printf("%s: Watchdog timeout -- resetting\n", ifp->if_xname);
	for (i = 0; i < sc->num_queues; i++, txr++) {
		printf("%s: Queue(%d) tdh = %d, hw tdt = %d\n", ifp->if_xname, i,
		    IXGBE_READ_REG(hw, IXGBE_TDH(i)),
		    IXGBE_READ_REG(hw, IXGBE_TDT(i)));
		printf("%s: TX(%d) Next TX to Clean = %d\n", ifp->if_xname,
		    i, txr->next_to_clean);
	}
	ifp->if_flags &= ~IFF_RUNNING;

	ixgbe_init(sc);
}

/*********************************************************************
 *  Init entry point
 *
 *  This routine is used in two ways. It is used by the stack as
 *  the init entry point in the network interface structure. It is
 *  also used by the driver as a hw/sw initialization routine to
 *  get to a consistent state.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/
#define IXGBE_MHADD_MFS_SHIFT 16

void
ixgbe_init(void *arg)
{
	struct ix_softc	*sc = (struct ix_softc *)arg;
	struct ifnet	*ifp = &sc->arpcom.ac_if;
	struct rx_ring	*rxr = sc->rx_rings;
	uint32_t	 k, txdctl, rxdctl, rxctrl, mhadd, itr;
	int		 i, s, err;

	INIT_DEBUGOUT("ixgbe_init: begin");

	s = splnet();

	ixgbe_stop(sc);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(&sc->hw, 0, sc->hw.mac.addr, 0, IXGBE_RAH_AV);

	/* Get the latest mac address, User can use a LAA */
	bcopy(sc->arpcom.ac_enaddr, sc->hw.mac.addr,
	    IXGBE_ETH_LENGTH_OF_ADDRESS);
	ixgbe_set_rar(&sc->hw, 0, sc->hw.mac.addr, 0, 1);
	sc->hw.addr_ctrl.rar_used_count = 1;

	/* Prepare transmit descriptors and buffers */
	if (ixgbe_setup_transmit_structures(sc)) {
		printf("%s: Could not setup transmit structures\n",
		    ifp->if_xname);
		ixgbe_stop(sc);
		splx(s);
		return;
	}

	ixgbe_init_hw(&sc->hw);
	ixgbe_initialize_transmit_units(sc);

	/* Use 2k clusters, even for jumbo frames */
	sc->rx_mbuf_sz = MCLBYTES + ETHER_ALIGN;

	/* Prepare receive descriptors and buffers */
	if (ixgbe_setup_receive_structures(sc)) {
		printf("%s: Could not setup receive structures\n",
		    ifp->if_xname);
		ixgbe_stop(sc);
		splx(s);
		return;
	}

	/* Configure RX settings */
	ixgbe_initialize_receive_units(sc);

	/* Enable SDP & MSIX interrupts based on adapter */
	ixgbe_config_gpie(sc);

	/* Program promiscuous mode and multicast filters. */
	ixgbe_iff(sc);

	/* Set MRU size */
	mhadd = IXGBE_READ_REG(&sc->hw, IXGBE_MHADD);
	mhadd &= ~IXGBE_MHADD_MFS_MASK;
	mhadd |= sc->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
	IXGBE_WRITE_REG(&sc->hw, IXGBE_MHADD, mhadd);

	/* Now enable all the queues */
	for (i = 0; i < sc->num_queues; i++) {
		txdctl = IXGBE_READ_REG(&sc->hw, IXGBE_TXDCTL(i));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		/* Set WTHRESH to 8, burst writeback */
		txdctl |= (8 << 16);
		/*
		 * When the internal queue falls below PTHRESH (16),
		 * start prefetching as long as there are at least
		 * HTHRESH (1) buffers ready.
		 */
		txdctl |= (16 << 0) | (1 << 8);
		IXGBE_WRITE_REG(&sc->hw, IXGBE_TXDCTL(i), txdctl);
	}

	for (i = 0; i < sc->num_queues; i++) {
		rxdctl = IXGBE_READ_REG(&sc->hw, IXGBE_RXDCTL(i));
		if (sc->hw.mac.type == ixgbe_mac_82598EB) {
			/*
			 * PTHRESH = 21
			 * HTHRESH = 4
			 * WTHRESH = 8
			 */
			rxdctl &= ~0x3FFFFF;
			rxdctl |= 0x080420;
		}
		rxdctl |= IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(&sc->hw, IXGBE_RXDCTL(i), rxdctl);
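		/*
		 * Give the ring enable bit time to latch: poll
		 * RXDCTL.ENABLE for up to ~10ms before writing the tail
		 * pointer below.
		 */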
		for (k = 0; k < 10; k++) {
			if (IXGBE_READ_REG(&sc->hw, IXGBE_RXDCTL(i)) &
			    IXGBE_RXDCTL_ENABLE)
				break;
			else
				msec_delay(1);
		}
		IXGBE_WRITE_FLUSH(&sc->hw);
		IXGBE_WRITE_REG(&sc->hw, IXGBE_RDT(i), rxr->last_desc_filled);
	}

	/* Set up VLAN support and filter */
	ixgbe_setup_vlan_hw_support(sc);

	/* Enable Receive engine */
	rxctrl = IXGBE_READ_REG(&sc->hw, IXGBE_RXCTRL);
	if (sc->hw.mac.type == ixgbe_mac_82598EB)
		rxctrl |= IXGBE_RXCTRL_DMBYPS;
	rxctrl |= IXGBE_RXCTRL_RXEN;
	sc->hw.mac.ops.enable_rx_dma(&sc->hw, rxctrl);

	/* Set up MSI/X routing */
	if (sc->sc_intrmap) {
		ixgbe_configure_ivars(sc);
		/* Set up auto-mask */
		if (sc->hw.mac.type == ixgbe_mac_82598EB)
			IXGBE_WRITE_REG(&sc->hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
		else {
			IXGBE_WRITE_REG(&sc->hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
			IXGBE_WRITE_REG(&sc->hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
		}
	} else {  /* Simple settings for Legacy/MSI */
		ixgbe_set_ivar(sc, 0, 0, 0);
		ixgbe_set_ivar(sc, 0, 0, 1);
		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
	}

	/* Check on any SFP devices that need to be kick-started */
	if (sc->hw.phy.type == ixgbe_phy_none) {
		err = sc->hw.phy.ops.identify(&sc->hw);
		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
			printf("Unsupported SFP+ module type was detected.\n");
			splx(s);
			return;
		}
	}

	/* Setup interrupt moderation */
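	/*
	 * The 0xff8 mask clears the low bits of the computed interval,
	 * which the EITR interval field does not use; the value targets
	 * roughly IXGBE_INTS_PER_SEC interrupts per second per vector.
	 */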
	itr = (4000000 / IXGBE_INTS_PER_SEC) & 0xff8;
	if (sc->hw.mac.type != ixgbe_mac_82598EB)
		itr |= IXGBE_EITR_LLI_MOD | IXGBE_EITR_CNT_WDIS;
	IXGBE_WRITE_REG(&sc->hw, IXGBE_EITR(0), itr);

	if (sc->sc_intrmap) {
		/* Set moderation on the Link interrupt */
		IXGBE_WRITE_REG(&sc->hw, IXGBE_EITR(sc->linkvec),
		    IXGBE_LINK_ITR);
	}

	/* Enable power to the phy */
	if (sc->hw.phy.ops.set_phy_power)
		sc->hw.phy.ops.set_phy_power(&sc->hw, TRUE);

	/* Config/Enable Link */
	ixgbe_config_link(sc);

	/* Hardware Packet Buffer & Flow Control setup */
	ixgbe_config_delay_values(sc);

	/* Initialize the FC settings */
	sc->hw.mac.ops.start_hw(&sc->hw);

	/* And now turn on interrupts */
	ixgbe_enable_intr(sc);
	ixgbe_enable_queues(sc);

	/* Now inform the stack we're ready */
	ifp->if_flags |= IFF_RUNNING;
	for (i = 0; i < sc->num_queues; i++)
		ifq_clr_oactive(ifp->if_ifqs[i]);

#if NKSTAT > 0
	ix_kstats_tick(sc);
#endif

	splx(s);
}

void
ixgbe_config_gpie(struct ix_softc *sc)
{
	struct ixgbe_hw	*hw = &sc->hw;
	uint32_t gpie;

	gpie = IXGBE_READ_REG(&sc->hw, IXGBE_GPIE);

	/* Fan Failure Interrupt */
	if (hw->device_id == IXGBE_DEV_ID_82598AT)
		gpie |= IXGBE_SDP1_GPIEN;

	if (sc->hw.mac.type == ixgbe_mac_82599EB) {
		/* Add for Module detection */
		gpie |= IXGBE_SDP2_GPIEN;

		/* Media ready */
		if (hw->device_id != IXGBE_DEV_ID_82599_QSFP_SF_QP)
			gpie |= IXGBE_SDP1_GPIEN;

		/*
		 * Set LL interval to max to reduce the number of low latency
		 * interrupts hitting the card when the ring is getting full.
		 */
		gpie |= 0xf << IXGBE_GPIE_LLI_DELAY_SHIFT;
	}

	if (sc->hw.mac.type == ixgbe_mac_X540 ||
	    sc->hw.mac.type == ixgbe_mac_X550EM_x ||
	    sc->hw.mac.type == ixgbe_mac_X550EM_a) {
		/*
		 * Thermal Failure Detection (X540)
		 * Link Detection (X552 SFP+, X552/X557-AT)
		 */
		gpie |= IXGBE_SDP0_GPIEN_X540;

		/*
		 * Set LL interval to max to reduce the number of low latency
		 * interrupts hitting the card when the ring is getting full.
		 */
		gpie |= 0xf << IXGBE_GPIE_LLI_DELAY_SHIFT;
	}

	if (sc->sc_intrmap) {
		/* Enable Enhanced MSIX mode */
		gpie |= IXGBE_GPIE_MSIX_MODE;
		gpie |= IXGBE_GPIE_EIAME | IXGBE_GPIE_PBA_SUPPORT |
		    IXGBE_GPIE_OCD;
	}

	IXGBE_WRITE_REG(&sc->hw, IXGBE_GPIE, gpie);
}

/*
 * Requires sc->max_frame_size to be set.
 */
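/*
 * The IXGBE_DV/IXGBE_LOW_DV macros estimate a worst-case flow control
 * delay in bits for the given frame size, and IXGBE_BT2KB rounds that
 * up to KB; the result places the XOFF (high water) and XON (low water)
 * thresholds relative to the packet buffer size.
 */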
void
ixgbe_config_delay_values(struct ix_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;
	uint32_t rxpb, frame, size, tmp;

	frame = sc->max_frame_size;

	/* Calculate High Water */
	switch (hw->mac.type) {
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		tmp = IXGBE_DV_X540(frame, frame);
		break;
	default:
		tmp = IXGBE_DV(frame, frame);
		break;
	}
	size = IXGBE_BT2KB(tmp);
	rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
	hw->fc.high_water[0] = rxpb - size;

	/* Now calculate Low Water */
	switch (hw->mac.type) {
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		tmp = IXGBE_LOW_DV_X540(frame);
		break;
	default:
		tmp = IXGBE_LOW_DV(frame);
		break;
	}
	hw->fc.low_water[0] = IXGBE_BT2KB(tmp);

	hw->fc.requested_mode = sc->fc;
	hw->fc.pause_time = IXGBE_FC_PAUSE;
	hw->fc.send_xon = TRUE;
}

/*
 * MSIX Interrupt Handlers
 */
void
ixgbe_enable_queue(struct ix_softc *sc, uint32_t vector)
{
	uint64_t queue = 1ULL << vector;
	uint32_t mask;

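	/*
	 * The 82598 exposes a single 16-bit queue mask in EIMS; later
	 * MACs spread the 64-bit per-vector bitmap across two 32-bit
	 * EIMS_EX registers, so the mask is written in two halves.
	 */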
	if (sc->hw.mac.type == ixgbe_mac_82598EB) {
		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMS, mask);
	} else {
		mask = (queue & 0xFFFFFFFF);
		if (mask)
			IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMS_EX(0), mask);
		mask = (queue >> 32);
		if (mask)
			IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMS_EX(1), mask);
	}
}

void
ixgbe_enable_queues(struct ix_softc *sc)
{
	struct ix_queue *que;
	int i;

	for (i = 0, que = sc->queues; i < sc->num_queues; i++, que++)
		ixgbe_enable_queue(sc, que->msix);
}

void
ixgbe_disable_queue(struct ix_softc *sc, uint32_t vector)
{
	uint64_t queue = 1ULL << vector;
	uint32_t mask;

	if (sc->hw.mac.type == ixgbe_mac_82598EB) {
		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC, mask);
	} else {
		mask = (queue & 0xFFFFFFFF);
		if (mask)
			IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC_EX(0), mask);
		mask = (queue >> 32);
		if (mask)
			IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC_EX(1), mask);
	}
}

/*
 * MSIX Interrupt Handlers
 */
int
ixgbe_link_intr(void *vsc)
{
	struct ix_softc	*sc = (struct ix_softc *)vsc;

	return ixgbe_intr(sc);
}

int
ixgbe_queue_intr(void *vque)
{
	struct ix_queue *que = vque;
	struct ix_softc	*sc = que->sc;
	struct ifnet	*ifp = &sc->arpcom.ac_if;
	struct rx_ring	*rxr = que->rxr;
	struct tx_ring	*txr = que->txr;

	if (ISSET(ifp->if_flags, IFF_RUNNING)) {
		ixgbe_rxeof(rxr);
		ixgbe_txeof(txr);
		ixgbe_rxrefill(rxr);
	}

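	/* The vector was auto-masked (EIAM) when it fired; unmask it. */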
	ixgbe_enable_queue(sc, que->msix);

	return (1);
}

/*********************************************************************
 *
 *  Legacy Interrupt Service routine
 *
 **********************************************************************/

int
ixgbe_legacy_intr(void *arg)
{
	struct ix_softc	*sc = (struct ix_softc *)arg;
	struct ifnet	*ifp = &sc->arpcom.ac_if;
	struct rx_ring	*rxr = sc->rx_rings;
	struct tx_ring	*txr = sc->tx_rings;
	int rv;

	rv = ixgbe_intr(sc);
	if (rv == 0) {
		return (0);
	}

	if (ISSET(ifp->if_flags, IFF_RUNNING)) {
		ixgbe_rxeof(rxr);
		ixgbe_txeof(txr);
		ixgbe_rxrefill(rxr);
	}

	ixgbe_enable_queues(sc);
	return (rv);
}

int
ixgbe_intr(struct ix_softc *sc)
{
	struct ifnet	*ifp = &sc->arpcom.ac_if;
	struct ixgbe_hw	*hw = &sc->hw;
	uint32_t	 reg_eicr, mod_mask, msf_mask;

	if (sc->sc_intrmap) {
		/* Pause other interrupts */
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);
		/* First get the cause */
		reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
		/* Be sure the queue bits are not cleared */
		reg_eicr &= ~IXGBE_EICR_RTX_QUEUE;
		/* Clear interrupt with write */
		IXGBE_WRITE_REG(hw, IXGBE_EICR, reg_eicr);
	} else {
		reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
		if (reg_eicr == 0) {
			ixgbe_enable_intr(sc);
			ixgbe_enable_queues(sc);
			return (0);
		}
	}

	/* Link status change */
	if (reg_eicr & IXGBE_EICR_LSC) {
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
		KERNEL_LOCK();
		ixgbe_update_link_status(sc);
		KERNEL_UNLOCK();
	}

	if (hw->mac.type != ixgbe_mac_82598EB) {
		if (reg_eicr & IXGBE_EICR_ECC) {
			printf("%s: CRITICAL: ECC ERROR!! "
			    "Please Reboot!!\n", sc->dev.dv_xname);
			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
		}
		/* Check for over temp condition */
		if (reg_eicr & IXGBE_EICR_TS) {
			printf("%s: CRITICAL: OVER TEMP!! "
			    "PHY IS SHUT DOWN!!\n", ifp->if_xname);
			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
		}
	}

	/* Pluggable optics-related interrupt */
	if (ixgbe_is_sfp(hw)) {
		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP) {
			mod_mask = IXGBE_EICR_GPI_SDP0_X540;
			msf_mask = IXGBE_EICR_GPI_SDP1_X540;
		} else if (hw->mac.type == ixgbe_mac_X540 ||
		    hw->mac.type == ixgbe_mac_X550 ||
		    hw->mac.type == ixgbe_mac_X550EM_x) {
			mod_mask = IXGBE_EICR_GPI_SDP2_X540;
			msf_mask = IXGBE_EICR_GPI_SDP1_X540;
		} else {
			mod_mask = IXGBE_EICR_GPI_SDP2;
			msf_mask = IXGBE_EICR_GPI_SDP1;
		}
		if (reg_eicr & mod_mask) {
			/* Clear the interrupt */
			IXGBE_WRITE_REG(hw, IXGBE_EICR, mod_mask);
			KERNEL_LOCK();
			ixgbe_handle_mod(sc);
			KERNEL_UNLOCK();
		} else if ((hw->phy.media_type != ixgbe_media_type_copper) &&
		    (reg_eicr & msf_mask)) {
			/* Clear the interrupt */
			IXGBE_WRITE_REG(hw, IXGBE_EICR, msf_mask);
			KERNEL_LOCK();
			ixgbe_handle_msf(sc);
			KERNEL_UNLOCK();
		}
	}

	/* Check for fan failure */
	if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
	    (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
		printf("%s: CRITICAL: FAN FAILURE!! "
		    "REPLACE IMMEDIATELY!!\n", ifp->if_xname);
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
	}

	/* External PHY interrupt */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
| 1212 | (reg_eicr & IXGBE_EICR_GPI_SDP0_X5400x02000000)) { | |||
| 1213 | /* Clear the interrupt */ | |||
| 1214 | IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540)((((struct ixgbe_osdep *)(hw)->back)->os_memt)->write_4 ((((struct ixgbe_osdep *)(hw)->back)->os_memh), (0x00800 ), (0x02000000))); | |||
| 1215 | KERNEL_LOCK()_kernel_lock(); | |||
| 1216 | ixgbe_handle_phy(sc); | |||
| 1217 | KERNEL_UNLOCK()_kernel_unlock(); | |||
| 1218 | } | |||
| 1219 | ||||
| 1220 | IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC)((((struct ixgbe_osdep *)(hw)->back)->os_memt)->write_4 ((((struct ixgbe_osdep *)(hw)->back)->os_memh), (0x00880 ), (0x80000000 | 0x00100000))); | |||
| 1221 | ||||
| 1222 | return (1); | |||
| 1223 | } | |||
| 1224 | ||||
| 1225 | /********************************************************************* | |||
| 1226 | * | |||
| 1227 | * Media Ioctl callback | |||
| 1228 | * | |||
| 1229 | * This routine is called whenever the user queries the status of | |||
| 1230 | * the interface using ifconfig. | |||
| 1231 | * | |||
| 1232 | **********************************************************************/ | |||
| 1233 | void | |||
| 1234 | ixgbe_media_status(struct ifnet * ifp, struct ifmediareq *ifmr) | |||
| 1235 | { | |||
| 1236 | struct ix_softc *sc = ifp->if_softc; | |||
| 1237 | uint64_t layer; | |||
| 1238 | ||||
| 1239 | ifmr->ifm_active = IFM_ETHER0x0000000000000100ULL; | |||
| 1240 | ifmr->ifm_status = IFM_AVALID0x0000000000000001ULL; | |||
| 1241 | ||||
| 1242 | INIT_DEBUGOUT("ixgbe_media_status: begin")if (0) printf("ixgbe_media_status: begin" "\n"); | |||
| 1243 | ixgbe_update_link_status(sc); | |||
| 1244 | ||||
| 1245 | if (!LINK_STATE_IS_UP(ifp->if_link_state)((ifp->if_data.ifi_link_state) >= 4 || (ifp->if_data .ifi_link_state) == 0)) | |||
| 1246 | return; | |||
| 1247 | ||||
| 1248 | ifmr->ifm_status |= IFM_ACTIVE0x0000000000000002ULL; | |||
| 1249 | layer = sc->phy_layer; | |||
| 1250 | ||||
| 1251 | if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T0x00001 || | |||
| 1252 | layer & IXGBE_PHYSICAL_LAYER_1000BASE_T0x00002 || | |||
| 1253 | layer & IXGBE_PHYSICAL_LAYER_100BASE_TX0x00004 || | |||
| 1254 | layer & IXGBE_PHYSICAL_LAYER_10BASE_T0x08000) { | |||
| 1255 | switch (sc->link_speed) { | |||
| 1256 | case IXGBE_LINK_SPEED_10GB_FULL0x0080: | |||
| 1257 | ifmr->ifm_active |= IFM_10G_T22 | IFM_FDX0x0000010000000000ULL; | |||
| 1258 | break; | |||
| 1259 | case IXGBE_LINK_SPEED_1GB_FULL0x0020: | |||
| 1260 | ifmr->ifm_active |= IFM_1000_T16 | IFM_FDX0x0000010000000000ULL; | |||
| 1261 | break; | |||
| 1262 | case IXGBE_LINK_SPEED_100_FULL0x0008: | |||
| 1263 | ifmr->ifm_active |= IFM_100_TX6 | IFM_FDX0x0000010000000000ULL; | |||
| 1264 | break; | |||
| 1265 | case IXGBE_LINK_SPEED_10_FULL0x0002: | |||
| 1266 | ifmr->ifm_active |= IFM_10_T3 | IFM_FDX0x0000010000000000ULL; | |||
| 1267 | break; | |||
| 1268 | } | |||
| 1269 | } | |||
| 1270 | if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU0x00008 || | |||
| 1271 | layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA0x02000) { | |||
| 1272 | switch (sc->link_speed) { | |||
| 1273 | case IXGBE_LINK_SPEED_10GB_FULL0x0080: | |||
| 1274 | ifmr->ifm_active |= IFM_10G_SFP_CU23 | IFM_FDX0x0000010000000000ULL; | |||
| 1275 | break; | |||
| 1276 | } | |||
| 1277 | } | |||
| 1278 | if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR0x00010) { | |||
| 1279 | switch (sc->link_speed) { | |||
| 1280 | case IXGBE_LINK_SPEED_10GB_FULL0x0080: | |||
| 1281 | ifmr->ifm_active |= IFM_10G_LR18 | IFM_FDX0x0000010000000000ULL; | |||
| 1282 | break; | |||
| 1283 | case IXGBE_LINK_SPEED_1GB_FULL0x0020: | |||
| 1284 | ifmr->ifm_active |= IFM_1000_LX14 | IFM_FDX0x0000010000000000ULL; | |||
| 1285 | break; | |||
| 1286 | } | |||
| 1287 | } | |||
| 1288 | if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR0x00040 || | |||
| 1289 | layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX0x04000) { | |||
| 1290 | switch (sc->link_speed) { | |||
| 1291 | case IXGBE_LINK_SPEED_10GB_FULL0x0080: | |||
| 1292 | ifmr->ifm_active |= IFM_10G_SR19 | IFM_FDX0x0000010000000000ULL; | |||
| 1293 | break; | |||
| 1294 | case IXGBE_LINK_SPEED_1GB_FULL0x0020: | |||
| 1295 | ifmr->ifm_active |= IFM_1000_SX11 | IFM_FDX0x0000010000000000ULL; | |||
| 1296 | break; | |||
| 1297 | } | |||
| 1298 | } | |||
| 1299 | if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX40x00100) { | |||
| 1300 | switch (sc->link_speed) { | |||
| 1301 | case IXGBE_LINK_SPEED_10GB_FULL0x0080: | |||
| 1302 | ifmr->ifm_active |= IFM_10G_CX420 | IFM_FDX0x0000010000000000ULL; | |||
| 1303 | break; | |||
| 1304 | } | |||
| 1305 | } | |||
| 1306 | if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR0x00800) { | |||
| 1307 | switch (sc->link_speed) { | |||
| 1308 | case IXGBE_LINK_SPEED_10GB_FULL0x0080: | |||
| 1309 | ifmr->ifm_active |= IFM_10G_KR30 | IFM_FDX0x0000010000000000ULL; | |||
| 1310 | break; | |||
| 1311 | case IXGBE_LINK_SPEED_2_5GB_FULL0x0400: | |||
| 1312 | ifmr->ifm_active |= IFM_2500_KX33 | IFM_FDX0x0000010000000000ULL; | |||
| 1313 | break; | |||
| 1314 | case IXGBE_LINK_SPEED_1GB_FULL0x0020: | |||
| 1315 | ifmr->ifm_active |= IFM_1000_KX28 | IFM_FDX0x0000010000000000ULL; | |||
| 1316 | break; | |||
| 1317 | } | |||
| 1318 | } else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX40x00080 || | |||
| 1319 | layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX0x10000 || | |||
| 1320 | layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX0x00200) { | |||
| 1321 | switch (sc->link_speed) { | |||
| 1322 | case IXGBE_LINK_SPEED_10GB_FULL0x0080: | |||
| 1323 | ifmr->ifm_active |= IFM_10G_KX429 | IFM_FDX0x0000010000000000ULL; | |||
| 1324 | break; | |||
| 1325 | case IXGBE_LINK_SPEED_2_5GB_FULL0x0400: | |||
| 1326 | ifmr->ifm_active |= IFM_2500_KX33 | IFM_FDX0x0000010000000000ULL; | |||
| 1327 | break; | |||
| 1328 | case IXGBE_LINK_SPEED_1GB_FULL0x0020: | |||
| 1329 | ifmr->ifm_active |= IFM_1000_KX28 | IFM_FDX0x0000010000000000ULL; | |||
| 1330 | break; | |||
| 1331 | } | |||
| 1332 | } | |||
| 1333 | ||||
| 1334 | switch (sc->hw.fc.current_mode) { | |||
| 1335 | case ixgbe_fc_tx_pause: | |||
| 1336 | ifmr->ifm_active |= IFM_FLOW0x0000040000000000ULL | IFM_ETH_TXPAUSE0x0000000000040000ULL; | |||
| 1337 | break; | |||
| 1338 | case ixgbe_fc_rx_pause: | |||
| 1339 | ifmr->ifm_active |= IFM_FLOW0x0000040000000000ULL | IFM_ETH_RXPAUSE0x0000000000020000ULL; | |||
| 1340 | break; | |||
| 1341 | case ixgbe_fc_full: | |||
| 1342 | ifmr->ifm_active |= IFM_FLOW0x0000040000000000ULL | IFM_ETH_RXPAUSE0x0000000000020000ULL | | |||
| 1343 | IFM_ETH_TXPAUSE0x0000000000040000ULL; | |||
| 1344 | break; | |||
| 1345 | default: | |||
| 1346 | ifmr->ifm_active &= ~(IFM_FLOW0x0000040000000000ULL | IFM_ETH_RXPAUSE0x0000000000020000ULL | | |||
| 1347 | IFM_ETH_TXPAUSE0x0000000000040000ULL); | |||
| 1348 | break; | |||
| 1349 | } | |||
| 1350 | } | |||
| 1351 | ||||
| 1352 | /********************************************************************* | |||
| 1353 | * | |||
| 1354 | * Media Ioctl callback | |||
| 1355 | * | |||
| 1356 | * This routine is called when the user changes speed/duplex using | |||
| 1357 | * media/mediaopt options with ifconfig. | |||
| 1358 | * | |||
| 1359 | **********************************************************************/ | |||
| 1360 | int | |||
| 1361 | ixgbe_media_change(struct ifnet *ifp) | |||
| 1362 | { | |||
| 1363 | struct ix_softc *sc = ifp->if_softc; | |||
| 1364 | struct ixgbe_hw *hw = &sc->hw; | |||
| 1365 | struct ifmedia *ifm = &sc->media; | |||
| 1366 | ixgbe_link_speed speed = 0; | |||
| 1367 | ||||
| 1368 | if (IFM_TYPE(ifm->ifm_media)((ifm->ifm_media) & 0x000000000000ff00ULL) != IFM_ETHER0x0000000000000100ULL) | |||
| 1369 | return (EINVAL22); | |||
| 1370 | ||||
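| | 	/* Media cannot be changed manually on backplane (KR/KX) devices. */ | |||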
| 1371 | if (hw->phy.media_type == ixgbe_media_type_backplane) | |||
| 1372 | return (ENODEV19); | |||
| 1373 | ||||
| 1374 | switch (IFM_SUBTYPE(ifm->ifm_media)((ifm->ifm_media) & 0x00000000000000ffULL)) { | |||
| 1375 | case IFM_AUTO0ULL: | |||
| 1376 | case IFM_10G_T22: | |||
| 1377 | speed |= IXGBE_LINK_SPEED_100_FULL0x0008; | |||
| 1378 | speed |= IXGBE_LINK_SPEED_1GB_FULL0x0020; | |||
| 1379 | speed |= IXGBE_LINK_SPEED_10GB_FULL0x0080; | |||
| 1380 | break; | |||
| 1381 | case IFM_10G_SR19: | |||
| 1382 | case IFM_10G_KR30: | |||
| 1383 | case IFM_10G_LR18: | |||
| 1384 | case IFM_10G_LRM24: | |||
| 1385 | case IFM_10G_CX420: | |||
| 1386 | case IFM_10G_KX429: | |||
| 1387 | speed |= IXGBE_LINK_SPEED_1GB_FULL0x0020; | |||
| 1388 | speed |= IXGBE_LINK_SPEED_10GB_FULL0x0080; | |||
| 1389 | break; | |||
| 1390 | case IFM_10G_SFP_CU23: | |||
| 1391 | speed |= IXGBE_LINK_SPEED_10GB_FULL0x0080; | |||
| 1392 | break; | |||
| 1393 | case IFM_1000_T16: | |||
| 1394 | speed |= IXGBE_LINK_SPEED_100_FULL0x0008; | |||
| 1395 | speed |= IXGBE_LINK_SPEED_1GB_FULL0x0020; | |||
| 1396 | break; | |||
| 1397 | case IFM_1000_LX14: | |||
| 1398 | case IFM_1000_SX11: | |||
| 1399 | case IFM_1000_CX15: | |||
| 1400 | case IFM_1000_KX28: | |||
| 1401 | speed |= IXGBE_LINK_SPEED_1GB_FULL0x0020; | |||
| 1402 | break; | |||
| 1403 | case IFM_100_TX6: | |||
| 1404 | speed |= IXGBE_LINK_SPEED_100_FULL0x0008; | |||
| 1405 | break; | |||
| 1406 | case IFM_10_T3: | |||
| 1407 | speed |= IXGBE_LINK_SPEED_10_FULL0x0002; | |||
| 1408 | break; | |||
| 1409 | default: | |||
| 1410 | return (EINVAL22); | |||
| 1411 | } | |||
| 1412 | ||||
| 1413 | hw->mac.autotry_restart = TRUE1; | |||
| 1414 | hw->mac.ops.setup_link(hw, speed, TRUE1); | |||
| 1415 | ||||
| 1416 | return (0); | |||
| 1417 | } | |||
| 1418 | ||||
| 1419 | /********************************************************************* | |||
| 1420 | * | |||
| 1421 | * This routine maps the mbufs to tx descriptors, allowing the | |||
| 1422 | * TX engine to transmit the packets. | |||
| 1423 | * - returns the number of descriptors consumed on success, 0 on failure | |||
| 1424 | * | |||
| 1425 | **********************************************************************/ | |||
| 1426 | ||||
| 1427 | int | |||
| 1428 | ixgbe_encap(struct tx_ring *txr, struct mbuf *m_head) | |||
| 1429 | { | |||
| 1430 | struct ix_softc *sc = txr->sc; | |||
| 1431 | uint32_t olinfo_status = 0, cmd_type_len; | |||
| 1432 | int i, j, ntxc; | |||
| 1433 | int first, last = 0; | |||
| 1434 | bus_dmamap_t map; | |||
| 1435 | struct ixgbe_tx_buf *txbuf; | |||
| 1436 | union ixgbe_adv_tx_desc *txd = NULL((void *)0); | |||
| 1437 | ||||
| 1438 | /* Basic descriptor defines */ | |||
| 1439 | cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA0x00300000 | | |||
| 1440 | IXGBE_ADVTXD_DCMD_IFCS0x02000000 | IXGBE_ADVTXD_DCMD_DEXT0x20000000); | |||
| 1441 | ||||
| 1442 | /* | |||
| 1443 | * It is important to capture the first descriptor | |||
| 1444 | * used, because its tx_buffer stores the index of the | |||
| 1445 | * descriptor we tell the hardware to report back on | |||
| 1446 | */ | |||
| 1447 | first = txr->next_avail_desc; | |||
| 1448 | txbuf = &txr->tx_buffers[first]; | |||
| 1449 | map = txbuf->map; | |||
| 1450 | ||||
| 1451 | /* | |||
| 1452 | * Set up the appropriate offload context; | |||
| 1453 | * this becomes the first descriptor. | |||
| 1454 | */ | |||
| 1455 | ntxc = ixgbe_tx_ctx_setup(txr, m_head, &cmd_type_len, &olinfo_status); | |||
| 1456 | if (ntxc == -1) | |||
| 1457 | goto xmit_fail; | |||
| 1458 | ||||
| 1459 | /* | |||
| 1460 | * Map the packet for DMA; on EFBIG, compact the mbuf chain with m_defrag() and retry the load once. | |||
| 1461 | */ | |||
| 1462 | switch (bus_dmamap_load_mbuf(txr->txdma.dma_tag, map,(*(txr->txdma.dma_tag)->_dmamap_load_mbuf)((txr->txdma .dma_tag), (map), (m_head), (0x0001)) | |||
| 1463 | m_head, BUS_DMA_NOWAIT)(*(txr->txdma.dma_tag)->_dmamap_load_mbuf)((txr->txdma .dma_tag), (map), (m_head), (0x0001))) { | |||
| 1464 | case 0: | |||
| 1465 | break; | |||
| 1466 | case EFBIG27: | |||
| 1467 | if (m_defrag(m_head, M_NOWAIT0x0002) == 0 && | |||
| 1468 | bus_dmamap_load_mbuf(txr->txdma.dma_tag, map,(*(txr->txdma.dma_tag)->_dmamap_load_mbuf)((txr->txdma .dma_tag), (map), (m_head), (0x0001)) | |||
| 1469 | m_head, BUS_DMA_NOWAIT)(*(txr->txdma.dma_tag)->_dmamap_load_mbuf)((txr->txdma .dma_tag), (map), (m_head), (0x0001)) == 0) | |||
| 1470 | break; | |||
| 1471 | /* FALLTHROUGH */ | |||
| 1472 | default: | |||
| 1473 | return (0); | |||
| 1474 | } | |||
| 1475 | ||||
| 1476 | i = txr->next_avail_desc + ntxc; | |||
| 1477 | if (i >= sc->num_tx_desc) | |||
| 1478 | i -= sc->num_tx_desc; | |||
| 1479 | ||||
| 1480 | for (j = 0; j < map->dm_nsegs; j++) { | |||
| 1481 | txd = &txr->tx_base[i]; | |||
| 1482 | ||||
| 1483 | txd->read.buffer_addr = htole64(map->dm_segs[j].ds_addr)((__uint64_t)(map->dm_segs[j].ds_addr)); | |||
| 1484 | txd->read.cmd_type_len = htole32(txr->txd_cmd |((__uint32_t)(txr->txd_cmd | cmd_type_len | map->dm_segs [j].ds_len)) | |||
| 1485 | cmd_type_len | map->dm_segs[j].ds_len)((__uint32_t)(txr->txd_cmd | cmd_type_len | map->dm_segs [j].ds_len)); | |||
| 1486 | txd->read.olinfo_status = htole32(olinfo_status)((__uint32_t)(olinfo_status)); | |||
| 1487 | last = i; /* descriptor that will get completion IRQ */ | |||
| 1488 | ||||
| 1489 | if (++i == sc->num_tx_desc) | |||
| 1490 | i = 0; | |||
| 1491 | } | |||
| 1492 | ||||
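| | 	/* | |||
| | 	 * A successful bus_dmamap_load_mbuf() always yields at least | |||
| | 	 * one segment, so the loop above ran and txd points at the | |||
| | 	 * last descriptor written; it cannot be NULL here. | |||
| | 	 */ | |||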
| 1493 | txd->read.cmd_type_len |= | |||
| 1494 | htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS)((__uint32_t)(0x01000000 | 0x08000000)); | |||
| 1495 | ||||
| 1496 | bus_dmamap_sync(txr->txdma.dma_tag, map, 0, map->dm_mapsize,(*(txr->txdma.dma_tag)->_dmamap_sync)((txr->txdma.dma_tag ), (map), (0), (map->dm_mapsize), (0x04)) | |||
| 1497 | BUS_DMASYNC_PREWRITE)(*(txr->txdma.dma_tag)->_dmamap_sync)((txr->txdma.dma_tag ), (map), (0), (map->dm_mapsize), (0x04)); | |||
| 1498 | ||||
| 1499 | /* Record the mbuf and the index of the descriptor that will be marked done */ | |||
| 1500 | txbuf->m_head = m_head; | |||
| 1501 | txbuf->eop_index = last; | |||
| 1502 | ||||
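| | 	/* | |||
| | 	 * Make the descriptor writes above globally visible before | |||
| | 	 * next_avail_desc is updated and the caller bumps the TDT | |||
| | 	 * tail register. | |||
| | 	 */ | |||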
| 1503 | membar_producer()do { __asm volatile("" ::: "memory"); } while (0); | |||
| 1504 | ||||
| 1505 | txr->next_avail_desc = i; | |||
| 1506 | ||||
| 1507 | return (ntxc + j); | |||
| 1508 | ||||
| 1509 | xmit_fail: | |||
| 1510 | bus_dmamap_unload(txr->txdma.dma_tag, txbuf->map)(*(txr->txdma.dma_tag)->_dmamap_unload)((txr->txdma. dma_tag), (txbuf->map)); | |||
| 1511 | return (0); | |||
| 1512 | } | |||
| 1513 | ||||
| 1514 | void | |||
| 1515 | ixgbe_iff(struct ix_softc *sc) | |||
| 1516 | { | |||
| 1517 | struct ifnet *ifp = &sc->arpcom.ac_if; | |||
| 1518 | struct arpcom *ac = &sc->arpcom; | |||
| 1519 | uint32_t fctrl; | |||
| 1520 | uint8_t *mta; | |||
| 1521 | uint8_t *update_ptr; | |||
| 1522 | struct ether_multi *enm; | |||
| 1523 | struct ether_multistep step; | |||
| 1524 | int mcnt = 0; | |||
| 1525 | ||||
| 1526 | IOCTL_DEBUGOUT("ixgbe_iff: begin")if (0) printf("ixgbe_iff: begin" "\n"); | |||
| 1527 | ||||
| 1528 | mta = sc->mta; | |||
| 1529 | bzero(mta, sizeof(uint8_t) * IXGBE_ETH_LENGTH_OF_ADDRESS *__builtin_bzero((mta), (sizeof(uint8_t) * 6 * 128)) | |||
| 1530 | MAX_NUM_MULTICAST_ADDRESSES)__builtin_bzero((mta), (sizeof(uint8_t) * 6 * 128)); | |||
| 1531 | ||||
| 1532 | fctrl = IXGBE_READ_REG(&sc->hw, IXGBE_FCTRL)((((struct ixgbe_osdep *)(&sc->hw)->back)->os_memt )->read_4((((struct ixgbe_osdep *)(&sc->hw)->back )->os_memh), (0x05080))); | |||
| 1533 | fctrl &= ~(IXGBE_FCTRL_MPE0x00000100 | IXGBE_FCTRL_UPE0x00000200); | |||
| 1534 | ifp->if_flags &= ~IFF_ALLMULTI0x200; | |||
| 1535 | ||||
| 1536 | if (ifp->if_flags & IFF_PROMISC0x100 || ac->ac_multirangecnt > 0 || | |||
| 1537 | ac->ac_multicnt > MAX_NUM_MULTICAST_ADDRESSES128) { | |||
| 1538 | ifp->if_flags |= IFF_ALLMULTI0x200; | |||
| 1539 | fctrl |= IXGBE_FCTRL_MPE0x00000100; | |||
| 1540 | if (ifp->if_flags & IFF_PROMISC0x100) | |||
| 1541 | fctrl |= IXGBE_FCTRL_UPE0x00000200; | |||
| 1542 | } else { | |||
| 1543 | ETHER_FIRST_MULTI(step, &sc->arpcom, enm)do { (step).e_enm = ((&(&sc->arpcom)->ac_multiaddrs )->lh_first); do { if ((((enm)) = ((step)).e_enm) != ((void *)0)) ((step)).e_enm = ((((enm)))->enm_list.le_next); } while ( 0); } while ( 0); | |||
| 1544 | while (enm != NULL((void *)0)) { | |||
| 1545 | bcopy(enm->enm_addrlo, | |||
| 1546 | &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS6], | |||
| 1547 | IXGBE_ETH_LENGTH_OF_ADDRESS6); | |||
| 1548 | mcnt++; | |||
| 1549 | ||||
| 1550 | ETHER_NEXT_MULTI(step, enm)do { if (((enm) = (step).e_enm) != ((void *)0)) (step).e_enm = (((enm))->enm_list.le_next); } while ( 0); | |||
| 1551 | } | |||
| 1552 | ||||
| 1553 | update_ptr = mta; | |||
| 1554 | sc->hw.mac.ops.update_mc_addr_list(&sc->hw, update_ptr, mcnt, | |||
| 1555 | ixgbe_mc_array_itr, TRUE1); | |||
| 1556 | } | |||
| 1557 | ||||
| 1558 | IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, fctrl)((((struct ixgbe_osdep *)(&sc->hw)->back)->os_memt )->write_4((((struct ixgbe_osdep *)(&sc->hw)->back )->os_memh), (0x05080), (fctrl))); | |||
| 1559 | } | |||
| 1560 | ||||
| 1561 | /* | |||
| 1562 | * This is an iterator function needed by the multicast | |||
| 1563 | * shared code. It feeds the shared code routine the | |||
| 1564 | * addresses in the mta array built by ixgbe_iff() one by one. | |||
| 1565 | */ | |||
| 1566 | uint8_t * | |||
| 1567 | ixgbe_mc_array_itr(struct ixgbe_hw *hw, uint8_t **update_ptr, uint32_t *vmdq) | |||
| 1568 | { | |||
| 1569 | uint8_t *addr = *update_ptr; | |||
| 1570 | uint8_t *newptr; | |||
| 1571 | *vmdq = 0; | |||
| 1572 | ||||
| 1573 | newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS6; | |||
| 1574 | *update_ptr = newptr; | |||
| 1575 | return addr; | |||
| 1576 | } | |||
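| | /* | |||
| |  * A minimal sketch of how the shared code consumes this iterator | |||
| |  * (modelled on ixgbe_update_mc_addr_list_generic(); not verbatim): | |||
| |  * | |||
| |  *	update_ptr = mta; | |||
| |  *	while (mc_addr_count--) | |||
| |  *		ixgbe_set_mta(hw, next(hw, &update_ptr, &vmdq)); | |||
| |  */ | |||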
| 1577 | ||||
| 1578 | void | |||
| 1579 | ixgbe_update_link_status(struct ix_softc *sc) | |||
| 1580 | { | |||
| 1581 | struct ifnet *ifp = &sc->arpcom.ac_if; | |||
| 1582 | int link_state = LINK_STATE_DOWN2; | |||
| 1583 | ||||
| 1584 | splassert(IPL_NET)do { if (splassert_ctl > 0) { splassert_check(0x4, __func__ ); } } while (0); | |||
| 1585 | KERNEL_ASSERT_LOCKED()((_kernel_lock_held()) ? (void)0 : __assert("diagnostic ", "/usr/src/sys/dev/pci/if_ix.c" , 1585, "_kernel_lock_held()")); | |||
| 1586 | ||||
| 1587 | ixgbe_check_link(&sc->hw, &sc->link_speed, &sc->link_up, 0); | |||
| 1588 | ||||
| 1589 | ifp->if_baudrateif_data.ifi_baudrate = 0; | |||
| 1590 | if (sc->link_up) { | |||
| 1591 | link_state = LINK_STATE_FULL_DUPLEX6; | |||
| 1592 | ||||
| 1593 | switch (sc->link_speed) { | |||
| 1594 | case IXGBE_LINK_SPEED_UNKNOWN0: | |||
| 1595 | ifp->if_baudrateif_data.ifi_baudrate = 0; | |||
| 1596 | break; | |||
| 1597 | case IXGBE_LINK_SPEED_100_FULL0x0008: | |||
| 1598 | ifp->if_baudrateif_data.ifi_baudrate = IF_Mbps(100)((((100) * 1000ULL) * 1000ULL)); | |||
| 1599 | break; | |||
| 1600 | case IXGBE_LINK_SPEED_1GB_FULL0x0020: | |||
| 1601 | ifp->if_baudrateif_data.ifi_baudrate = IF_Gbps(1)((((((1) * 1000ULL) * 1000ULL) * 1000ULL))); | |||
| 1602 | break; | |||
| 1603 | case IXGBE_LINK_SPEED_10GB_FULL0x0080: | |||
| 1604 | ifp->if_baudrateif_data.ifi_baudrate = IF_Gbps(10)((((((10) * 1000ULL) * 1000ULL) * 1000ULL))); | |||
| 1605 | break; | |||
| 1606 | } | |||
| 1607 | ||||
| 1608 | /* Update any Flow Control changes */ | |||
| 1609 | sc->hw.mac.ops.fc_enable(&sc->hw); | |||
| 1610 | } | |||
| 1611 | if (ifp->if_link_stateif_data.ifi_link_state != link_state) { | |||
| 1612 | ifp->if_link_stateif_data.ifi_link_state = link_state; | |||
| 1613 | if_link_state_change(ifp); | |||
| 1614 | } | |||
| 1615 | } | |||
| 1616 | ||||
| 1617 | ||||
| 1618 | /********************************************************************* | |||
| 1619 | * | |||
| 1620 | * This routine disables all traffic on the adapter by issuing a | |||
| 1621 | * global reset on the MAC and deallocates TX/RX buffers. | |||
| 1622 | * | |||
| 1623 | **********************************************************************/ | |||
| 1624 | ||||
| 1625 | void | |||
| 1626 | ixgbe_stop(void *arg) | |||
| 1627 | { | |||
| 1628 | struct ix_softc *sc = arg; | |||
| 1629 | struct ifnet *ifp = &sc->arpcom.ac_if; | |||
| 1630 | int i; | |||
| 1631 | ||||
| 1632 | /* Tell the stack that the interface is no longer active */ | |||
| 1633 | ifp->if_flags &= ~IFF_RUNNING0x40; | |||
| 1634 | ||||
| 1635 | #if NKSTAT1 > 0 | |||
| 1636 | timeout_del(&sc->sc_kstat_tmo); | |||
| 1637 | #endif | |||
| 1638 | ifp->if_timer = 0; | |||
| 1639 | ||||
| 1640 | INIT_DEBUGOUT("ixgbe_stop: begin\n")if (0) printf("ixgbe_stop: begin\n" "\n"); | |||
| 1641 | ixgbe_disable_intr(sc); | |||
| 1642 | ||||
| 1643 | sc->hw.mac.ops.reset_hw(&sc->hw); | |||
| 1644 | sc->hw.adapter_stopped = FALSE0; | |||
| 1645 | sc->hw.mac.ops.stop_adapter(&sc->hw); | |||
| 1646 | if (sc->hw.mac.type == ixgbe_mac_82599EB) | |||
| 1647 | sc->hw.mac.ops.stop_mac_link_on_d3(&sc->hw); | |||
| 1648 | /* Turn off the laser */ | |||
| 1649 | if (sc->hw.mac.ops.disable_tx_laser) | |||
| 1650 | sc->hw.mac.ops.disable_tx_laser(&sc->hw); | |||
| 1651 | ||||
| 1652 | /* reprogram the RAR[0] in case user changed it. */ | |||
| 1653 | ixgbe_set_rar(&sc->hw, 0, sc->hw.mac.addr, 0, IXGBE_RAH_AV0x80000000); | |||
| 1654 | ||||
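| | 	/* | |||
| | 	 * Wait for any running interrupt handlers and pending queue | |||
| | 	 * starts to drain before the rings are torn down below. | |||
| | 	 */ | |||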
| 1655 | intr_barrier(sc->tag); | |||
| 1656 | for (i = 0; i < sc->num_queues; i++) { | |||
| 1657 | struct ifqueue *ifq = ifp->if_ifqs[i]; | |||
| 1658 | ifq_barrier(ifq); | |||
| 1659 | ifq_clr_oactive(ifq); | |||
| 1660 | ||||
| 1661 | if (sc->queues[i].tag != NULL((void *)0)) | |||
| 1662 | intr_barrier(sc->queues[i].tag); | |||
| 1663 | timeout_del(&sc->rx_rings[i].rx_refill); | |||
| 1664 | } | |||
| 1665 | ||||
| 1666 | KASSERT((ifp->if_flags & IFF_RUNNING) == 0)(((ifp->if_flags & 0x40) == 0) ? (void)0 : __assert("diagnostic " , "/usr/src/sys/dev/pci/if_ix.c", 1666, "(ifp->if_flags & IFF_RUNNING) == 0" )); | |||
| 1667 | ||||
| 1668 | /* Should we really clear all structures on stop? */ | |||
| 1669 | ixgbe_free_transmit_structures(sc); | |||
| 1670 | ixgbe_free_receive_structures(sc); | |||
| 1671 | ||||
| 1672 | ixgbe_update_link_status(sc); | |||
| 1673 | } | |||
| 1674 | ||||
| 1675 | ||||
| 1676 | /********************************************************************* | |||
| 1677 | * | |||
| 1678 | * Determine hardware revision. | |||
| 1679 | * | |||
| 1680 | **********************************************************************/ | |||
| 1681 | void | |||
| 1682 | ixgbe_identify_hardware(struct ix_softc *sc) | |||
| 1683 | { | |||
| 1684 | struct ixgbe_osdep *os = &sc->osdep; | |||
| 1685 | struct pci_attach_args *pa = &os->os_pa; | |||
| 1686 | uint32_t reg; | |||
| 1687 | ||||
| 1688 | /* Save off the information about this board */ | |||
| 1689 | sc->hw.vendor_id = PCI_VENDOR(pa->pa_id)(((pa->pa_id) >> 0) & 0xffff); | |||
| 1690 | sc->hw.device_id = PCI_PRODUCT(pa->pa_id)(((pa->pa_id) >> 16) & 0xffff); | |||
| 1691 | ||||
| 1692 | reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG0x08); | |||
| 1693 | sc->hw.revision_id = PCI_REVISION(reg)(((reg) >> 0) & 0xff); | |||
| 1694 | ||||
| 1695 | reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG0x2c); | |||
| 1696 | sc->hw.subsystem_vendor_id = PCI_VENDOR(reg)(((reg) >> 0) & 0xffff); | |||
| 1697 | sc->hw.subsystem_device_id = PCI_PRODUCT(reg)(((reg) >> 16) & 0xffff); | |||
| 1698 | ||||
| 1699 | /* We need this here to set the num_segs below */ | |||
| 1700 | ixgbe_set_mac_type(&sc->hw); | |||
| 1701 | ||||
| 1702 | /* Pick up the 82599 and VF settings */ | |||
| 1703 | if (sc->hw.mac.type != ixgbe_mac_82598EB) | |||
| 1704 | sc->hw.phy.smart_speed = ixgbe_smart_speed; | |||
| 1705 | sc->num_segs = IXGBE_82599_SCATTER32; | |||
| 1706 | } | |||
| 1707 | ||||
| 1708 | /********************************************************************* | |||
| 1709 | * | |||
| 1710 | * Setup the Legacy or MSI Interrupt handler | |||
| 1711 | * | |||
| 1712 | **********************************************************************/ | |||
| 1713 | int | |||
| 1714 | ixgbe_allocate_legacy(struct ix_softc *sc) | |||
| 1715 | { | |||
| 1716 | struct ixgbe_osdep *os = &sc->osdep; | |||
| 1717 | struct pci_attach_args *pa = &os->os_pa; | |||
| 1718 | const char *intrstr = NULL((void *)0); | |||
| 1719 | pci_chipset_tag_t pc = pa->pa_pc; | |||
| 1720 | pci_intr_handle_t ih; | |||
| 1721 | ||||
| 1722 | /* We allocate a single interrupt resource */ | |||
| 1723 | if (pci_intr_map_msi(pa, &ih) != 0 && | |||
| 1724 | pci_intr_map(pa, &ih) != 0) { | |||
| 1725 | printf(": couldn't map interrupt\n"); | |||
| 1726 | return (ENXIO6); | |||
| 1727 | } | |||
| 1728 | ||||
| 1729 | #if 0 | |||
| 1730 | /* XXX */ | |||
| 1731 | /* Tasklets for Link, SFP and Multispeed Fiber */ | |||
| 1732 | TASK_INIT(&sc->link_task, 0, ixgbe_handle_link, sc); | |||
| 1733 | TASK_INIT(&sc->mod_task, 0, ixgbe_handle_mod, sc); | |||
| 1734 | TASK_INIT(&sc->msf_task, 0, ixgbe_handle_msf, sc); | |||
| 1735 | #endif | |||
| 1736 | ||||
| 1737 | intrstr = pci_intr_string(pc, ih); | |||
| 1738 | sc->tag = pci_intr_establish(pc, ih, IPL_NET0x4 | IPL_MPSAFE0x100, | |||
| 1739 | ixgbe_legacy_intr, sc, sc->dev.dv_xname); | |||
| 1740 | if (sc->tag == NULL((void *)0)) { | |||
| 1741 | printf(": couldn't establish interrupt"); | |||
| 1742 | if (intrstr != NULL((void *)0)) | |||
| 1743 | printf(" at %s", intrstr); | |||
| 1744 | printf("\n"); | |||
| 1745 | return (ENXIO6); | |||
| 1746 | } | |||
| 1747 | printf(": %s", intrstr); | |||
| 1748 | ||||
| 1749 | /* For simplicity in the handlers */ | |||
| 1750 | sc->que_mask = IXGBE_EIMS_ENABLE_MASK( 0x0000FFFF | 0x00100000 | 0x40000000 | 0x80000000); | |||
| 1751 | ||||
| 1752 | return (0); | |||
| 1753 | } | |||
| 1754 | ||||
| 1755 | /********************************************************************* | |||
| 1756 | * | |||
| 1757 | * Setup the MSI-X Interrupt handlers | |||
| 1758 | * | |||
| 1759 | **********************************************************************/ | |||
| 1760 | int | |||
| 1761 | ixgbe_allocate_msix(struct ix_softc *sc) | |||
| 1762 | { | |||
| 1763 | struct ixgbe_osdep *os = &sc->osdep; | |||
| 1764 | struct pci_attach_args *pa = &os->os_pa; | |||
| 1765 | int i = 0, error = 0; | |||
| 1766 | struct ix_queue *que; | |||
| 1767 | pci_intr_handle_t ih; | |||
| 1768 | ||||
| 1769 | for (i = 0, que = sc->queues; i < sc->num_queues; i++, que++) { | |||
| 1770 | if (pci_intr_map_msix(pa, i, &ih)) { | |||
| 1771 | printf("ixgbe_allocate_msix: " | |||
| 1772 | "pci_intr_map_msix vec %d failed\n", i); | |||
| 1773 | error = ENOMEM12; | |||
| 1774 | goto fail; | |||
| 1775 | } | |||
| 1776 | ||||
| 1777 | que->tag = pci_intr_establish_cpu(pa->pa_pc, ih, | |||
| 1778 | IPL_NET0x4 | IPL_MPSAFE0x100, intrmap_cpu(sc->sc_intrmap, i), | |||
| 1779 | ixgbe_queue_intr, que, que->name); | |||
| 1780 | if (que->tag == NULL((void *)0)) { | |||
| 1781 | printf("ixgbe_allocate_msix: " | |||
| 1782 | "pci_intr_establish vec %d failed\n", i); | |||
| 1783 | error = ENOMEM12; | |||
| 1784 | goto fail; | |||
| 1785 | } | |||
| 1786 | ||||
| 1787 | que->msix = i; | |||
| 1788 | } | |||
| 1789 | ||||
| 1790 | /* Now map the last MSI-X vector, used for link status/control */ | |||
| 1791 | if (pci_intr_map_msix(pa, i, &ih)) { | |||
| 1792 | printf("ixgbe_allocate_msix: " | |||
| 1793 | "pci_intr_map_msix link vector failed\n"); | |||
| 1794 | error = ENOMEM12; | |||
| 1795 | goto fail; | |||
| 1796 | } | |||
| 1797 | ||||
| 1798 | sc->tag = pci_intr_establish(pa->pa_pc, ih, IPL_NET0x4 | IPL_MPSAFE0x100, | |||
| 1799 | ixgbe_link_intr, sc, sc->dev.dv_xname); | |||
| 1800 | if (sc->tag == NULL((void *)0)) { | |||
| 1801 | printf("ixgbe_allocate_msix: " | |||
| 1802 | "pci_intr_establish link vector failed\n"); | |||
| 1803 | error = ENOMEM12; | |||
| 1804 | goto fail; | |||
| 1805 | } | |||
| 1806 | sc->linkvec = i; | |||
| 1807 | printf(", %s, %d queue%s", pci_intr_string(pa->pa_pc, ih), | |||
| 1808 | i, (i > 1) ? "s" : ""); | |||
| 1809 | ||||
| 1810 | return (0); | |||
| 1811 | fail: | |||
| 1812 | for (que = sc->queues; i > 0; i--, que++) { | |||
| 1813 | if (que->tag == NULL((void *)0)) | |||
| 1814 | continue; | |||
| 1815 | pci_intr_disestablish(pa->pa_pc, que->tag); | |||
| 1816 | que->tag = NULL((void *)0); | |||
| 1817 | } | |||
| 1818 | ||||
| 1819 | return (error); | |||
| 1820 | } | |||
| 1821 | ||||
| 1822 | void | |||
| 1823 | ixgbe_setup_msix(struct ix_softc *sc) | |||
| 1824 | { | |||
| 1825 | struct ixgbe_osdep *os = &sc->osdep; | |||
| 1826 | struct pci_attach_args *pa = &os->os_pa; | |||
| 1827 | int nmsix; | |||
| 1828 | unsigned int maxq; | |||
| 1829 | ||||
| 1830 | if (!ixgbe_enable_msix) | |||
| 1831 | return; | |||
| 1832 | ||||
| 1833 | nmsix = pci_intr_msix_count(pa); | |||
| 1834 | if (nmsix <= 1) | |||
| 1835 | return; | |||
| 1836 | ||||
| 1837 | /* give one vector to events */ | |||
| 1838 | nmsix--; | |||
| 1839 | ||||
| 1840 | /* XXX the number of queues is limited to what we can keep stats on */ | |||
| 1841 | maxq = (sc->hw.mac.type == ixgbe_mac_82598EB) ? 8 : 16; | |||
| 1842 | ||||
| 1843 | sc->sc_intrmap = intrmap_create(&sc->dev, nmsix, maxq, 0); | |||
| 1844 | sc->num_queues = intrmap_count(sc->sc_intrmap); | |||
| 1845 | } | |||
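| | /* | |||
| |  * The resulting vector layout, consumed by ixgbe_allocate_msix() | |||
| |  * above, is one vector per queue (0 .. num_queues - 1) followed by | |||
| |  * a single link/admin vector, which is why one vector is reserved | |||
| |  * for events before the interrupt map is sized. | |||
| |  */ | |||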
| 1846 | ||||
| 1847 | int | |||
| 1848 | ixgbe_allocate_pci_resources(struct ix_softc *sc) | |||
| 1849 | { | |||
| 1850 | struct ixgbe_osdep *os = &sc->osdep; | |||
| 1851 | struct pci_attach_args *pa = &os->os_pa; | |||
| 1852 | int val; | |||
| 1853 | ||||
| 1854 | val = pci_conf_read(pa->pa_pc, pa->pa_tag, PCIR_BAR(0)(0x10 + (0) * 4)); | |||
| 1855 | if (PCI_MAPREG_TYPE(val)((val) & 0x00000001) != PCI_MAPREG_TYPE_MEM0x00000000) { | |||
| 1856 | printf(": mmba is not mem space\n"); | |||
| 1857 | return (ENXIO6); | |||
| 1858 | } | |||
| 1859 | ||||
| 1860 | if (pci_mapreg_map(pa, PCIR_BAR(0)(0x10 + (0) * 4), PCI_MAPREG_MEM_TYPE(val)((val) & 0x00000006), 0, | |||
| 1861 | &os->os_memt, &os->os_memh, &os->os_membase, &os->os_memsize, 0)) { | |||
| 1862 | printf(": cannot find mem space\n"); | |||
| 1863 | return (ENXIO6); | |||
| 1864 | } | |||
| 1865 | sc->hw.hw_addr = (uint8_t *)os->os_membase; | |||
| 1866 | ||||
| 1867 | /* Legacy defaults */ | |||
| 1868 | sc->num_queues = 1; | |||
| 1869 | sc->hw.back = os; | |||
| 1870 | ||||
| 1871 | /* Now set up MSI-X, if available; this determines the number of queues/vectors used. */ | |||
| 1872 | ixgbe_setup_msix(sc); | |||
| 1873 | ||||
| 1874 | return (0); | |||
| 1875 | } | |||
| 1876 | ||||
| 1877 | void | |||
| 1878 | ixgbe_free_pci_resources(struct ix_softc * sc) | |||
| 1879 | { | |||
| 1880 | struct ixgbe_osdep *os = &sc->osdep; | |||
| 1881 | struct pci_attach_args *pa = &os->os_pa; | |||
| 1882 | struct ix_queue *que = sc->queues; | |||
| 1883 | int i; | |||
| 1884 | ||||
| 1885 | /* Release all msix queue resources: */ | |||
| 1886 | for (i = 0; i < sc->num_queues; i++, que++) { | |||
| 1887 | if (que->tag) | |||
| 1888 | pci_intr_disestablish(pa->pa_pc, que->tag); | |||
| 1889 | que->tag = NULL((void *)0); | |||
| 1890 | } | |||
| 1891 | ||||
| 1892 | if (sc->tag) | |||
| 1893 | pci_intr_disestablish(pa->pa_pc, sc->tag); | |||
| 1894 | sc->tag = NULL((void *)0); | |||
| 1895 | if (os->os_membase != 0) | |||
| 1896 | bus_space_unmap(os->os_memt, os->os_memh, os->os_memsize); | |||
| 1897 | os->os_membase = 0; | |||
| 1898 | } | |||
| 1899 | ||||
| 1900 | /********************************************************************* | |||
| 1901 | * | |||
| 1902 | * Setup networking device structure and register an interface. | |||
| 1903 | * | |||
| 1904 | **********************************************************************/ | |||
| 1905 | void | |||
| 1906 | ixgbe_setup_interface(struct ix_softc *sc) | |||
| 1907 | { | |||
| 1908 | struct ifnet *ifp = &sc->arpcom.ac_if; | |||
| 1909 | int i; | |||
| 1910 | ||||
| 1911 | strlcpy(ifp->if_xname, sc->dev.dv_xname, IFNAMSIZ16); | |||
| 1912 | ifp->if_softc = sc; | |||
| 1913 | ifp->if_flags = IFF_BROADCAST0x2 | IFF_SIMPLEX0x800 | IFF_MULTICAST0x8000; | |||
| 1914 | ifp->if_xflags = IFXF_MPSAFE0x1; | |||
| 1915 | ifp->if_ioctl = ixgbe_ioctl; | |||
| 1916 | ifp->if_qstart = ixgbe_start; | |||
| 1917 | ifp->if_timer = 0; | |||
| 1918 | ifp->if_watchdog = ixgbe_watchdog; | |||
| 1919 | ifp->if_hardmtu = IXGBE_MAX_FRAME_SIZE9216 - | |||
| 1920 | ETHER_HDR_LEN((6 * 2) + 2) - ETHER_CRC_LEN4; | |||
| 1921 | ifq_init_maxlen(&ifp->if_snd, sc->num_tx_desc - 1); | |||
| 1922 | ||||
| 1923 | ifp->if_capabilitiesif_data.ifi_capabilities = IFCAP_VLAN_MTU0x00000010; | |||
| 1924 | ||||
| 1925 | #if NVLAN1 > 0 | |||
| 1926 | ifp->if_capabilitiesif_data.ifi_capabilities |= IFCAP_VLAN_HWTAGGING0x00000020; | |||
| 1927 | #endif | |||
| 1928 | ||||
| 1929 | ifp->if_capabilitiesif_data.ifi_capabilities |= IFCAP_CSUM_TCPv40x00000002 | IFCAP_CSUM_UDPv40x00000004; | |||
| 1930 | ifp->if_capabilitiesif_data.ifi_capabilities |= IFCAP_CSUM_TCPv60x00000080 | IFCAP_CSUM_UDPv60x00000100; | |||
| 1931 | ifp->if_capabilitiesif_data.ifi_capabilities |= IFCAP_CSUM_IPv40x00000001; | |||
| 1932 | ||||
| 1933 | ifp->if_capabilitiesif_data.ifi_capabilities |= IFCAP_TSOv40x00001000 | IFCAP_TSOv60x00002000; | |||
| 1934 | if (sc->hw.mac.type != ixgbe_mac_82598EB) { | |||
| 1935 | ifp->if_xflags |= IFXF_LRO0x200; | |||
| 1936 | ifp->if_capabilitiesif_data.ifi_capabilities |= IFCAP_LRO0x00004000; | |||
| 1937 | } | |||
| 1938 | ||||
| 1939 | /* | |||
| 1940 | * Specify the media types supported by this sc and register | |||
| 1941 | * callbacks to update media and link information | |||
| 1942 | */ | |||
| 1943 | ifmedia_init(&sc->media, IFM_IMASK0xff00000000000000ULL, ixgbe_media_change, | |||
| 1944 | ixgbe_media_status); | |||
| 1945 | ixgbe_add_media_types(sc); | |||
| 1946 | ifmedia_set(&sc->media, IFM_ETHER0x0000000000000100ULL | IFM_AUTO0ULL); | |||
| 1947 | ||||
| 1948 | if_attach(ifp); | |||
| 1949 | ether_ifattach(ifp); | |||
| 1950 | ||||
| 1951 | if_attach_queues(ifp, sc->num_queues); | |||
| 1952 | if_attach_iqueues(ifp, sc->num_queues); | |||
| 1953 | for (i = 0; i < sc->num_queues; i++) { | |||
| 1954 | struct ifqueue *ifq = ifp->if_ifqs[i]; | |||
| 1955 | struct ifiqueue *ifiq = ifp->if_iqs[i]; | |||
| 1956 | struct tx_ring *txr = &sc->tx_rings[i]; | |||
| 1957 | struct rx_ring *rxr = &sc->rx_rings[i]; | |||
| 1958 | ||||
| 1959 | ifq->ifq_softc_ifq_ptr._ifq_softc = txr; | |||
| 1960 | txr->ifq = ifq; | |||
| 1961 | ||||
| 1962 | ifiq->ifiq_softc_ifiq_ptr._ifiq_softc = rxr; | |||
| 1963 | rxr->ifiq = ifiq; | |||
| 1964 | ||||
| 1965 | #if NKSTAT1 > 0 | |||
| 1966 | ix_txq_kstats(sc, txr); | |||
| 1967 | ix_rxq_kstats(sc, rxr); | |||
| 1968 | #endif | |||
| 1969 | } | |||
| 1970 | ||||
| 1971 | sc->max_frame_size = IXGBE_MAX_FRAME_SIZE9216; | |||
| 1972 | } | |||
| 1973 | ||||
| 1974 | void | |||
| 1975 | ixgbe_add_media_types(struct ix_softc *sc) | |||
| 1976 | { | |||
| 1977 | struct ixgbe_hw *hw = &sc->hw; | |||
| 1978 | uint64_t layer; | |||
| 1979 | ||||
| 1980 | sc->phy_layer = hw->mac.ops.get_supported_physical_layer(hw); | |||
| 1981 | layer = sc->phy_layer; | |||
| 1982 | ||||
| 1983 | if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T0x00001) | |||
| 1984 | ifmedia_add(&sc->media, IFM_ETHER0x0000000000000100ULL | IFM_10G_T22, 0, NULL((void *)0)); | |||
| 1985 | if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T0x00002) | |||
| 1986 | ifmedia_add(&sc->media, IFM_ETHER0x0000000000000100ULL | IFM_1000_T16, 0, NULL((void *)0)); | |||
| 1987 | if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX0x00004) | |||
| 1988 | ifmedia_add(&sc->media, IFM_ETHER0x0000000000000100ULL | IFM_100_TX6, 0, NULL((void *)0)); | |||
| 1989 | if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU0x00008 || | |||
| 1990 | layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA0x02000) | |||
| 1991 | ifmedia_add(&sc->media, IFM_ETHER0x0000000000000100ULL | IFM_10G_SFP_CU23, 0, NULL((void *)0)); | |||
| 1992 | if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR0x00010) { | |||
| 1993 | ifmedia_add(&sc->media, IFM_ETHER0x0000000000000100ULL | IFM_10G_LR18, 0, NULL((void *)0)); | |||
| 1994 | if (hw->phy.multispeed_fiber) | |||
| 1995 | ifmedia_add(&sc->media, IFM_ETHER0x0000000000000100ULL | IFM_1000_LX14, 0, | |||
| 1996 | NULL((void *)0)); | |||
| 1997 | } | |||
| 1998 | if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR0x00040) { | |||
| 1999 | ifmedia_add(&sc->media, IFM_ETHER0x0000000000000100ULL | IFM_10G_SR19, 0, NULL((void *)0)); | |||
| 2000 | if (hw->phy.multispeed_fiber) | |||
| 2001 | ifmedia_add(&sc->media, IFM_ETHER0x0000000000000100ULL | IFM_1000_SX11, 0, | |||
| 2002 | NULL((void *)0)); | |||
| 2003 | } else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX0x04000) | |||
| 2004 | ifmedia_add(&sc->media, IFM_ETHER0x0000000000000100ULL | IFM_1000_SX11, 0, NULL((void *)0)); | |||
| 2005 | if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX40x00100) | |||
| 2006 | ifmedia_add(&sc->media, IFM_ETHER0x0000000000000100ULL | IFM_10G_CX420, 0, NULL((void *)0)); | |||
| 2007 | if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR0x00800) | |||
| 2008 | ifmedia_add(&sc->media, IFM_ETHER0x0000000000000100ULL | IFM_10G_KR30, 0, NULL((void *)0)); | |||
| 2009 | if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX40x00080) | |||
| 2010 | ifmedia_add(&sc->media, IFM_ETHER0x0000000000000100ULL | IFM_10G_KX429, 0, NULL((void *)0)); | |||
| 2011 | if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX0x00200) | |||
| 2012 | ifmedia_add(&sc->media, IFM_ETHER0x0000000000000100ULL | IFM_1000_KX28, 0, NULL((void *)0)); | |||
| 2013 | if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX0x10000) | |||
| 2014 | ifmedia_add(&sc->media, IFM_ETHER0x0000000000000100ULL | IFM_2500_KX33, 0, NULL((void *)0)); | |||
| 2015 | ||||
| 2016 | if (hw->device_id == IXGBE_DEV_ID_82598AT0x10C8) { | |||
| 2017 | ifmedia_add(&sc->media, IFM_ETHER0x0000000000000100ULL | IFM_1000_T16 | IFM_FDX0x0000010000000000ULL, 0, | |||
| 2018 | NULL((void *)0)); | |||
| 2019 | ifmedia_add(&sc->media, IFM_ETHER0x0000000000000100ULL | IFM_1000_T16, 0, NULL((void *)0)); | |||
| 2020 | } | |||
| 2021 | ||||
| 2022 | ifmedia_add(&sc->media, IFM_ETHER0x0000000000000100ULL | IFM_AUTO0ULL, 0, NULL((void *)0)); | |||
| 2023 | } | |||
| 2024 | ||||
| 2025 | void | |||
| 2026 | ixgbe_config_link(struct ix_softc *sc) | |||
| 2027 | { | |||
| 2028 | uint32_t autoneg, err = 0; | |||
| 2029 | bool_Bool negotiate; | |||
| 2030 | ||||
| 2031 | if (ixgbe_is_sfp(&sc->hw)) { | |||
| 2032 | if (sc->hw.phy.multispeed_fiber) { | |||
| 2033 | sc->hw.mac.ops.setup_sfp(&sc->hw); | |||
| 2034 | if (sc->hw.mac.ops.enable_tx_laser) | |||
| 2035 | sc->hw.mac.ops.enable_tx_laser(&sc->hw); | |||
| 2036 | ixgbe_handle_msf(sc); | |||
| 2037 | } else | |||
| 2038 | ixgbe_handle_mod(sc); | |||
| 2039 | } else { | |||
| 2040 | if (sc->hw.mac.ops.check_link) | |||
| 2041 | err = sc->hw.mac.ops.check_link(&sc->hw, &autoneg, | |||
| 2042 | &sc->link_up, FALSE0); | |||
| 2043 | if (err) | |||
| 2044 | return; | |||
| 2045 | autoneg = sc->hw.phy.autoneg_advertised; | |||
| 2046 | if ((!autoneg) && (sc->hw.mac.ops.get_link_capabilities)) | |||
| 2047 | err = sc->hw.mac.ops.get_link_capabilities(&sc->hw, | |||
| 2048 | &autoneg, &negotiate); | |||
| 2049 | if (err) | |||
| 2050 | return; | |||
| 2051 | if (sc->hw.mac.ops.setup_link) | |||
| 2052 | sc->hw.mac.ops.setup_link(&sc->hw, | |||
| 2053 | autoneg, sc->link_up); | |||
| 2054 | } | |||
| 2055 | } | |||
| 2056 | ||||
| 2057 | /******************************************************************** | |||
| 2058 | * Manage DMA'able memory. | |||
| 2059 | *******************************************************************/ | |||
| 2060 | int | |||
| 2061 | ixgbe_dma_malloc(struct ix_softc *sc, bus_size_t size, | |||
| 2062 | struct ixgbe_dma_alloc *dma, int mapflags) | |||
| 2063 | { | |||
| 2064 | struct ifnet *ifp = &sc->arpcom.ac_if; | |||
| 2065 | struct ixgbe_osdep *os = &sc->osdep; | |||
| 2066 | int r; | |||
| 2067 | ||||
| 2068 | dma->dma_tag = os->os_pa.pa_dmat; | |||
| 2069 | r = bus_dmamap_create(dma->dma_tag, size, 1,(*(dma->dma_tag)->_dmamap_create)((dma->dma_tag), (size ), (1), (size), (0), (0x0001), (&dma->dma_map)) | |||
| 2070 | size, 0, BUS_DMA_NOWAIT, &dma->dma_map)(*(dma->dma_tag)->_dmamap_create)((dma->dma_tag), (size ), (1), (size), (0), (0x0001), (&dma->dma_map)); | |||
| 2071 | if (r != 0) { | |||
| 2072 | printf("%s: ixgbe_dma_malloc: bus_dmamap_create failed; " | |||
| 2073 | "error %u\n", ifp->if_xname, r); | |||
| 2074 | goto fail_0; | |||
| 2075 | } | |||
| 2076 | ||||
| 2077 | r = bus_dmamem_alloc(dma->dma_tag, size, PAGE_SIZE, 0, &dma->dma_seg,(*(dma->dma_tag)->_dmamem_alloc)((dma->dma_tag), (size ), ((1 << 12)), (0), (&dma->dma_seg), (1), (& dma->dma_nseg), (0x0001)) | |||
| 2078 | 1, &dma->dma_nseg, BUS_DMA_NOWAIT)(*(dma->dma_tag)->_dmamem_alloc)((dma->dma_tag), (size ), ((1 << 12)), (0), (&dma->dma_seg), (1), (& dma->dma_nseg), (0x0001)); | |||
| 2079 | if (r != 0) { | |||
| 2080 | printf("%s: ixgbe_dma_malloc: bus_dmamem_alloc failed; " | |||
| 2081 | "error %u\n", ifp->if_xname, r); | |||
| 2082 | goto fail_1; | |||
| 2083 | } | |||
| 2084 | ||||
| 2085 | r = bus_dmamem_map(dma->dma_tag, &dma->dma_seg, dma->dma_nseg, size,(*(dma->dma_tag)->_dmamem_map)((dma->dma_tag), (& dma->dma_seg), (dma->dma_nseg), (size), (&dma->dma_vaddr ), (0x0001)) | |||
| 2086 | &dma->dma_vaddr, BUS_DMA_NOWAIT)(*(dma->dma_tag)->_dmamem_map)((dma->dma_tag), (& dma->dma_seg), (dma->dma_nseg), (size), (&dma->dma_vaddr ), (0x0001)); | |||
| 2087 | if (r != 0) { | |||
| 2088 | printf("%s: ixgbe_dma_malloc: bus_dmamem_map failed; " | |||
| 2089 | "error %u\n", ifp->if_xname, r); | |||
| 2090 | goto fail_2; | |||
| 2091 | } | |||
| 2092 | ||||
| 2093 | r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,(*(dma->dma_tag)->_dmamap_load)((dma->dma_tag), (dma ->dma_map), (dma->dma_vaddr), (size), (((void *)0)), (mapflags | 0x0001)) | |||
| 2094 | size, NULL, mapflags | BUS_DMA_NOWAIT)(*(dma->dma_tag)->_dmamap_load)((dma->dma_tag), (dma ->dma_map), (dma->dma_vaddr), (size), (((void *)0)), (mapflags | 0x0001)); | |||
| 2095 | if (r != 0) { | |||
| 2096 | printf("%s: ixgbe_dma_malloc: bus_dmamap_load failed; " | |||
| 2097 | "error %u\n", ifp->if_xname, r); | |||
| 2098 | goto fail_3; | |||
| 2099 | } | |||
| 2100 | ||||
| 2101 | dma->dma_size = size; | |||
| 2102 | return (0); | |||
| 2103 | fail_3: | |||
| 2104 | bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, size)(*(dma->dma_tag)->_dmamem_unmap)((dma->dma_tag), (dma ->dma_vaddr), (size)); | |||
| 2105 | fail_2: | |||
| 2106 | bus_dmamem_free(dma->dma_tag, &dma->dma_seg, dma->dma_nseg)(*(dma->dma_tag)->_dmamem_free)((dma->dma_tag), (& dma->dma_seg), (dma->dma_nseg)); | |||
| 2107 | fail_1: | |||
| 2108 | bus_dmamap_destroy(dma->dma_tag, dma->dma_map)(*(dma->dma_tag)->_dmamap_destroy)((dma->dma_tag), ( dma->dma_map)); | |||
| 2109 | fail_0: | |||
| 2110 | dma->dma_map = NULL((void *)0); | |||
| 2111 | dma->dma_tag = NULL((void *)0); | |||
| 2112 | return (r); | |||
| 2113 | } | |||
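| | /* | |||
| |  * The allocation follows the usual four-step bus_dma(9) sequence, | |||
| |  * dmamap_create -> dmamem_alloc -> dmamem_map -> dmamap_load, | |||
| |  * unwound in reverse order on failure.  Typical usage, as in | |||
| |  * ixgbe_allocate_queues() below: | |||
| |  * | |||
| |  *	if (ixgbe_dma_malloc(sc, tsize, &txr->txdma, BUS_DMA_NOWAIT)) | |||
| |  *		goto err_tx_desc; | |||
| |  */ | |||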
| 2114 | ||||
| 2115 | void | |||
| 2116 | ixgbe_dma_free(struct ix_softc *sc, struct ixgbe_dma_alloc *dma) | |||
| 2117 | { | |||
| 2118 | if (dma->dma_tag == NULL((void *)0)) | |||
| 2119 | return; | |||
| 2120 | ||||
| 2121 | if (dma->dma_map != NULL((void *)0)) { | |||
| 2122 | bus_dmamap_sync(dma->dma_tag, dma->dma_map, 0,(*(dma->dma_tag)->_dmamap_sync)((dma->dma_tag), (dma ->dma_map), (0), (dma->dma_map->dm_mapsize), (0x02 | 0x08)) | |||
| 2123 | dma->dma_map->dm_mapsize,(*(dma->dma_tag)->_dmamap_sync)((dma->dma_tag), (dma ->dma_map), (0), (dma->dma_map->dm_mapsize), (0x02 | 0x08)) | |||
| 2124 | BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE)(*(dma->dma_tag)->_dmamap_sync)((dma->dma_tag), (dma ->dma_map), (0), (dma->dma_map->dm_mapsize), (0x02 | 0x08)); | |||
| 2125 | bus_dmamap_unload(dma->dma_tag, dma->dma_map)(*(dma->dma_tag)->_dmamap_unload)((dma->dma_tag), (dma ->dma_map)); | |||
| 2126 | bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, dma->dma_size)(*(dma->dma_tag)->_dmamem_unmap)((dma->dma_tag), (dma ->dma_vaddr), (dma->dma_size)); | |||
| 2127 | bus_dmamem_free(dma->dma_tag, &dma->dma_seg, dma->dma_nseg)(*(dma->dma_tag)->_dmamem_free)((dma->dma_tag), (& dma->dma_seg), (dma->dma_nseg)); | |||
| 2128 | bus_dmamap_destroy(dma->dma_tag, dma->dma_map)(*(dma->dma_tag)->_dmamap_destroy)((dma->dma_tag), ( dma->dma_map)); | |||
| 2129 | dma->dma_map = NULL((void *)0); | |||
| 2130 | } | |||
| 2131 | } | |||
| 2132 | ||||
| 2133 | ||||
| 2134 | /********************************************************************* | |||
| 2135 | * | |||
| 2136 | * Allocate memory for the transmit and receive rings, and then | |||
| 2137 | * the descriptors associated with each, called only once at attach. | |||
| 2138 | * | |||
| 2139 | **********************************************************************/ | |||
| 2140 | int | |||
| 2141 | ixgbe_allocate_queues(struct ix_softc *sc) | |||
| 2142 | { | |||
| 2143 | struct ifnet *ifp = &sc->arpcom.ac_if; | |||
| 2144 | struct ix_queue *que; | |||
| 2145 | struct tx_ring *txr; | |||
| 2146 | struct rx_ring *rxr; | |||
| 2147 | int rsize, tsize; | |||
| 2148 | int txconf = 0, rxconf = 0, i; | |||
| 2149 | ||||
| 2150 | /* First allocate the top level queue structs */ | |||
| 2151 | if (!(sc->queues = mallocarray(sc->num_queues, | |||
| 2152 | sizeof(struct ix_queue), M_DEVBUF2, M_NOWAIT0x0002 | M_ZERO0x0008))) { | |||
| 2153 | printf("%s: Unable to allocate queue memory\n", ifp->if_xname); | |||
| 2154 | goto fail; | |||
| 2155 | } | |||
| 2156 | ||||
| 2157 | /* Then allocate the TX ring struct memory */ | |||
| 2158 | if (!(sc->tx_rings = mallocarray(sc->num_queues, | |||
| 2159 | sizeof(struct tx_ring), M_DEVBUF2, M_NOWAIT0x0002 | M_ZERO0x0008))) { | |||
| 2160 | printf("%s: Unable to allocate TX ring memory\n", ifp->if_xname); | |||
| 2161 | goto fail; | |||
| 2162 | } | |||
| 2163 | ||||
| 2164 | /* Next allocate the RX */ | |||
| 2165 | if (!(sc->rx_rings = mallocarray(sc->num_queues, | |||
| 2166 | sizeof(struct rx_ring), M_DEVBUF2, M_NOWAIT0x0002 | M_ZERO0x0008))) { | |||
| 2167 | printf("%s: Unable to allocate RX ring memory\n", ifp->if_xname); | |||
| 2168 | goto rx_fail; | |||
| 2169 | } | |||
| 2170 | ||||
| 2171 | /* For the ring itself */ | |||
| 2172 | tsize = roundup2(sc->num_tx_desc *(((sc->num_tx_desc * sizeof(union ixgbe_adv_tx_desc)) + (128 ) - 1) & ~((128) - 1)) | |||
| 2173 | sizeof(union ixgbe_adv_tx_desc), DBA_ALIGN)(((sc->num_tx_desc * sizeof(union ixgbe_adv_tx_desc)) + (128 ) - 1) & ~((128) - 1)); | |||
| 2174 | ||||
| 2175 | /* | |||
| 2176 | * Now set up the TX queues; txconf tracks how many have been | |||
| 2177 | * set up so far, so that the allocations can be undone | |||
| 2178 | * gracefully if things fail midcourse. | |||
| 2179 | */ | |||
| 2180 | for (i = 0; i < sc->num_queues; i++, txconf++) { | |||
| 2181 | /* Set up some basics */ | |||
| 2182 | txr = &sc->tx_rings[i]; | |||
| 2183 | txr->sc = sc; | |||
| 2184 | txr->me = i; | |||
| 2185 | ||||
| 2186 | if (ixgbe_dma_malloc(sc, tsize, | |||
| 2187 | &txr->txdma, BUS_DMA_NOWAIT0x0001)) { | |||
| 2188 | printf("%s: Unable to allocate TX Descriptor memory\n", | |||
| 2189 | ifp->if_xname); | |||
| 2190 | goto err_tx_desc; | |||
| 2191 | } | |||
| 2192 | txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr; | |||
| 2193 | bzero((void *)txr->tx_base, tsize)__builtin_bzero(((void *)txr->tx_base), (tsize)); | |||
| 2194 | } | |||
| 2195 | ||||
| 2196 | /* | |||
| 2197 | * Next the RX queues... | |||
| 2198 | */ | |||
| 2199 | rsize = roundup2(sc->num_rx_desc *(((sc->num_rx_desc * sizeof(union ixgbe_adv_rx_desc)) + (4096 ) - 1) & ~((4096) - 1)) | |||
| 2200 | sizeof(union ixgbe_adv_rx_desc), 4096)(((sc->num_rx_desc * sizeof(union ixgbe_adv_rx_desc)) + (4096 ) - 1) & ~((4096) - 1)); | |||
| 2201 | for (i = 0; i < sc->num_queues; i++, rxconf++) { | |||
| 2202 | rxr = &sc->rx_rings[i]; | |||
| 2203 | /* Set up some basics */ | |||
| 2204 | rxr->sc = sc; | |||
| 2205 | rxr->me = i; | |||
| 2206 | timeout_set(&rxr->rx_refill, ixgbe_rxrefill, rxr); | |||
| 2207 | ||||
| 2208 | if (ixgbe_dma_malloc(sc, rsize, | |||
| 2209 | &rxr->rxdma, BUS_DMA_NOWAIT0x0001)) { | |||
| 2210 | printf("%s: Unable to allocate RxDescriptor memory\n", | |||
| 2211 | ifp->if_xname); | |||
| 2212 | goto err_rx_desc; | |||
| 2213 | } | |||
| 2214 | rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr; | |||
| 2215 | bzero((void *)rxr->rx_base, rsize)__builtin_bzero(((void *)rxr->rx_base), (rsize)); | |||
| 2216 | } | |||
| 2217 | ||||
| 2218 | /* | |||
| 2219 | * Finally set up the queue holding structs | |||
| 2220 | */ | |||
| 2221 | for (i = 0; i < sc->num_queues; i++) { | |||
| 2222 | que = &sc->queues[i]; | |||
| 2223 | que->sc = sc; | |||
| 2224 | que->txr = &sc->tx_rings[i]; | |||
| 2225 | que->rxr = &sc->rx_rings[i]; | |||
| 2226 | snprintf(que->name, sizeof(que->name), "%s:%d", | |||
| 2227 | sc->dev.dv_xname, i); | |||
| 2228 | } | |||
| 2229 | ||||
| 2230 | return (0); | |||
| 2231 | ||||
| 2232 | err_rx_desc: | |||
| 2233 | for (rxr = sc->rx_rings; rxconf > 0; rxr++, rxconf--) | |||
| 2234 | ixgbe_dma_free(sc, &rxr->rxdma); | |||
| 2235 | err_tx_desc: | |||
| 2236 | for (txr = sc->tx_rings; txconf > 0; txr++, txconf--) | |||
| 2237 | ixgbe_dma_free(sc, &txr->txdma); | |||
| 2238 | free(sc->rx_rings, M_DEVBUF2, sc->num_queues * sizeof(struct rx_ring)); | |||
| 2239 | sc->rx_rings = NULL((void *)0); | |||
| 2240 | rx_fail: | |||
| 2241 | free(sc->tx_rings, M_DEVBUF2, sc->num_queues * sizeof(struct tx_ring)); | |||
| 2242 | sc->tx_rings = NULL((void *)0); | |||
| 2243 | fail: | |||
| 2244 | return (ENOMEM12); | |||
| 2245 | } | |||
| 2246 | ||||
| 2247 | /********************************************************************* | |||
| 2248 | * | |||
| 2249 | * Allocate memory for tx_buffer structures. The tx_buffer stores all | |||
| 2250 | * the information needed to transmit a packet on the wire. This is | |||
| 2251 | * called only once at attach; setup is done on every reset. | |||
| 2252 | * | |||
| 2253 | **********************************************************************/ | |||
| 2254 | int | |||
| 2255 | ixgbe_allocate_transmit_buffers(struct tx_ring *txr) | |||
| 2256 | { | |||
| 2257 | struct ix_softc *sc = txr->sc; | |||
| 2258 | struct ifnet *ifp = &sc->arpcom.ac_if; | |||
| 2259 | struct ixgbe_tx_buf *txbuf; | |||
| 2260 | int error, i; | |||
| 2261 | ||||
| 2262 | if (!(txr->tx_buffers = mallocarray(sc->num_tx_desc, | |||
| 2263 | sizeof(struct ixgbe_tx_buf), M_DEVBUF2, M_NOWAIT0x0002 | M_ZERO0x0008))) { | |||
| 2264 | printf("%s: Unable to allocate tx_buffer memory\n", | |||
| 2265 | ifp->if_xname); | |||
| 2266 | error = ENOMEM12; | |||
| 2267 | goto fail; | |||
| 2268 | } | |||
| 2269 | txr->txtag = txr->txdma.dma_tag; | |||
| 2270 | ||||
| 2271 | /* Create the descriptor buffer dma maps */ | |||
| 2272 | for (i = 0; i < sc->num_tx_desc; i++) { | |||
| 2273 | txbuf = &txr->tx_buffers[i]; | |||
| 2274 | error = bus_dmamap_create(txr->txdma.dma_tag, MAXMCLBYTES,(*(txr->txdma.dma_tag)->_dmamap_create)((txr->txdma. dma_tag), ((64 * 1024)), (sc->num_segs), ((1 << 12)) , (0), (0x0001), (&txbuf->map)) | |||
| 2275 | sc->num_segs, PAGE_SIZE, 0,(*(txr->txdma.dma_tag)->_dmamap_create)((txr->txdma. dma_tag), ((64 * 1024)), (sc->num_segs), ((1 << 12)) , (0), (0x0001), (&txbuf->map)) | |||
| 2276 | BUS_DMA_NOWAIT, &txbuf->map)(*(txr->txdma.dma_tag)->_dmamap_create)((txr->txdma. dma_tag), ((64 * 1024)), (sc->num_segs), ((1 << 12)) , (0), (0x0001), (&txbuf->map)); | |||
| 2277 | ||||
| 2278 | if (error != 0) { | |||
| 2279 | printf("%s: Unable to create TX DMA map\n", | |||
| 2280 | ifp->if_xname); | |||
| 2281 | goto fail; | |||
| 2282 | } | |||
| 2283 | } | |||
| 2284 | ||||
| 2285 | return 0; | |||
| 2286 | fail: | |||
| 2287 | return (error); | |||
| 2288 | } | |||
| 2289 | ||||
| 2290 | /********************************************************************* | |||
| 2291 | * | |||
| 2292 | * Initialize a transmit ring. | |||
| 2293 | * | |||
| 2294 | **********************************************************************/ | |||
| 2295 | int | |||
| 2296 | ixgbe_setup_transmit_ring(struct tx_ring *txr) | |||
| 2297 | { | |||
| 2298 | struct ix_softc *sc = txr->sc; | |||
| 2299 | int error; | |||
| 2300 | ||||
| 2301 | /* Now allocate transmit buffers for the ring */ | |||
| 2302 | if ((error = ixgbe_allocate_transmit_buffers(txr)) != 0) | |||
| 2303 | return (error); | |||
| 2304 | ||||
| 2305 | /* Clear the old ring contents */ | |||
| 2306 | bzero((void *)txr->tx_base,__builtin_bzero(((void *)txr->tx_base), ((sizeof(union ixgbe_adv_tx_desc )) * sc->num_tx_desc)) | |||
| 2307 | (sizeof(union ixgbe_adv_tx_desc)) * sc->num_tx_desc)__builtin_bzero(((void *)txr->tx_base), ((sizeof(union ixgbe_adv_tx_desc )) * sc->num_tx_desc)); | |||
| 2308 | ||||
| 2309 | /* Reset indices */ | |||
| 2310 | txr->next_avail_desc = 0; | |||
| 2311 | txr->next_to_clean = 0; | |||
| 2312 | ||||
| 2313 | bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,(*(txr->txdma.dma_tag)->_dmamap_sync)((txr->txdma.dma_tag ), (txr->txdma.dma_map), (0), (txr->txdma.dma_map->dm_mapsize ), (0x01 | 0x04)) | |||
| 2314 | 0, txr->txdma.dma_map->dm_mapsize,(*(txr->txdma.dma_tag)->_dmamap_sync)((txr->txdma.dma_tag ), (txr->txdma.dma_map), (0), (txr->txdma.dma_map->dm_mapsize ), (0x01 | 0x04)) | |||
| 2315 | BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE)(*(txr->txdma.dma_tag)->_dmamap_sync)((txr->txdma.dma_tag ), (txr->txdma.dma_map), (0), (txr->txdma.dma_map->dm_mapsize ), (0x01 | 0x04)); | |||
| 2316 | ||||
| 2317 | return (0); | |||
| 2318 | } | |||

/*********************************************************************
 *
 *  Initialize all transmit rings.
 *
 **********************************************************************/
int
ixgbe_setup_transmit_structures(struct ix_softc *sc)
{
	struct tx_ring	*txr = sc->tx_rings;
	int		 i, error;

	for (i = 0; i < sc->num_queues; i++, txr++) {
		if ((error = ixgbe_setup_transmit_ring(txr)) != 0)
			goto fail;
	}

	return (0);
fail:
	ixgbe_free_transmit_structures(sc);
	return (error);
}

/*********************************************************************
 *
 *  Enable transmit unit.
 *
 **********************************************************************/
void
ixgbe_initialize_transmit_units(struct ix_softc *sc)
{
	struct ifnet	*ifp = &sc->arpcom.ac_if;
	struct tx_ring	*txr;
	struct ixgbe_hw	*hw = &sc->hw;
	int		 i;
	uint64_t	 tdba;
	uint32_t	 txctrl;
	uint32_t	 hlreg;

	/* Setup the Base and Length of the Tx Descriptor Ring */

	for (i = 0; i < sc->num_queues; i++) {
		txr = &sc->tx_rings[i];

		/* Setup descriptor base address */
		tdba = txr->txdma.dma_map->dm_segs[0].ds_addr;
		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(i),
		    (tdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(i), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(i),
		    sc->num_tx_desc * sizeof(struct ixgbe_legacy_tx_desc));

		/* Setup the HW Tx Head and Tail descriptor pointers */
		IXGBE_WRITE_REG(hw, IXGBE_TDH(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TDT(i), 0);

		/* Setup Transmit Descriptor Cmd Settings */
		txr->txd_cmd = IXGBE_TXD_CMD_IFCS;
		txr->queue_status = IXGBE_QUEUE_IDLE;
		txr->watchdog_timer = 0;

		/* Disable Head Writeback */
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
			break;
		case ixgbe_mac_82599EB:
		case ixgbe_mac_X540:
		default:
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
			break;
		}
		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), txctrl);
			break;
		case ixgbe_mac_82599EB:
		case ixgbe_mac_X540:
		default:
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), txctrl);
			break;
		}
	}
	ifp->if_timer = 0;

	if (hw->mac.type != ixgbe_mac_82598EB) {
		uint32_t dmatxctl, rttdcs;
		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
		dmatxctl |= IXGBE_DMATXCTL_TE;
		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
		/* Disable arbiter to set MTQC */
		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
		rttdcs |= IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
		IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
	}

	/* Enable TCP/UDP padding when using TSO */
	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	hlreg |= IXGBE_HLREG0_TXPADEN;
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
}
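
/*
 * Editor's sketch (not driver code): the TDBAL/TDBAH pair programmed
 * above is just the 64-bit ring base address split into 32-bit halves.
 * "ix_split_dma_addr" is a hypothetical helper shown for illustration.
 */
static inline void
ix_split_dma_addr(uint64_t tdba, uint32_t *lo, uint32_t *hi)
{
	*lo = (uint32_t)(tdba & 0x00000000ffffffffULL);	/* -> TDBAL */
	*hi = (uint32_t)(tdba >> 32);			/* -> TDBAH */
}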

/*********************************************************************
 *
 *  Free all transmit rings.
 *
 **********************************************************************/
void
ixgbe_free_transmit_structures(struct ix_softc *sc)
{
	struct tx_ring	*txr = sc->tx_rings;
	int		 i;

	for (i = 0; i < sc->num_queues; i++, txr++)
		ixgbe_free_transmit_buffers(txr);
}

/*********************************************************************
 *
 *  Free transmit ring related data structures.
 *
 **********************************************************************/
void
ixgbe_free_transmit_buffers(struct tx_ring *txr)
{
	struct ix_softc		*sc = txr->sc;
	struct ixgbe_tx_buf	*tx_buffer;
	int			 i;

	INIT_DEBUGOUT("free_transmit_ring: begin");

	if (txr->tx_buffers == NULL)
		return;

	tx_buffer = txr->tx_buffers;
	for (i = 0; i < sc->num_tx_desc; i++, tx_buffer++) {
		if (tx_buffer->map != NULL && tx_buffer->map->dm_nsegs > 0) {
			bus_dmamap_sync(txr->txdma.dma_tag, tx_buffer->map,
			    0, tx_buffer->map->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(txr->txdma.dma_tag,
			    tx_buffer->map);
		}
		if (tx_buffer->m_head != NULL) {
			m_freem(tx_buffer->m_head);
			tx_buffer->m_head = NULL;
		}
		if (tx_buffer->map != NULL) {
			bus_dmamap_destroy(txr->txdma.dma_tag,
			    tx_buffer->map);
			tx_buffer->map = NULL;
		}
	}

	if (txr->tx_buffers != NULL)
		free(txr->tx_buffers, M_DEVBUF,
		    sc->num_tx_desc * sizeof(struct ixgbe_tx_buf));
	txr->tx_buffers = NULL;
	txr->txtag = NULL;
}

/*********************************************************************
 *
 *  Advanced Context Descriptor setup for VLAN or CSUM
 *
 **********************************************************************/

static inline int
ixgbe_tx_offload(struct mbuf *mp, uint32_t *vlan_macip_lens,
    uint32_t *type_tucmd_mlhl, uint32_t *olinfo_status, uint32_t *cmd_type_len,
    uint32_t *mss_l4len_idx)
{
	struct ether_extracted	 ext;
	int			 offload = 0;
	uint32_t		 ethlen, iphlen;

	ether_extract_headers(mp, &ext);
	ethlen = sizeof(*ext.eh);

	*vlan_macip_lens |= (ethlen << IXGBE_ADVTXD_MACLEN_SHIFT);

	if (ext.ip4) {
		iphlen = ext.ip4->ip_hl << 2;

		if (ISSET(mp->m_pkthdr.csum_flags, M_IPV4_CSUM_OUT)) {
			*olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
			offload = 1;
		}

		*type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
#ifdef INET6
	} else if (ext.ip6) {
		iphlen = sizeof(*ext.ip6);

		*type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
#endif
	} else {
		if (mp->m_pkthdr.csum_flags & M_TCP_TSO)
			tcpstat_inc(tcps_outbadtso);
		return offload;
	}

	*vlan_macip_lens |= iphlen;

	if (ext.tcp) {
		*type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
		if (ISSET(mp->m_pkthdr.csum_flags, M_TCP_CSUM_OUT)) {
			*olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
			offload = 1;
		}
	} else if (ext.udp) {
		*type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
		if (ISSET(mp->m_pkthdr.csum_flags, M_UDP_CSUM_OUT)) {
			*olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
			offload = 1;
		}
	}

	if (mp->m_pkthdr.csum_flags & M_TCP_TSO) {
		if (ext.tcp) {
			uint32_t hdrlen, thlen, paylen, outlen;

			thlen = ext.tcp->th_off << 2;

			outlen = mp->m_pkthdr.ph_mss;
			*mss_l4len_idx |= outlen << IXGBE_ADVTXD_MSS_SHIFT;
			*mss_l4len_idx |= thlen << IXGBE_ADVTXD_L4LEN_SHIFT;

			hdrlen = ethlen + iphlen + thlen;
			paylen = mp->m_pkthdr.len - hdrlen;
			CLR(*olinfo_status, IXGBE_ADVTXD_PAYLEN_MASK
			    << IXGBE_ADVTXD_PAYLEN_SHIFT);
			*olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;

			*cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
			offload = 1;

			tcpstat_add(tcps_outpkttso,
			    (paylen + outlen - 1) / outlen);
		} else
			tcpstat_inc(tcps_outbadtso);
	}

	return offload;
}
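
/*
 * Editor's sketch (not driver code): worked example of the TSO fields
 * packed by ixgbe_tx_offload() above.  For an IPv4/TCP chain with
 * ph_mss = 1460 and a 20-byte TCP header, mss_l4len_idx gets
 * (1460 << 16) | (20 << 8).  "ix_tso_mss_l4len" is a hypothetical name.
 */
static inline uint32_t
ix_tso_mss_l4len(uint32_t mss, uint32_t thlen)
{
	return ((mss << IXGBE_ADVTXD_MSS_SHIFT) |
	    (thlen << IXGBE_ADVTXD_L4LEN_SHIFT));
}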

static int
ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp,
    uint32_t *cmd_type_len, uint32_t *olinfo_status)
{
	struct ixgbe_adv_tx_context_desc	*TXD;
	struct ixgbe_tx_buf			*tx_buffer;
	uint32_t	 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
	uint32_t	 mss_l4len_idx = 0;
	int		 ctxd = txr->next_avail_desc;
	int		 offload = 0;

	/* Indicate the whole packet as payload when not doing TSO */
	*olinfo_status |= mp->m_pkthdr.len << IXGBE_ADVTXD_PAYLEN_SHIFT;

#if NVLAN > 0
	if (ISSET(mp->m_flags, M_VLANTAG)) {
		uint32_t vtag = mp->m_pkthdr.ether_vtag;
		vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
		*cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
		offload |= 1;
	}
#endif

	offload |= ixgbe_tx_offload(mp, &vlan_macip_lens, &type_tucmd_mlhl,
	    olinfo_status, cmd_type_len, &mss_l4len_idx);

	if (!offload)
		return (0);

	TXD = (struct ixgbe_adv_tx_context_desc *)&txr->tx_base[ctxd];
	tx_buffer = &txr->tx_buffers[ctxd];

	type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;

	/* Now copy bits into descriptor */
	TXD->vlan_macip_lens = htole32(vlan_macip_lens);
	TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
	TXD->seqnum_seed = htole32(0);
	TXD->mss_l4len_idx = htole32(mss_l4len_idx);

	tx_buffer->m_head = NULL;
	tx_buffer->eop_index = -1;

	return (1);
}

/**********************************************************************
 *
 *  Examine each tx_buffer in the used queue. If the hardware is done
 *  processing the packet then free associated resources. The
 *  tx_buffer is put back on the free queue.
 *
 **********************************************************************/
int
ixgbe_txeof(struct tx_ring *txr)
{
	struct ix_softc			*sc = txr->sc;
	struct ifqueue			*ifq = txr->ifq;
	struct ifnet			*ifp = &sc->arpcom.ac_if;
	unsigned int			 head, tail, last;
	struct ixgbe_tx_buf		*tx_buffer;
	struct ixgbe_legacy_tx_desc	*tx_desc;

	if (!ISSET(ifp->if_flags, IFF_RUNNING))
		return FALSE;

	head = txr->next_avail_desc;
	tail = txr->next_to_clean;

	membar_consumer();

	if (head == tail)
		return (FALSE);

	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
	    0, txr->txdma.dma_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD);

	for (;;) {
		tx_buffer = &txr->tx_buffers[tail];
		last = tx_buffer->eop_index;
		tx_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];

		if (!ISSET(tx_desc->upper.fields.status, IXGBE_TXD_STAT_DD))
			break;

		bus_dmamap_sync(txr->txdma.dma_tag, tx_buffer->map,
		    0, tx_buffer->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(txr->txdma.dma_tag, tx_buffer->map);
		m_freem(tx_buffer->m_head);

		tx_buffer->m_head = NULL;
		tx_buffer->eop_index = -1;

		tail = last + 1;
		if (tail == sc->num_tx_desc)
			tail = 0;
		if (head == tail) {
			/* All clean, turn off the timer */
			ifp->if_timer = 0;
			break;
		}
	}

	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
	    0, txr->txdma.dma_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	membar_producer();

	txr->next_to_clean = tail;

	if (ifq_is_oactive(ifq))
		ifq_restart(ifq);

	return TRUE;
}
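
/*
 * Editor's sketch (not driver code): the cleaning loop above jumps to
 * each packet's eop_index rather than stepping one slot at a time, then
 * wraps with the usual ring modulo.  A minimal sketch of the wrap:
 */
static inline unsigned int
ix_ring_wrap(unsigned int idx, unsigned int ndesc)
{
	/* e.g. tail = ix_ring_wrap(last + 1, sc->num_tx_desc) */
	return (idx == ndesc ? 0 : idx);
}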

/*********************************************************************
 *
 *  Get a buffer from system mbuf buffer pool.
 *
 **********************************************************************/
int
ixgbe_get_buf(struct rx_ring *rxr, int i)
{
	struct ix_softc		*sc = rxr->sc;
	struct ixgbe_rx_buf	*rxbuf;
	struct mbuf		*mp;
	int			 error;
	union ixgbe_adv_rx_desc	*rxdesc;

	rxbuf = &rxr->rx_buffers[i];
	rxdesc = &rxr->rx_base[i];
	if (rxbuf->buf) {
		printf("%s: ixgbe_get_buf: slot %d already has an mbuf\n",
		    sc->dev.dv_xname, i);
		return (ENOBUFS);
	}

	/* needed in any case so preallocate since this one will fail for sure */
	mp = MCLGETL(NULL, M_DONTWAIT, sc->rx_mbuf_sz);
	if (!mp)
		return (ENOBUFS);

	mp->m_data += (mp->m_ext.ext_size - sc->rx_mbuf_sz);
	mp->m_len = mp->m_pkthdr.len = sc->rx_mbuf_sz;

	error = bus_dmamap_load_mbuf(rxr->rxdma.dma_tag, rxbuf->map,
	    mp, BUS_DMA_NOWAIT);
	if (error) {
		m_freem(mp);
		return (error);
	}

	bus_dmamap_sync(rxr->rxdma.dma_tag, rxbuf->map,
	    0, rxbuf->map->dm_mapsize, BUS_DMASYNC_PREREAD);
	rxbuf->buf = mp;

	rxdesc->read.pkt_addr = htole64(rxbuf->map->dm_segs[0].ds_addr);

	return (0);
}
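
/*
 * Editor's note (inferred from the code above, not documented here):
 * MCLGETL() hands back a cluster from the smallest pool that fits
 * sc->rx_mbuf_sz, so ext_size may exceed the requested size; the m_data
 * adjustment in ixgbe_get_buf() parks that slack at the front of the
 * cluster.  For example, assuming OpenBSD's standard 9216-byte cluster
 * pool, a 9000-byte rx_mbuf_sz leaves 216 bytes of slack.
 */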

/*********************************************************************
 *
 *  Allocate memory for rx_buffer structures. Since we use one
 *  rx_buffer per received packet, the maximum number of rx_buffers
 *  that we'll need is equal to the number of receive descriptors
 *  that we've allocated.
 *
 **********************************************************************/
int
ixgbe_allocate_receive_buffers(struct rx_ring *rxr)
{
	struct ix_softc		*sc = rxr->sc;
	struct ifnet		*ifp = &sc->arpcom.ac_if;
	struct ixgbe_rx_buf	*rxbuf;
	int			 i, error;

	if (!(rxr->rx_buffers = mallocarray(sc->num_rx_desc,
	    sizeof(struct ixgbe_rx_buf), M_DEVBUF, M_NOWAIT | M_ZERO))) {
		printf("%s: Unable to allocate rx_buffer memory\n",
		    ifp->if_xname);
		error = ENOMEM;
		goto fail;
	}

	rxbuf = rxr->rx_buffers;
	for (i = 0; i < sc->num_rx_desc; i++, rxbuf++) {
		error = bus_dmamap_create(rxr->rxdma.dma_tag, 16 * 1024, 1,
		    16 * 1024, 0, BUS_DMA_NOWAIT, &rxbuf->map);
		if (error) {
			printf("%s: Unable to create Pack DMA map\n",
			    ifp->if_xname);
			goto fail;
		}
	}
	bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, 0,
	    rxr->rxdma.dma_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);

fail:
	return (error);
}

/*********************************************************************
 *
 *  Initialize a receive ring and its buffers.
 *
 **********************************************************************/
int
ixgbe_setup_receive_ring(struct rx_ring *rxr)
{
	struct ix_softc		*sc = rxr->sc;
	struct ifnet		*ifp = &sc->arpcom.ac_if;
	int			 rsize, error;

	rsize = roundup2(sc->num_rx_desc *
	    sizeof(union ixgbe_adv_rx_desc), 4096);
	/* Clear the ring contents */
	bzero((void *)rxr->rx_base, rsize);

	if ((error = ixgbe_allocate_receive_buffers(rxr)) != 0)
		return (error);

	/* Setup our descriptor indices */
	rxr->next_to_check = 0;
	rxr->last_desc_filled = sc->num_rx_desc - 1;

	if_rxr_init(&rxr->rx_ring, 2 * ((ifp->if_hardmtu / MCLBYTES) + 1),
	    sc->num_rx_desc - 1);

	ixgbe_rxfill(rxr);
	if (if_rxr_inuse(&rxr->rx_ring) == 0) {
		printf("%s: unable to fill any rx descriptors\n",
		    sc->dev.dv_xname);
		return (ENOBUFS);
	}

	return (0);
}
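
/*
 * Editor's note: a worked example of the low water mark passed to
 * if_rxr_init() above: with a 1500-byte if_hardmtu and 2048-byte
 * MCLBYTES clusters it is 2 * ((1500 / 2048) + 1) == 2 slots, while a
 * 9000-byte mtu gives 2 * ((9000 / 2048) + 1) == 10 slots.
 */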

int
ixgbe_rxfill(struct rx_ring *rxr)
{
	struct ix_softc	*sc = rxr->sc;
	int		 post = 0;
	u_int		 slots;
	int		 i;

	bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
	    0, rxr->rxdma.dma_map->dm_mapsize,
	    BUS_DMASYNC_POSTWRITE);

	i = rxr->last_desc_filled;
	for (slots = if_rxr_get(&rxr->rx_ring, sc->num_rx_desc);
	    slots > 0; slots--) {
		if (++i == sc->num_rx_desc)
			i = 0;

		if (ixgbe_get_buf(rxr, i) != 0)
			break;

		rxr->last_desc_filled = i;
		post = 1;
	}

	bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
	    0, rxr->rxdma.dma_map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	if_rxr_put(&rxr->rx_ring, slots);

	return (post);
}
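
/*
 * Editor's note: if_rxr_get() above reserves up to num_rx_desc free
 * slots, and whatever the fill loop could not use (mbuf shortage or a
 * failed ixgbe_get_buf()) is handed back via if_rxr_put(slots), so the
 * ring accounting stays balanced.
 */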

void
ixgbe_rxrefill(void *xrxr)
{
	struct rx_ring	*rxr = xrxr;
	struct ix_softc	*sc = rxr->sc;

	if (ixgbe_rxfill(rxr)) {
		/* Advance the Rx Queue "Tail Pointer" */
		IXGBE_WRITE_REG(&sc->hw, IXGBE_RDT(rxr->me),
		    rxr->last_desc_filled);
	} else if (if_rxr_inuse(&rxr->rx_ring) == 0)
		timeout_add(&rxr->rx_refill, 1);
}

/*********************************************************************
 *
 *  Initialize all receive rings.
 *
 **********************************************************************/
int
ixgbe_setup_receive_structures(struct ix_softc *sc)
{
	struct rx_ring	*rxr = sc->rx_rings;
	int		 i;

	for (i = 0; i < sc->num_queues; i++, rxr++)
		if (ixgbe_setup_receive_ring(rxr))
			goto fail;

	return (0);
fail:
	ixgbe_free_receive_structures(sc);
	return (ENOBUFS);
}

/*********************************************************************
 *
 *  Setup receive registers and features.
 *
 **********************************************************************/
#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2

void
ixgbe_initialize_receive_units(struct ix_softc *sc)
{
	struct ifnet	*ifp = &sc->arpcom.ac_if;
	struct rx_ring	*rxr = sc->rx_rings;
	struct ixgbe_hw	*hw = &sc->hw;
	uint32_t	 bufsz, fctrl, srrctl, rxcsum, rdrxctl;
	uint32_t	 hlreg;
	int		 i;

	/*
	 * Make sure receives are disabled while
	 * setting up the descriptor ring
	 */
	ixgbe_disable_rx(hw);

	/* Enable broadcasts */
	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl |= IXGBE_FCTRL_BAM;
	if (sc->hw.mac.type == ixgbe_mac_82598EB) {
		fctrl |= IXGBE_FCTRL_DPF;
		fctrl |= IXGBE_FCTRL_PMCF;
	}
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	/* Always enable jumbo frame reception */
	hlreg |= IXGBE_HLREG0_JUMBOEN;
	/* Always enable CRC stripping */
	hlreg |= IXGBE_HLREG0_RXCRCSTRP;
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);

	if (ISSET(ifp->if_xflags, IFXF_LRO)) {
		rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);

		/* This field has to be set to zero. */
		rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;

		/* RSC Coalescing on ACK Change */
		rdrxctl |= IXGBE_RDRXCTL_RSCACKC;
		rdrxctl |= IXGBE_RDRXCTL_FCOE_WRFIX;

		IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
	}

	bufsz = (sc->rx_mbuf_sz - ETHER_ALIGN) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	for (i = 0; i < sc->num_queues; i++, rxr++) {
		uint64_t rdba = rxr->rxdma.dma_map->dm_segs[0].ds_addr;

		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(i),
		    (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(i), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(i),
		    sc->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));

		/* Set up the SRRCTL register */
		srrctl = bufsz | IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(i), srrctl);

		if (ISSET(ifp->if_xflags, IFXF_LRO)) {
			rdrxctl = IXGBE_READ_REG(&sc->hw, IXGBE_RSCCTL(i));

			/* Enable Receive Side Coalescing */
			rdrxctl |= IXGBE_RSCCTL_RSCEN;
			rdrxctl |= IXGBE_RSCCTL_MAXDESC_16;

			IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(i), rdrxctl);
		}

		/* Setup the HW Rx Head and Tail Descriptor Pointers */
		IXGBE_WRITE_REG(hw, IXGBE_RDH(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RDT(i), 0);
	}

	if (sc->hw.mac.type != ixgbe_mac_82598EB) {
		uint32_t psrtype = IXGBE_PSRTYPE_TCPHDR |
		    IXGBE_PSRTYPE_UDPHDR |
		    IXGBE_PSRTYPE_IPV4HDR |
		    IXGBE_PSRTYPE_IPV6HDR;
		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
	}

	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
	rxcsum &= ~IXGBE_RXCSUM_PCSD;

	ixgbe_initialize_rss_mapping(sc);

	/* Setup RSS */
	if (sc->num_queues > 1) {
		/* RSS and RX IPP Checksum are mutually exclusive */
		rxcsum |= IXGBE_RXCSUM_PCSD;
	}

	/* Map QPRC/QPRDC/QPTC on a per queue basis */
	ixgbe_map_queue_statistics(sc);

	/* This is useful for calculating UDP/IP fragment checksums */
	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
		rxcsum |= IXGBE_RXCSUM_IPPCSE;

	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
}
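
/*
 * Editor's note: SRRCTL takes the packet buffer size in 1KB units
 * (IXGBE_SRRCTL_BSIZEPKT_SHIFT is 10), so for example a 4096-byte
 * rx_mbuf_sz yields bufsz = (4096 - ETHER_ALIGN) >> 10 == 3, i.e. the
 * hardware is told 3KB per receive buffer.
 */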

void
ixgbe_initialize_rss_mapping(struct ix_softc *sc)
{
	struct ixgbe_hw	*hw = &sc->hw;
	uint32_t	 reta = 0, mrqc, rss_key[10];
	int		 i, j, queue_id, table_size, index_mult;

	/* set up random bits */
	stoeplitz_to_key(&rss_key, sizeof(rss_key));

	/* Set multiplier for RETA setup and table size based on MAC */
	index_mult = 0x1;
	table_size = 128;
	switch (sc->hw.mac.type) {
	case ixgbe_mac_82598EB:
		index_mult = 0x11;
		break;
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		table_size = 512;
		break;
	default:
		break;
	}

	/* Set up the redirection table */
	for (i = 0, j = 0; i < table_size; i++, j++) {
		if (j == sc->num_queues)
			j = 0;
		queue_id = (j * index_mult);
		/*
		 * The low 8 bits are for hash value (n+0);
		 * The next 8 bits are for hash value (n+1), etc.
		 */
		reta = reta >> 8;
		reta = reta | (((uint32_t)queue_id) << 24);
		if ((i & 3) == 3) {
			if (i < 128)
				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
			else
				IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
				    reta);
			reta = 0;
		}
	}

	/* Now fill our hash function seeds */
	for (i = 0; i < 10; i++)
		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);

	/*
	 * Disable UDP - IP fragments aren't currently being handled
	 * and so we end up with a mix of 2-tuple and 4-tuple
	 * traffic.
	 */
	mrqc = IXGBE_MRQC_RSSEN
	     | IXGBE_MRQC_RSS_FIELD_IPV4
	     | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
	     | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
	     | IXGBE_MRQC_RSS_FIELD_IPV6_EX
	     | IXGBE_MRQC_RSS_FIELD_IPV6
	     | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
	;
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
}
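
/*
 * Editor's sketch (not driver code): the redirection table loop above
 * shifts the accumulator right and inserts each queue id at the top, so
 * one 32-bit RETA register holds four 8-bit entries, lowest entry in
 * the lowest byte.  Equivalent packing, with "ix_reta_pack" a
 * hypothetical name:
 */
static inline uint32_t
ix_reta_pack(const uint8_t qid[4])
{
	return ((uint32_t)qid[0] | (uint32_t)qid[1] << 8 |
	    (uint32_t)qid[2] << 16 | (uint32_t)qid[3] << 24);
}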

/*********************************************************************
 *
 *  Free all receive rings.
 *
 **********************************************************************/
void
ixgbe_free_receive_structures(struct ix_softc *sc)
{
	struct rx_ring	*rxr;
	int		 i;

	for (i = 0, rxr = sc->rx_rings; i < sc->num_queues; i++, rxr++)
		if_rxr_init(&rxr->rx_ring, 0, 0);

	for (i = 0, rxr = sc->rx_rings; i < sc->num_queues; i++, rxr++)
		ixgbe_free_receive_buffers(rxr);
}

/*********************************************************************
 *
 *  Free receive ring data structures
 *
 **********************************************************************/
void
ixgbe_free_receive_buffers(struct rx_ring *rxr)
{
	struct ix_softc		*sc;
	struct ixgbe_rx_buf	*rxbuf;
	int			 i;

	sc = rxr->sc;
	if (rxr->rx_buffers != NULL) {
		for (i = 0; i < sc->num_rx_desc; i++) {
			rxbuf = &rxr->rx_buffers[i];
			if (rxbuf->buf != NULL) {
				bus_dmamap_sync(rxr->rxdma.dma_tag, rxbuf->map,
				    0, rxbuf->map->dm_mapsize,
				    BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(rxr->rxdma.dma_tag,
				    rxbuf->map);
				m_freem(rxbuf->buf);
				rxbuf->buf = NULL;
			}
			if (rxbuf->map != NULL) {
				bus_dmamap_destroy(rxr->rxdma.dma_tag,
				    rxbuf->map);
				rxbuf->map = NULL;
			}
		}
		free(rxr->rx_buffers, M_DEVBUF,
		    sc->num_rx_desc * sizeof(struct ixgbe_rx_buf));
		rxr->rx_buffers = NULL;
	}
}

/*********************************************************************
 *
 *  This routine executes in interrupt context. It replenishes
 *  the mbufs in the descriptor ring and sends data which has been
 *  DMA'ed into host memory to the upper layer.
 *
 *********************************************************************/
int
ixgbe_rxeof(struct rx_ring *rxr)
{
	struct ix_softc		*sc = rxr->sc;
	struct ifnet		*ifp = &sc->arpcom.ac_if;
	struct mbuf_list	 ml = MBUF_LIST_INITIALIZER();
	struct mbuf		*mp, *sendmp;
	uint8_t			 eop = 0;
	uint16_t		 len, vtag;
	uint32_t		 staterr = 0;
	struct ixgbe_rx_buf	*rxbuf, *nxbuf;
	union ixgbe_adv_rx_desc	*rxdesc;
	size_t			 dsize = sizeof(union ixgbe_adv_rx_desc);
	int			 i, nextp, rsccnt;

	if (!ISSET(ifp->if_flags, IFF_RUNNING))
		return FALSE;

	i = rxr->next_to_check;
	while (if_rxr_inuse(&rxr->rx_ring) > 0) {
		uint32_t hash;
		uint16_t hashtype;

		bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
		    dsize * i, dsize, BUS_DMASYNC_POSTREAD);

		rxdesc = &rxr->rx_base[i];
		staterr = letoh32(rxdesc->wb.upper.status_error);
		if (!ISSET(staterr, IXGBE_RXD_STAT_DD)) {
			bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
			    dsize * i, dsize,
			    BUS_DMASYNC_PREREAD);
			break;
		}

		/* Zero out the receive descriptor's status */
		rxdesc->wb.upper.status_error = 0;
		rxbuf = &rxr->rx_buffers[i];

		/* pull the mbuf off the ring */
		bus_dmamap_sync(rxr->rxdma.dma_tag, rxbuf->map, 0,
		    rxbuf->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(rxr->rxdma.dma_tag, rxbuf->map);

		mp = rxbuf->buf;
		len = letoh16(rxdesc->wb.upper.length);
		vtag = letoh16(rxdesc->wb.upper.vlan);
		eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0);
		hash = lemtoh32(&rxdesc->wb.lower.hi_dword.rss);
		hashtype =
		    lemtoh16(&rxdesc->wb.lower.lo_dword.hs_rss.pkt_info) &
		    IXGBE_RXDADV_RSSTYPE_MASK;
		rsccnt = lemtoh32(&rxdesc->wb.lower.lo_dword.data) &
		    IXGBE_RXDADV_RSCCNT_MASK;
		rsccnt >>= IXGBE_RXDADV_RSCCNT_SHIFT;

		if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) {
			if (rxbuf->fmp) {
				m_freem(rxbuf->fmp);
				rxbuf->fmp = NULL;
			}

			m_freem(mp);
			rxbuf->buf = NULL;
			goto next_desc;
		}

		if (mp == NULL) {
			panic("%s: ixgbe_rxeof: NULL mbuf in slot %d "
			    "(nrx %d, filled %d)", sc->dev.dv_xname,
			    i, if_rxr_inuse(&rxr->rx_ring),
			    rxr->last_desc_filled);
		}

		if (!eop) {
			/*
			 * Figure out the next descriptor of this frame.
			 */
			if (rsccnt) {
				nextp = staterr & IXGBE_RXDADV_NEXTP_MASK;
				nextp >>= IXGBE_RXDADV_NEXTP_SHIFT;
			} else {
				nextp = i + 1;
			}
			if (nextp == sc->num_rx_desc)
				nextp = 0;
			nxbuf = &rxr->rx_buffers[nextp];
			/* prefetch(nxbuf); */
		}

		/*
		 * Rather than using the fmp/lmp global pointers
		 * we now keep the head of a packet chain in the
		 * buffer struct and pass this along from one
		 * descriptor to the next, until we get EOP.
		 */
		mp->m_len = len;
		/*
		 * See if there is a stored head
		 * that determines what we are
		 */
		sendmp = rxbuf->fmp;
		rxbuf->buf = rxbuf->fmp = NULL;

		if (sendmp == NULL) {
			/* first desc of a non-ps chain */
			sendmp = mp;
			sendmp->m_pkthdr.len = 0;
			sendmp->m_pkthdr.ph_mss = 0;
		}
		sendmp->m_pkthdr.len += mp->m_len;
		/*
		 * This function iterates over interleaved descriptors.
		 * Thus, we reuse ph_mss as a global segment counter per
		 * TCP connection, instead of introducing a new variable
		 * in m_pkthdr.
		 */
		if (rsccnt)
			sendmp->m_pkthdr.ph_mss += rsccnt - 1;

		/* Pass the head pointer on */
		if (eop == 0) {
			nxbuf->fmp = sendmp;
			sendmp = NULL;
			mp->m_next = nxbuf->buf;
		} else { /* Sending this frame? */
			uint16_t pkts;

			ixgbe_rx_checksum(staterr, sendmp);
#if NVLAN > 0
			if (staterr & IXGBE_RXD_STAT_VP) {
				sendmp->m_pkthdr.ether_vtag = vtag;
				SET(sendmp->m_flags, M_VLANTAG);
			}
#endif
			if (hashtype != IXGBE_RXDADV_RSSTYPE_NONE) {
				sendmp->m_pkthdr.ph_flowid = hash;
				SET(sendmp->m_pkthdr.csum_flags, M_FLOWID);
			}

			pkts = sendmp->m_pkthdr.ph_mss;
			sendmp->m_pkthdr.ph_mss = 0;

			if (pkts > 1) {
				struct ether_extracted ext;
				uint32_t hdrlen, paylen;

				/* Calculate header size. */
				ether_extract_headers(sendmp, &ext);
				hdrlen = sizeof(*ext.eh);
#if NVLAN > 0
				if (ISSET(sendmp->m_flags, M_VLANTAG) ||
				    ext.evh)
					hdrlen += ETHER_VLAN_ENCAP_LEN;
#endif
				if (ext.ip4)
					hdrlen += ext.ip4->ip_hl << 2;
				if (ext.ip6)
					hdrlen += sizeof(*ext.ip6);
				if (ext.tcp) {
					hdrlen += ext.tcp->th_off << 2;
					tcpstat_inc(tcps_inhwlro);
					tcpstat_add(tcps_inpktlro, pkts);
				} else {
					tcpstat_inc(tcps_inbadlro);
				}

				/*
				 * If we are going to forward this packet,
				 * we have to mark it as TSO, set a correct
				 * mss, and recalculate the TCP checksum.
				 */
				paylen = sendmp->m_pkthdr.len > hdrlen ?
				    sendmp->m_pkthdr.len - hdrlen : 0;
				if (ext.tcp && paylen >= pkts) {
					SET(sendmp->m_pkthdr.csum_flags,
					    M_TCP_TSO);
					sendmp->m_pkthdr.ph_mss = paylen / pkts;
				}
				if (ext.tcp &&
				    ISSET(sendmp->m_pkthdr.csum_flags,
				    M_TCP_CSUM_IN_OK)) {
					SET(sendmp->m_pkthdr.csum_flags,
					    M_TCP_CSUM_OUT);
				}
			}

			ml_enqueue(&ml, sendmp);
		}
next_desc:
		if_rxr_put(&rxr->rx_ring, 1);
		bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
		    dsize * i, dsize,
		    BUS_DMASYNC_PREREAD);

		/* Advance our pointers to the next descriptor. */
		if (++i == sc->num_rx_desc)
			i = 0;
	}
	rxr->next_to_check = i;

	if (ifiq_input(rxr->ifiq, &ml))
		if_rxr_livelocked(&rxr->rx_ring);

	if (!(staterr & IXGBE_RXD_STAT_DD))
		return FALSE;

	return TRUE;
}
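
/*
 * Editor's note: a worked example of the LRO bookkeeping above: if RSC
 * coalesced pkts == 4 segments into one 5858-byte chain and hdrlen
 * works out to 54 (14 + 20 + 20), the code sets
 * ph_mss = (5858 - 54) / 4 == 1451 before the packet is enqueued.
 */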

/*********************************************************************
 *
 *  Verify that the hardware indicated that the checksum is valid.
 *  Inform the stack about the status of the checksum so that the
 *  stack doesn't spend time verifying it again.
 *
 *********************************************************************/
void
ixgbe_rx_checksum(uint32_t staterr, struct mbuf *mp)
{
	uint16_t status = (uint16_t)staterr;
	uint8_t  errors = (uint8_t)(staterr >> 24);

	if (status & IXGBE_RXD_STAT_IPCS) {
		if (!(errors & IXGBE_RXD_ERR_IPE)) {
			/* IP Checksum Good */
			mp->m_pkthdr.csum_flags = M_IPV4_CSUM_IN_OK;
		} else
			mp->m_pkthdr.csum_flags = 0;
	}
	if (status & IXGBE_RXD_STAT_L4CS) {
		if (!(errors & IXGBE_RXD_ERR_TCPE))
			mp->m_pkthdr.csum_flags |=
			    M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK;
	}
}
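
/*
 * Editor's note: staterr carries the descriptor status bits in its low
 * half and the error bits in the top byte, which is why
 * ixgbe_rx_checksum() splits it as (uint16_t)staterr and (staterr >> 24).
 */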

void
ixgbe_setup_vlan_hw_support(struct ix_softc *sc)
{
	uint32_t	ctrl;
	int		i;

	/*
	 * A soft reset zeroes out the VFTA, so
	 * we need to repopulate it now.
	 */
	for (i = 0; i < IXGBE_VFTA_SIZE; i++) {
		if (sc->shadow_vfta[i] != 0)
			IXGBE_WRITE_REG(&sc->hw, IXGBE_VFTA(i),
			    sc->shadow_vfta[i]);
	}

	ctrl = IXGBE_READ_REG(&sc->hw, IXGBE_VLNCTRL);
#if 0
	/* Enable the Filter Table if enabled */
	if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
		ctrl &= ~IXGBE_VLNCTRL_CFIEN;
		ctrl |= IXGBE_VLNCTRL_VFE;
	}
#endif
	if (sc->hw.mac.type == ixgbe_mac_82598EB)
		ctrl |= IXGBE_VLNCTRL_VME;
	IXGBE_WRITE_REG(&sc->hw, IXGBE_VLNCTRL, ctrl);

	/* On 82599 the VLAN enable is per/queue in RXDCTL */
	if (sc->hw.mac.type != ixgbe_mac_82598EB) {
		for (i = 0; i < sc->num_queues; i++) {
			ctrl = IXGBE_READ_REG(&sc->hw, IXGBE_RXDCTL(i));
			ctrl |= IXGBE_RXDCTL_VME;
			IXGBE_WRITE_REG(&sc->hw, IXGBE_RXDCTL(i), ctrl);
		}
	}
}
| 3398 | ||||
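/*
 * Layout note (standard across ixgbe parts): the VFTA is a bitmap of
 * 128 32-bit words, so VLAN id n lives at bit (n & 0x1f) of word
 * (n >> 5) and the 128 writes above cover all 4096 possible ids.  For
 * example, VLAN 100 is bit 4 of VFTA(3).
 */
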
void
ixgbe_enable_intr(struct ix_softc *sc)
{
	struct ixgbe_hw	*hw = &sc->hw;
	uint32_t	mask, fwsm;

	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
	/* Enable Fan Failure detection */
	if (hw->device_id == IXGBE_DEV_ID_82598AT)
		mask |= IXGBE_EIMS_GPI_SDP1;

	switch (sc->hw.mac.type) {
	case ixgbe_mac_82599EB:
		mask |= IXGBE_EIMS_ECC;
		/* Temperature sensor on some adapters */
		mask |= IXGBE_EIMS_GPI_SDP0;
		/* SFP+ (RX_LOS_N & MOD_ABS_N) */
		mask |= IXGBE_EIMS_GPI_SDP1;
		mask |= IXGBE_EIMS_GPI_SDP2;
		break;
	case ixgbe_mac_X540:
		mask |= IXGBE_EIMS_ECC;
		/* Detect if Thermal Sensor is enabled */
		fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
		if (fwsm & IXGBE_FWSM_TS_ENABLED)
			mask |= IXGBE_EIMS_TS;
		break;
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		mask |= IXGBE_EIMS_ECC;
		/* MAC thermal sensor is automatically enabled */
		mask |= IXGBE_EIMS_TS;
		/* Some devices use SDP0 for important information */
		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
			mask |= IXGBE_EIMS_GPI_SDP0_X540;
		break;
	default:
		break;
	}

	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);

	/* With MSI-X we use auto clear */
	if (sc->sc_intrmap) {
		mask = IXGBE_EIMS_ENABLE_MASK;
		/* Don't autoclear Link */
		mask &= ~IXGBE_EIMS_OTHER;
		mask &= ~IXGBE_EIMS_LSC;
		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
	}

	IXGBE_WRITE_FLUSH(hw);
}

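/*
 * Worked example (82598AT, illustrative): mask starts as
 * IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE ==
 * (0x0000FFFF | 0x00100000 | 0x40000000 | 0x80000000) & ~0x0000FFFF
 * == 0xC0100000, then gains IXGBE_EIMS_GPI_SDP1 (0x02000000) for fan
 * failure detection, so 0xC2100000 is written to EIMS.
 */
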
void
ixgbe_disable_intr(struct ix_softc *sc)
{
	if (sc->sc_intrmap)
		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIAC, 0);
	if (sc->hw.mac.type == ixgbe_mac_82598EB) {
		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC, ~0);
	} else {
		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC, 0xFFFF0000);
		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC_EX(0), ~0);
		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC_EX(1), ~0);
	}
	IXGBE_WRITE_FLUSH(&sc->hw);
}

uint16_t
ixgbe_read_pci_cfg(struct ixgbe_hw *hw, uint32_t reg)
{
	struct pci_attach_args	*pa;
	uint32_t		 value;
	int			 high = 0;

	if (reg & 0x2) {
		high = 1;
		reg &= ~0x2;
	}
	pa = &((struct ixgbe_osdep *)hw->back)->os_pa;
	value = pci_conf_read(pa->pa_pc, pa->pa_tag, reg);

	if (high)
		value >>= 16;

	return (value & 0xffff);
}

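/*
 * Worked example: a 16-bit read at config offset 0xA6 arrives here
 * with reg == 0xA6; bit 1 is set, so high becomes 1 and the access is
 * rounded down to the 32-bit word at 0xA4.  The word is read, shifted
 * right by 16, and masked to the requested 16-bit half.
 */
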
void
ixgbe_write_pci_cfg(struct ixgbe_hw *hw, uint32_t reg, uint16_t value)
{
	struct pci_attach_args	*pa;
	uint32_t		 rv;
	int			 high = 0;

	/*
	 * Config space is only addressable as 32-bit words, so a
	 * 16-bit store must be done as a read/mask/write of the
	 * enclosing word.
	 */
	if (reg & 0x2) {
		high = 1;
		reg &= ~0x2;
	}
	pa = &((struct ixgbe_osdep *)hw->back)->os_pa;
	rv = pci_conf_read(pa->pa_pc, pa->pa_tag, reg);
	if (!high)
		rv = (rv & 0xffff0000) | value;
	else
		rv = (rv & 0xffff) | ((uint32_t)value << 16);
	pci_conf_write(pa->pa_pc, pa->pa_tag, reg, rv);
}

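/*
 * Worked example (hypothetical values): writing 0xBEEF to offset
 * 0xA6.  reg & 0x2 is set, so high = 1 and the access targets the
 * 32-bit word at 0xA4.  If that word currently reads 0x12345678, the
 * value written back is (0x12345678 & 0xffff) | (0xBEEF << 16)
 * == 0xBEEF5678, i.e. only the upper 16-bit half is replaced.
 */
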
/*
 * Set up the correct IVAR register for a particular MSIX interrupt
 * (yes this is all very magic and confusing :)
 *  - entry is the register array entry
 *  - vector is the MSIX vector for this queue
 *  - type is RX/TX/MISC
 */
void
ixgbe_set_ivar(struct ix_softc *sc, uint8_t entry, uint8_t vector, int8_t type)
{
	struct ixgbe_hw *hw = &sc->hw;
	uint32_t ivar, index;

	vector |= IXGBE_IVAR_ALLOC_VAL;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		if (type == -1)
			entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
		else
			entry += (type * 64);
		index = (entry >> 2) & 0x1F;
		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
		ivar &= ~((uint32_t)0xFF << (8 * (entry & 0x3)));
		ivar |= ((uint32_t)vector << (8 * (entry & 0x3)));
		IXGBE_WRITE_REG(&sc->hw, IXGBE_IVAR(index), ivar);
		break;

	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		if (type == -1) {	/* MISC IVAR */
			index = (entry & 1) * 8;
			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
			ivar &= ~((uint32_t)0xFF << index);
			ivar |= ((uint32_t)vector << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
		} else {	/* RX/TX IVARS */
			index = (16 * (entry & 1)) + (8 * type);
			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
			ivar &= ~((uint32_t)0xFF << index);
			ivar |= ((uint32_t)vector << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
		}
		break;

	default:
		break;
	}
}

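/*
 * Worked example (82599 family, illustrative values): binding RX
 * queue 5 (entry = 5, type = 0) to MSI-X vector 3 gives
 * index = (16 * (5 & 1)) + (8 * 0) = 16, so IVAR register 5 >> 1 = 2
 * has bits 23:16 set to 0x83 (vector 3 | IXGBE_IVAR_ALLOC_VAL).  The
 * matching TX entry (type = 1) lands in bits 31:24 of the same
 * register.
 */
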
void
ixgbe_configure_ivars(struct ix_softc *sc)
{
	struct ix_queue *que = sc->queues;
	uint32_t newitr;
	int i;

	newitr = (4000000 / IXGBE_INTS_PER_SEC) & 0x0FF8;

	for (i = 0; i < sc->num_queues; i++, que++) {
		/* First the RX queue entry */
		ixgbe_set_ivar(sc, i, que->msix, 0);
		/* ... and the TX */
		ixgbe_set_ivar(sc, i, que->msix, 1);
		/* Set an Initial EITR value */
		IXGBE_WRITE_REG(&sc->hw,
		    IXGBE_EITR(que->msix), newitr);
	}

	/* For the Link interrupt */
	ixgbe_set_ivar(sc, 1, sc->linkvec, -1);
}

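/*
 * Worked example: with IXGBE_INTS_PER_SEC at 8000, newitr is
 * (4000000 / 8000) & 0x0FF8 = 500 & 0x0FF8 = 0x1F0 (496).  The low
 * three bits are masked off to align the value with the EITR interval
 * field, which starts at bit 3 on 82599 and later MACs.
 */
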
/*
 * SFP module interrupts handler
 */
void
ixgbe_handle_mod(struct ix_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;
	uint32_t err;

	err = hw->phy.ops.identify_sfp(hw);
	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		printf("%s: Unsupported SFP+ module type was detected!\n",
		    sc->dev.dv_xname);
		return;
	}
	err = hw->mac.ops.setup_sfp(hw);
	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		printf("%s: Setup failure - unsupported SFP+ module type!\n",
		    sc->dev.dv_xname);
		return;
	}

	ixgbe_handle_msf(sc);
}

/*
 * MSF (multispeed fiber) interrupts handler
 */
void
ixgbe_handle_msf(struct ix_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;
	uint32_t autoneg;
	bool negotiate;

	autoneg = hw->phy.autoneg_advertised;
	if ((!autoneg) && (hw->mac.ops.get_link_capabilities)) {
		if (hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate))
			return;
	}
	if (hw->mac.ops.setup_link)
		hw->mac.ops.setup_link(hw, autoneg, TRUE);

	ifmedia_delete_instance(&sc->media, IFM_INST_ANY);
	ixgbe_add_media_types(sc);
	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
}

/*
 * External PHY interrupts handler
 */
void
ixgbe_handle_phy(struct ix_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;
	int error;

	error = hw->phy.ops.handle_lasi(hw);
	if (error == IXGBE_ERR_OVERTEMP)
		printf("%s: CRITICAL: EXTERNAL PHY OVER TEMP!! "
		    "PHY will downshift to lower power state!\n",
		    sc->dev.dv_xname);
	else if (error)
		printf("%s: Error handling LASI interrupt: %d\n",
		    sc->dev.dv_xname, error);
}

#if NKSTAT > 0
enum ix_counter_idx {
	ix_counter_crcerrs,
	ix_counter_lxontxc,
	ix_counter_lxonrxc,
	ix_counter_lxofftxc,
	ix_counter_lxoffrxc,
	ix_counter_prc64,
	ix_counter_prc127,
	ix_counter_prc255,
	ix_counter_prc511,
	ix_counter_prc1023,
	ix_counter_prc1522,
	ix_counter_gptc,
	ix_counter_gorc,
	ix_counter_gotc,
	ix_counter_ruc,
	ix_counter_rfc,
	ix_counter_roc,
	ix_counter_rjc,
	ix_counter_tor,
	ix_counter_tpr,
	ix_counter_tpt,
	ix_counter_gprc,
	ix_counter_bprc,
	ix_counter_mprc,
	ix_counter_ptc64,
	ix_counter_ptc127,
	ix_counter_ptc255,
	ix_counter_ptc511,
	ix_counter_ptc1023,
	ix_counter_ptc1522,
	ix_counter_mptc,
	ix_counter_bptc,

	ix_counter_num,
};

CTASSERT(KSTAT_KV_U_PACKETS <= 0xff);
CTASSERT(KSTAT_KV_U_BYTES <= 0xff);

struct ix_counter {
	char			 name[KSTAT_KV_NAMELEN];
	uint32_t		 reg;
	uint8_t			 width;
	uint8_t			 unit;
};

static const struct ix_counter ix_counters[ix_counter_num] = {
	[ix_counter_crcerrs] = { "crc errs", IXGBE_CRCERRS, 32,
	    KSTAT_KV_U_PACKETS },
	[ix_counter_lxontxc] = { "tx link xon", IXGBE_LXONTXC, 32,
	    KSTAT_KV_U_PACKETS },
	[ix_counter_lxonrxc] = { "rx link xon", 0, 32,
	    KSTAT_KV_U_PACKETS },
	[ix_counter_lxofftxc] = { "tx link xoff", IXGBE_LXOFFTXC, 32,
	    KSTAT_KV_U_PACKETS },
	[ix_counter_lxoffrxc] = { "rx link xoff", 0, 32,
	    KSTAT_KV_U_PACKETS },
	[ix_counter_prc64] = { "rx 64B", IXGBE_PRC64, 32,
	    KSTAT_KV_U_PACKETS },
	[ix_counter_prc127] = { "rx 65-127B", IXGBE_PRC127, 32,
	    KSTAT_KV_U_PACKETS },
	[ix_counter_prc255] = { "rx 128-255B", IXGBE_PRC255, 32,
	    KSTAT_KV_U_PACKETS },
	[ix_counter_prc511] = { "rx 256-511B", IXGBE_PRC511, 32,
	    KSTAT_KV_U_PACKETS },
	[ix_counter_prc1023] = { "rx 512-1023B", IXGBE_PRC1023, 32,
	    KSTAT_KV_U_PACKETS },
	[ix_counter_prc1522] = { "rx 1024-maxB", IXGBE_PRC1522, 32,
	    KSTAT_KV_U_PACKETS },
	[ix_counter_gptc] = { "tx good", IXGBE_GPTC, 32,
	    KSTAT_KV_U_PACKETS },
	[ix_counter_gorc] = { "rx good", IXGBE_GORCL, 36,
	    KSTAT_KV_U_BYTES },
	[ix_counter_gotc] = { "tx good", IXGBE_GOTCL, 36,
	    KSTAT_KV_U_BYTES },
	[ix_counter_ruc] = { "rx undersize", IXGBE_RUC, 32,
	    KSTAT_KV_U_PACKETS },
	[ix_counter_rfc] = { "rx fragment", IXGBE_RFC, 32,
	    KSTAT_KV_U_PACKETS },
	[ix_counter_roc] = { "rx oversize", IXGBE_ROC, 32,
	    KSTAT_KV_U_PACKETS },
	[ix_counter_rjc] = { "rx jabber", IXGBE_RJC, 32,
	    KSTAT_KV_U_PACKETS },
	[ix_counter_tor] = { "rx total", IXGBE_TORL, 36,
	    KSTAT_KV_U_BYTES },
	[ix_counter_tpr] = { "rx total", IXGBE_TPR, 32,
	    KSTAT_KV_U_PACKETS },
	[ix_counter_tpt] = { "tx total", IXGBE_TPT, 32,
	    KSTAT_KV_U_PACKETS },
	[ix_counter_gprc] = { "rx good", IXGBE_GPRC, 32,
	    KSTAT_KV_U_PACKETS },
	[ix_counter_bprc] = { "rx bcast", IXGBE_BPRC, 32,
	    KSTAT_KV_U_PACKETS },
	[ix_counter_mprc] = { "rx mcast", IXGBE_MPRC, 32,
	    KSTAT_KV_U_PACKETS },
	[ix_counter_ptc64] = { "tx 64B", IXGBE_PTC64, 32,
	    KSTAT_KV_U_PACKETS },
	[ix_counter_ptc127] = { "tx 65-127B", IXGBE_PTC127, 32,
	    KSTAT_KV_U_PACKETS },
	[ix_counter_ptc255] = { "tx 128-255B", IXGBE_PTC255, 32,
	    KSTAT_KV_U_PACKETS },
	[ix_counter_ptc511] = { "tx 256-511B", IXGBE_PTC511, 32,
	    KSTAT_KV_U_PACKETS },
	[ix_counter_ptc1023] = { "tx 512-1023B", IXGBE_PTC1023, 32,
	    KSTAT_KV_U_PACKETS },
	[ix_counter_ptc1522] = { "tx 1024-maxB", IXGBE_PTC1522, 32,
	    KSTAT_KV_U_PACKETS },
	[ix_counter_mptc] = { "tx mcast", IXGBE_MPTC, 32,
	    KSTAT_KV_U_PACKETS },
	[ix_counter_bptc] = { "tx bcast", IXGBE_BPTC, 32,
	    KSTAT_KV_U_PACKETS },
};

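/*
 * Table notes: a reg of 0 (lxonrxc/lxoffrxc) marks a counter whose
 * register offset differs by MAC generation; ix_kstats_read() skips
 * these in its main loop and handles them explicitly afterwards.  A
 * width greater than 32 marks a 36-bit counter split across the
 * register pair at reg/reg+4.
 */
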
struct ix_rxq_kstats {
	struct kstat_kv	qprc;
	struct kstat_kv	qbrc;
	struct kstat_kv	qprdc;
};

static const struct ix_rxq_kstats ix_rxq_kstats_tpl = {
	KSTAT_KV_UNIT_INITIALIZER("packets",
	    KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS),
	KSTAT_KV_UNIT_INITIALIZER("bytes",
	    KSTAT_KV_T_COUNTER64, KSTAT_KV_U_BYTES),
	KSTAT_KV_UNIT_INITIALIZER("qdrops",
	    KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS),
};

struct ix_txq_kstats {
	struct kstat_kv	qptc;
	struct kstat_kv	qbtc;
};

static const struct ix_txq_kstats ix_txq_kstats_tpl = {
	KSTAT_KV_UNIT_INITIALIZER("packets",
	    KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS),
	KSTAT_KV_UNIT_INITIALIZER("bytes",
	    KSTAT_KV_T_COUNTER64, KSTAT_KV_U_BYTES),
};

static int	ix_kstats_read(struct kstat *ks);
static int	ix_rxq_kstats_read(struct kstat *ks);
static int	ix_txq_kstats_read(struct kstat *ks);

static void
ix_kstats(struct ix_softc *sc)
{
	struct kstat *ks;
	struct kstat_kv *kvs;
	unsigned int i;

	mtx_init(&sc->sc_kstat_mtx, IPL_SOFTCLOCK);
	timeout_set(&sc->sc_kstat_tmo, ix_kstats_tick, sc);

	ks = kstat_create(sc->dev.dv_xname, 0, "ix-stats", 0,
	    KSTAT_T_KV, 0);
	if (ks == NULL)
		return;

	kvs = mallocarray(nitems(ix_counters), sizeof(*kvs),
	    M_DEVBUF, M_WAITOK|M_ZERO);

	for (i = 0; i < nitems(ix_counters); i++) {
		const struct ix_counter *ixc = &ix_counters[i];

		kstat_kv_unit_init(&kvs[i], ixc->name,
		    KSTAT_KV_T_COUNTER64, ixc->unit);
	}

	kstat_set_mutex(ks, &sc->sc_kstat_mtx);
	ks->ks_softc = sc;
	ks->ks_data = kvs;
	ks->ks_datalen = nitems(ix_counters) * sizeof(*kvs);
	ks->ks_read = ix_kstats_read;

	sc->sc_kstat = ks;
	kstat_install(ks);
}

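/*
 * Usage sketch (assuming the stock kstat(1) tool): the kstat created
 * above is named after the device, so a hypothetical ix0 adapter
 * would expose "ix0:0:ix-stats:0", which can be dumped from userland
 * with something like "kstat ix0".
 */
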
static void
ix_rxq_kstats(struct ix_softc *sc, struct rx_ring *rxr)
{
	struct ix_rxq_kstats *stats;
	struct kstat *ks;

	ks = kstat_create(sc->dev.dv_xname, 0, "ix-rxq", rxr->me,
	    KSTAT_T_KV, 0);
	if (ks == NULL)
		return;

	stats = malloc(sizeof(*stats), M_DEVBUF, M_WAITOK|M_ZERO);
	*stats = ix_rxq_kstats_tpl;

	kstat_set_mutex(ks, &sc->sc_kstat_mtx);
	ks->ks_softc = rxr;
	ks->ks_data = stats;
	ks->ks_datalen = sizeof(*stats);
	ks->ks_read = ix_rxq_kstats_read;

	rxr->kstat = ks;
	kstat_install(ks);
}

static void
ix_txq_kstats(struct ix_softc *sc, struct tx_ring *txr)
{
	struct ix_txq_kstats *stats;
	struct kstat *ks;

	ks = kstat_create(sc->dev.dv_xname, 0, "ix-txq", txr->me,
	    KSTAT_T_KV, 0);
	if (ks == NULL)
		return;

	stats = malloc(sizeof(*stats), M_DEVBUF, M_WAITOK|M_ZERO);
	*stats = ix_txq_kstats_tpl;

	kstat_set_mutex(ks, &sc->sc_kstat_mtx);
	ks->ks_softc = txr;
	ks->ks_data = stats;
	ks->ks_datalen = sizeof(*stats);
	ks->ks_read = ix_txq_kstats_read;

	txr->kstat = ks;
	kstat_install(ks);
}

/**********************************************************************
 *
 *  Update the board statistics counters.
 *
 **********************************************************************/

static void
ix_kstats_tick(void *arg)
{
	struct ix_softc *sc = arg;
	int i;

	timeout_add_sec(&sc->sc_kstat_tmo, 1);

	mtx_enter(&sc->sc_kstat_mtx);
	ix_kstats_read(sc->sc_kstat);
	for (i = 0; i < sc->num_queues; i++) {
		ix_rxq_kstats_read(sc->rx_rings[i].kstat);
		ix_txq_kstats_read(sc->tx_rings[i].kstat);
	}
	mtx_leave(&sc->sc_kstat_mtx);
}

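/*
 * The once-a-second tick above matters because the hardware counters
 * are at most 36 bits wide and most clear on read; folding them into
 * the 64-bit kstat values on a timer keeps the accumulated totals
 * from missing wraps between userland reads.
 */
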
static uint64_t
ix_read36(struct ixgbe_hw *hw, bus_size_t loreg, bus_size_t hireg)
{
	uint64_t lo, hi;

	lo = IXGBE_READ_REG(hw, loreg);
	hi = IXGBE_READ_REG(hw, hireg);

	return (((hi & 0xf) << 32) | lo);
}

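/*
 * Illustrative example: if loreg reads 0xDEADBEEF and hireg reads
 * 0x00000007, the result is (0x7ULL << 32) | 0xDEADBEEF.  Only the
 * low four bits of the high register hold counter state, hence the
 * "& 0xf" mask.
 */
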
static int
ix_kstats_read(struct kstat *ks)
{
	struct ix_softc *sc = ks->ks_softc;
	struct kstat_kv *kvs = ks->ks_data;
	struct ixgbe_hw	*hw = &sc->hw;
	unsigned int i;

	for (i = 0; i < nitems(ix_counters); i++) {
		const struct ix_counter *ixc = &ix_counters[i];
		uint32_t reg = ixc->reg;
		uint64_t v;

		if (reg == 0)
			continue;

		if (ixc->width > 32) {
			if (sc->hw.mac.type == ixgbe_mac_82598EB)
				v = IXGBE_READ_REG(hw, reg + 4);
			else
				v = ix_read36(hw, reg, reg + 4);
		} else
			v = IXGBE_READ_REG(hw, reg);

		kstat_kv_u64(&kvs[i]) += v;
	}

	/* handle the exceptions */
	if (sc->hw.mac.type == ixgbe_mac_82598EB) {
		kstat_kv_u64(&kvs[ix_counter_lxonrxc]) +=
		    IXGBE_READ_REG(hw, IXGBE_LXONRXC);
		kstat_kv_u64(&kvs[ix_counter_lxoffrxc]) +=
		    IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
	} else {
		kstat_kv_u64(&kvs[ix_counter_lxonrxc]) +=
		    IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
		kstat_kv_u64(&kvs[ix_counter_lxoffrxc]) +=
		    IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
	}

	getnanouptime(&ks->ks_updated);

	return (0);
}

int
ix_rxq_kstats_read(struct kstat *ks)
{
	struct ix_rxq_kstats *stats = ks->ks_data;
	struct rx_ring *rxr = ks->ks_softc;
	struct ix_softc *sc = rxr->sc;
	struct ixgbe_hw	*hw = &sc->hw;
	uint32_t i = rxr->me;

	kstat_kv_u64(&stats->qprc) += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
	if (sc->hw.mac.type == ixgbe_mac_82598EB) {
		kstat_kv_u64(&stats->qprdc) +=
		    IXGBE_READ_REG(hw, IXGBE_RNBC(i));
		kstat_kv_u64(&stats->qbrc) +=
		    IXGBE_READ_REG(hw, IXGBE_QBRC(i));
	} else {
		kstat_kv_u64(&stats->qprdc) +=
		    IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
		kstat_kv_u64(&stats->qbrc) +=
		    ix_read36(hw, IXGBE_QBRC_L(i), IXGBE_QBRC_H(i));
	}

	getnanouptime(&ks->ks_updated);

	return (0);
}

int
ix_txq_kstats_read(struct kstat *ks)
{
	struct ix_txq_kstats *stats = ks->ks_data;
	struct tx_ring *txr = ks->ks_softc;
	struct ix_softc *sc = txr->sc;
	struct ixgbe_hw	*hw = &sc->hw;
	uint32_t i = txr->me;

	kstat_kv_u64(&stats->qptc) += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
	if (sc->hw.mac.type == ixgbe_mac_82598EB) {
		kstat_kv_u64(&stats->qbtc) +=
		    IXGBE_READ_REG(hw, IXGBE_QBTC(i));
	} else {
		kstat_kv_u64(&stats->qbtc) +=
		    ix_read36(hw, IXGBE_QBTC_L(i), IXGBE_QBTC_H(i));
	}

	getnanouptime(&ks->ks_updated);

	return (0);
}
#endif /* NKSTAT > 0 */

void
ixgbe_map_queue_statistics(struct ix_softc *sc)
{
	int i;
	uint32_t r;

	for (i = 0; i < 32; i++) {
		/*
		 * Each RQSMR/TQSM register maps four queues, one per
		 * byte.  The first four registers (i < 4) map queues
		 * 0-15 1:1 onto statistics counters 0-15:
		 *   Queue 0 -> Counter 0
		 *   Queue 1 -> Counter 1
		 *   Queue 2 -> Counter 2 ...
		 * Queues 16-127 are all mapped to Counter 0.
		 */
		if (i < 4) {
			r = (i * 4 + 0);
			r |= (i * 4 + 1) << 8;
			r |= (i * 4 + 2) << 16;
			r |= (i * 4 + 3) << 24;
		} else
			r = 0;

		IXGBE_WRITE_REG(&sc->hw, IXGBE_RQSMR(i), r);
		IXGBE_WRITE_REG(&sc->hw, IXGBE_TQSM(i), r);
	}
}
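
/*
 * Worked example: for i == 1 the loop packs queue indexes 4..7 one
 * per byte, r = 4 | (5 << 8) | (6 << 16) | (7 << 24) == 0x07060504,
 * steering queues 4-7 to statistics counters 4-7.
 */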