| File: | dev/pci/if_pcn.c |
| Warning: | line 564, column 16 Value stored to 'ifp' during its initialization is never read |
Press '?' to see keyboard shortcuts
Keyboard shortcuts:
| 1 | /* $OpenBSD: if_pcn.c,v 1.45 2022/01/09 05:42:54 jsg Exp $ */ |
| 2 | /* $NetBSD: if_pcn.c,v 1.26 2005/05/07 09:15:44 is Exp $ */ |
| 3 | |
| 4 | /* |
| 5 | * Copyright (c) 2001 Wasabi Systems, Inc. |
| 6 | * All rights reserved. |
| 7 | * |
| 8 | * Written by Jason R. Thorpe for Wasabi Systems, Inc. |
| 9 | * |
| 10 | * Redistribution and use in source and binary forms, with or without |
| 11 | * modification, are permitted provided that the following conditions |
| 12 | * are met: |
| 13 | * 1. Redistributions of source code must retain the above copyright |
| 14 | * notice, this list of conditions and the following disclaimer. |
| 15 | * 2. Redistributions in binary form must reproduce the above copyright |
| 16 | * notice, this list of conditions and the following disclaimer in the |
| 17 | * documentation and/or other materials provided with the distribution. |
| 18 | * 3. All advertising materials mentioning features or use of this software |
| 19 | * must display the following acknowledgement: |
| 20 | * This product includes software developed for the NetBSD Project by |
| 21 | * Wasabi Systems, Inc. |
| 22 | * 4. The name of Wasabi Systems, Inc. may not be used to endorse |
| 23 | * or promote products derived from this software without specific prior |
| 24 | * written permission. |
| 25 | * |
| 26 | * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND |
| 27 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED |
| 28 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
| 29 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC |
| 30 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
| 31 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
| 32 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
| 33 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
| 34 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
| 35 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
| 36 | * POSSIBILITY OF SUCH DAMAGE. |
| 37 | */ |
| 38 | |
| 39 | /* |
| 40 | * Device driver for the AMD PCnet-PCI series of Ethernet |
| 41 | * chips: |
| 42 | * |
| 43 | * * Am79c970 PCnet-PCI Single-Chip Ethernet Controller for PCI |
| 44 | * Local Bus |
| 45 | * |
| 46 | * * Am79c970A PCnet-PCI II Single-Chip Full-Duplex Ethernet Controller |
| 47 | * for PCI Local Bus |
| 48 | * |
| 49 | * * Am79c971 PCnet-FAST Single-Chip Full-Duplex 10/100Mbps |
| 50 | * Ethernet Controller for PCI Local Bus |
| 51 | * |
| 52 | * * Am79c972 PCnet-FAST+ Enhanced 10/100Mbps PCI Ethernet Controller |
| 53 | * with OnNow Support |
| 54 | * |
| 55 | * * Am79c973/Am79c975 PCnet-FAST III Single-Chip 10/100Mbps PCI |
| 56 | * Ethernet Controller with Integrated PHY |
| 57 | * |
| 58 | * This also supports the virtual PCnet-PCI Ethernet interface found |
| 59 | * in VMware. |
| 60 | * |
| 61 | * TODO: |
| 62 | * |
| 63 | * * Split this into bus-specific and bus-independent portions. |
| 64 | * The core could also be used for the ILACC (Am79900) 32-bit |
| 65 | * Ethernet chip (XXX only if we use an ILACC-compatible SWSTYLE). |
| 66 | */ |
| 67 | |
| 68 | #include "bpfilter.h" |
| 69 | |
| 70 | #include <sys/param.h> |
| 71 | #include <sys/systm.h> |
| 72 | #include <sys/timeout.h> |
| 73 | #include <sys/mbuf.h> |
| 74 | #include <sys/malloc.h> |
| 75 | #include <sys/kernel.h> |
| 76 | #include <sys/socket.h> |
| 77 | #include <sys/ioctl.h> |
| 78 | #include <sys/errno.h> |
| 79 | #include <sys/device.h> |
| 80 | #include <sys/queue.h> |
| 81 | #include <sys/endian.h> |
| 82 | |
| 83 | #include <net/if.h> |
| 84 | #include <net/if_dl.h> |
| 85 | |
| 86 | #include <netinet/in.h> |
| 87 | #include <netinet/if_ether.h> |
| 88 | |
| 89 | #include <net/if_media.h> |
| 90 | |
| 91 | #if NBPFILTER1 > 0 |
| 92 | #include <net/bpf.h> |
| 93 | #endif |
| 94 | |
| 95 | #include <machine/bus.h> |
| 96 | #include <machine/intr.h> |
| 97 | |
| 98 | #include <dev/mii/miivar.h> |
| 99 | |
| 100 | #include <dev/ic/am79900reg.h> |
| 101 | #include <dev/ic/lancereg.h> |
| 102 | |
| 103 | #include <dev/pci/pcireg.h> |
| 104 | #include <dev/pci/pcivar.h> |
| 105 | #include <dev/pci/pcidevs.h> |
| 106 | |
| 107 | /* |
| 108 | * Register definitions for the AMD PCnet-PCI series of Ethernet |
| 109 | * chips. |
| 110 | * |
| 111 | * These are only the registers that we access directly from PCI |
| 112 | * space. Everything else (accessed via the RAP + RDP/BDP) is |
| 113 | * defined in <dev/ic/lancereg.h>. |
| 114 | */ |
| 115 | |
| 116 | /* |
| 117 | * PCI configuration space. |
| 118 | */ |
| 119 | |
| 120 | #define PCN_PCI_CBIO(0x10 + 0x00) (PCI_MAPREG_START0x10 + 0x00) |
| 121 | #define PCN_PCI_CBMEM(0x10 + 0x04) (PCI_MAPREG_START0x10 + 0x04) |
| 122 | |
| 123 | /* |
| 124 | * I/O map in Word I/O mode. |
| 125 | */ |
| 126 | |
| 127 | #define PCN16_APROM0x00 0x00 |
| 128 | #define PCN16_RDP0x10 0x10 |
| 129 | #define PCN16_RAP0x12 0x12 |
| 130 | #define PCN16_RESET0x14 0x14 |
| 131 | #define PCN16_BDP0x16 0x16 |
| 132 | |
| 133 | /* |
| 134 | * I/O map in DWord I/O mode. |
| 135 | */ |
| 136 | |
| 137 | #define PCN32_APROM0x00 0x00 |
| 138 | #define PCN32_RDP0x10 0x10 |
| 139 | #define PCN32_RAP0x14 0x14 |
| 140 | #define PCN32_RESET0x18 0x18 |
| 141 | #define PCN32_BDP0x1c 0x1c |
| 142 | |
| 143 | /* |
| 144 | * Transmit descriptor list size. This is arbitrary, but allocate |
| 145 | * enough descriptors for 128 pending transmissions, and 4 segments |
| 146 | * per packet. This MUST work out to a power of 2. |
| 147 | * |
| 148 | * NOTE: We can't have any more than 512 Tx descriptors, SO BE CAREFUL! |
| 149 | * |
| 150 | * So we play a little trick here. We give each packet up to 16 |
| 151 | * DMA segments, but only allocate the max of 512 descriptors. The |
| 152 | * transmit logic can deal with this, we just are hoping to sneak by. |
| 153 | */ |
| 154 | #define PCN_NTXSEGS16 16 |
| 155 | |
| 156 | #define PCN_TXQUEUELEN128 128 |
| 157 | #define PCN_TXQUEUELEN_MASK(128 - 1) (PCN_TXQUEUELEN128 - 1) |
| 158 | #define PCN_NTXDESC512 512 |
| 159 | #define PCN_NTXDESC_MASK(512 - 1) (PCN_NTXDESC512 - 1) |
| 160 | #define PCN_NEXTTX(x)(((x) + 1) & (512 - 1)) (((x) + 1) & PCN_NTXDESC_MASK(512 - 1)) |
| 161 | #define PCN_NEXTTXS(x)(((x) + 1) & (128 - 1)) (((x) + 1) & PCN_TXQUEUELEN_MASK(128 - 1)) |
| 162 | |
| 163 | /* Tx interrupt every N + 1 packets. */ |
| 164 | #define PCN_TXINTR_MASK7 7 |
| 165 | |
| 166 | /* |
| 167 | * Receive descriptor list size. We have one Rx buffer per incoming |
| 168 | * packet, so this logic is a little simpler. |
| 169 | */ |
| 170 | #define PCN_NRXDESC128 128 |
| 171 | #define PCN_NRXDESC_MASK(128 - 1) (PCN_NRXDESC128 - 1) |
| 172 | #define PCN_NEXTRX(x)(((x) + 1) & (128 - 1)) (((x) + 1) & PCN_NRXDESC_MASK(128 - 1)) |
| 173 | |
| 174 | /* |
| 175 | * Control structures are DMA'd to the PCnet chip. We allocate them in |
| 176 | * a single clump that maps to a single DMA segment to make several things |
| 177 | * easier. |
| 178 | */ |
| 179 | struct pcn_control_data { |
| 180 | /* The transmit descriptors. */ |
| 181 | struct letmd pcd_txdescs[PCN_NTXDESC512]; |
| 182 | |
| 183 | /* The receive descriptors. */ |
| 184 | struct lermd pcd_rxdescs[PCN_NRXDESC128]; |
| 185 | |
| 186 | /* The init block. */ |
| 187 | struct leinit pcd_initblock; |
| 188 | }; |
| 189 | |
| 190 | #define PCN_CDOFF(x)__builtin_offsetof(struct pcn_control_data, x) offsetof(struct pcn_control_data, x)__builtin_offsetof(struct pcn_control_data, x) |
| 191 | #define PCN_CDTXOFF(x)__builtin_offsetof(struct pcn_control_data, pcd_txdescs[(x)]) PCN_CDOFF(pcd_txdescs[(x)])__builtin_offsetof(struct pcn_control_data, pcd_txdescs[(x)]) |
| 192 | #define PCN_CDRXOFF(x)__builtin_offsetof(struct pcn_control_data, pcd_rxdescs[(x)]) PCN_CDOFF(pcd_rxdescs[(x)])__builtin_offsetof(struct pcn_control_data, pcd_rxdescs[(x)]) |
| 193 | #define PCN_CDINITOFF__builtin_offsetof(struct pcn_control_data, pcd_initblock) PCN_CDOFF(pcd_initblock)__builtin_offsetof(struct pcn_control_data, pcd_initblock) |
| 194 | |
| 195 | /* |
| 196 | * Software state for transmit jobs. |
| 197 | */ |
| 198 | struct pcn_txsoft { |
| 199 | struct mbuf *txs_mbuf; /* head of our mbuf chain */ |
| 200 | bus_dmamap_t txs_dmamap; /* our DMA map */ |
| 201 | int txs_firstdesc; /* first descriptor in packet */ |
| 202 | int txs_lastdesc; /* last descriptor in packet */ |
| 203 | }; |
| 204 | |
| 205 | /* |
| 206 | * Software state for receive jobs. |
| 207 | */ |
| 208 | struct pcn_rxsoft { |
| 209 | struct mbuf *rxs_mbuf; /* head of our mbuf chain */ |
| 210 | bus_dmamap_t rxs_dmamap; /* our DMA map */ |
| 211 | }; |
| 212 | |
| 213 | /* |
| 214 | * Description of Rx FIFO watermarks for various revisions. |
| 215 | */ |
| 216 | static const char * const pcn_79c970_rcvfw[] = { |
| 217 | "16 bytes", |
| 218 | "64 bytes", |
| 219 | "128 bytes", |
| 220 | NULL((void *)0), |
| 221 | }; |
| 222 | |
| 223 | static const char * const pcn_79c971_rcvfw[] = { |
| 224 | "16 bytes", |
| 225 | "64 bytes", |
| 226 | "112 bytes", |
| 227 | NULL((void *)0), |
| 228 | }; |
| 229 | |
| 230 | /* |
| 231 | * Description of Tx start points for various revisions. |
| 232 | */ |
| 233 | static const char * const pcn_79c970_xmtsp[] = { |
| 234 | "8 bytes", |
| 235 | "64 bytes", |
| 236 | "128 bytes", |
| 237 | "248 bytes", |
| 238 | }; |
| 239 | |
| 240 | static const char * const pcn_79c971_xmtsp[] = { |
| 241 | "20 bytes", |
| 242 | "64 bytes", |
| 243 | "128 bytes", |
| 244 | "248 bytes", |
| 245 | }; |
| 246 | |
| 247 | static const char * const pcn_79c971_xmtsp_sram[] = { |
| 248 | "44 bytes", |
| 249 | "64 bytes", |
| 250 | "128 bytes", |
| 251 | "store-and-forward", |
| 252 | }; |
| 253 | |
| 254 | /* |
| 255 | * Description of Tx FIFO watermarks for various revisions. |
| 256 | */ |
| 257 | static const char * const pcn_79c970_xmtfw[] = { |
| 258 | "16 bytes", |
| 259 | "64 bytes", |
| 260 | "128 bytes", |
| 261 | NULL((void *)0), |
| 262 | }; |
| 263 | |
| 264 | static const char * const pcn_79c971_xmtfw[] = { |
| 265 | "16 bytes", |
| 266 | "64 bytes", |
| 267 | "108 bytes", |
| 268 | NULL((void *)0), |
| 269 | }; |
| 270 | |
| 271 | /* |
| 272 | * Software state per device. |
| 273 | */ |
| 274 | struct pcn_softc { |
| 275 | struct device sc_dev; /* generic device information */ |
| 276 | bus_space_tag_t sc_st; /* bus space tag */ |
| 277 | bus_space_handle_t sc_sh; /* bus space handle */ |
| 278 | bus_dma_tag_t sc_dmat; /* bus DMA tag */ |
| 279 | struct arpcom sc_arpcom; /* Ethernet common data */ |
| 280 | |
| 281 | /* Points to our media routines, etc. */ |
| 282 | const struct pcn_variant *sc_variant; |
| 283 | |
| 284 | void *sc_ih; /* interrupt cookie */ |
| 285 | |
| 286 | struct mii_data sc_mii; /* MII/media information */ |
| 287 | |
| 288 | struct timeout sc_tick_timeout; /* tick timeout */ |
| 289 | |
| 290 | bus_dmamap_t sc_cddmamap; /* control data DMA map */ |
| 291 | #define sc_cddmasc_cddmamap->dm_segs[0].ds_addr sc_cddmamap->dm_segs[0].ds_addr |
| 292 | |
| 293 | /* Software state for transmit and receive descriptors. */ |
| 294 | struct pcn_txsoft sc_txsoft[PCN_TXQUEUELEN128]; |
| 295 | struct pcn_rxsoft sc_rxsoft[PCN_NRXDESC128]; |
| 296 | |
| 297 | /* Control data structures */ |
| 298 | struct pcn_control_data *sc_control_data; |
| 299 | #define sc_txdescssc_control_data->pcd_txdescs sc_control_data->pcd_txdescs |
| 300 | #define sc_rxdescssc_control_data->pcd_rxdescs sc_control_data->pcd_rxdescs |
| 301 | #define sc_initblocksc_control_data->pcd_initblock sc_control_data->pcd_initblock |
| 302 | |
| 303 | const char * const *sc_rcvfw_desc; /* Rx FIFO watermark info */ |
| 304 | int sc_rcvfw; |
| 305 | |
| 306 | const char * const *sc_xmtsp_desc; /* Tx start point info */ |
| 307 | int sc_xmtsp; |
| 308 | |
| 309 | const char * const *sc_xmtfw_desc; /* Tx FIFO watermark info */ |
| 310 | int sc_xmtfw; |
| 311 | |
| 312 | int sc_flags; /* misc. flags; see below */ |
| 313 | int sc_swstyle; /* the software style in use */ |
| 314 | |
| 315 | int sc_txfree; /* number of free Tx descriptors */ |
| 316 | int sc_txnext; /* next ready Tx descriptor */ |
| 317 | |
| 318 | int sc_txsfree; /* number of free Tx jobs */ |
| 319 | int sc_txsnext; /* next free Tx job */ |
| 320 | int sc_txsdirty; /* dirty Tx jobs */ |
| 321 | |
| 322 | int sc_rxptr; /* next ready Rx descriptor/job */ |
| 323 | |
| 324 | uint32_t sc_csr5; /* prototype CSR5 register */ |
| 325 | uint32_t sc_mode; /* prototype MODE register */ |
| 326 | }; |
| 327 | |
| 328 | /* sc_flags */ |
| 329 | #define PCN_F_HAS_MII0x0001 0x0001 /* has MII */ |
| 330 | |
| 331 | #define PCN_CDTXADDR(sc, x)((sc)->sc_cddmamap->dm_segs[0].ds_addr + __builtin_offsetof (struct pcn_control_data, pcd_txdescs[((x))])) ((sc)->sc_cddmasc_cddmamap->dm_segs[0].ds_addr + PCN_CDTXOFF((x))__builtin_offsetof(struct pcn_control_data, pcd_txdescs[((x)) ])) |
| 332 | #define PCN_CDRXADDR(sc, x)((sc)->sc_cddmamap->dm_segs[0].ds_addr + __builtin_offsetof (struct pcn_control_data, pcd_rxdescs[((x))])) ((sc)->sc_cddmasc_cddmamap->dm_segs[0].ds_addr + PCN_CDRXOFF((x))__builtin_offsetof(struct pcn_control_data, pcd_rxdescs[((x)) ])) |
| 333 | #define PCN_CDINITADDR(sc)((sc)->sc_cddmamap->dm_segs[0].ds_addr + __builtin_offsetof (struct pcn_control_data, pcd_initblock)) ((sc)->sc_cddmasc_cddmamap->dm_segs[0].ds_addr + PCN_CDINITOFF__builtin_offsetof(struct pcn_control_data, pcd_initblock)) |
| 334 | |
| 335 | #define PCN_CDTXSYNC(sc, x, n, ops)do { int __x, __n; __x = (x); __n = (n); if ((__x + __n) > 512) { (*((sc)->sc_dmat)->_dmamap_sync)(((sc)->sc_dmat ), ((sc)->sc_cddmamap), (__builtin_offsetof(struct pcn_control_data , pcd_txdescs[(__x)])), (sizeof(struct letmd) * (512 - __x)), ((ops))); __n -= (512 - __x); __x = 0; } (*((sc)->sc_dmat )->_dmamap_sync)(((sc)->sc_dmat), ((sc)->sc_cddmamap ), (__builtin_offsetof(struct pcn_control_data, pcd_txdescs[( __x)])), (sizeof(struct letmd) * __n), ((ops))); } while ( 0) \ |
| 336 | do { \ |
| 337 | int __x, __n; \ |
| 338 | \ |
| 339 | __x = (x); \ |
| 340 | __n = (n); \ |
| 341 | \ |
| 342 | /* If it will wrap around, sync to the end of the ring. */ \ |
| 343 | if ((__x + __n) > PCN_NTXDESC512) { \ |
| 344 | bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \(*((sc)->sc_dmat)->_dmamap_sync)(((sc)->sc_dmat), (( sc)->sc_cddmamap), (__builtin_offsetof(struct pcn_control_data , pcd_txdescs[(__x)])), (sizeof(struct letmd) * (512 - __x)), ((ops))) |
| 345 | PCN_CDTXOFF(__x), sizeof(struct letmd) * \(*((sc)->sc_dmat)->_dmamap_sync)(((sc)->sc_dmat), (( sc)->sc_cddmamap), (__builtin_offsetof(struct pcn_control_data , pcd_txdescs[(__x)])), (sizeof(struct letmd) * (512 - __x)), ((ops))) |
| 346 | (PCN_NTXDESC - __x), (ops))(*((sc)->sc_dmat)->_dmamap_sync)(((sc)->sc_dmat), (( sc)->sc_cddmamap), (__builtin_offsetof(struct pcn_control_data , pcd_txdescs[(__x)])), (sizeof(struct letmd) * (512 - __x)), ((ops))); \ |
| 347 | __n -= (PCN_NTXDESC512 - __x); \ |
| 348 | __x = 0; \ |
| 349 | } \ |
| 350 | \ |
| 351 | /* Now sync whatever is left. */ \ |
| 352 | bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \(*((sc)->sc_dmat)->_dmamap_sync)(((sc)->sc_dmat), (( sc)->sc_cddmamap), (__builtin_offsetof(struct pcn_control_data , pcd_txdescs[(__x)])), (sizeof(struct letmd) * __n), ((ops)) ) |
| 353 | PCN_CDTXOFF(__x), sizeof(struct letmd) * __n, (ops))(*((sc)->sc_dmat)->_dmamap_sync)(((sc)->sc_dmat), (( sc)->sc_cddmamap), (__builtin_offsetof(struct pcn_control_data , pcd_txdescs[(__x)])), (sizeof(struct letmd) * __n), ((ops)) ); \ |
| 354 | } while (/*CONSTCOND*/0) |
| 355 | |
| 356 | #define PCN_CDRXSYNC(sc, x, ops)(*((sc)->sc_dmat)->_dmamap_sync)(((sc)->sc_dmat), (( sc)->sc_cddmamap), (__builtin_offsetof(struct pcn_control_data , pcd_rxdescs[((x))])), (sizeof(struct lermd)), ((ops))) \ |
| 357 | bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \(*((sc)->sc_dmat)->_dmamap_sync)(((sc)->sc_dmat), (( sc)->sc_cddmamap), (__builtin_offsetof(struct pcn_control_data , pcd_rxdescs[((x))])), (sizeof(struct lermd)), ((ops))) |
| 358 | PCN_CDRXOFF((x)), sizeof(struct lermd), (ops))(*((sc)->sc_dmat)->_dmamap_sync)(((sc)->sc_dmat), (( sc)->sc_cddmamap), (__builtin_offsetof(struct pcn_control_data , pcd_rxdescs[((x))])), (sizeof(struct lermd)), ((ops))) |
| 359 | |
| 360 | #define PCN_CDINITSYNC(sc, ops)(*((sc)->sc_dmat)->_dmamap_sync)(((sc)->sc_dmat), (( sc)->sc_cddmamap), (__builtin_offsetof(struct pcn_control_data , pcd_initblock)), (sizeof(struct leinit)), ((ops))) \ |
| 361 | bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \(*((sc)->sc_dmat)->_dmamap_sync)(((sc)->sc_dmat), (( sc)->sc_cddmamap), (__builtin_offsetof(struct pcn_control_data , pcd_initblock)), (sizeof(struct leinit)), ((ops))) |
| 362 | PCN_CDINITOFF, sizeof(struct leinit), (ops))(*((sc)->sc_dmat)->_dmamap_sync)(((sc)->sc_dmat), (( sc)->sc_cddmamap), (__builtin_offsetof(struct pcn_control_data , pcd_initblock)), (sizeof(struct leinit)), ((ops))) |
| 363 | |
| 364 | #define PCN_INIT_RXDESC(sc, x)do { struct pcn_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)]; struct lermd *__rmd = &(sc)->sc_control_data->pcd_rxdescs [(x)]; struct mbuf *__m = __rxs->rxs_mbuf; __m->m_hdr.mh_data = __m->M_dat.MH.MH_dat.MH_ext.ext_buf + 2; if ((sc)->sc_swstyle == 3) { __rmd->rmd2 = ((__uint32_t)(__rxs->rxs_dmamap-> dm_segs[0].ds_addr + 2)); __rmd->rmd0 = 0; } else { __rmd-> rmd2 = 0; __rmd->rmd0 = ((__uint32_t)(__rxs->rxs_dmamap ->dm_segs[0].ds_addr + 2)); } __rmd->rmd1 = ((__uint32_t )((1<<31)|(0xf<<12)| ((~((1 << 11) - 2) + 1 ) & (0xfff)))); (*(((sc))->sc_dmat)->_dmamap_sync)( (((sc))->sc_dmat), (((sc))->sc_cddmamap), (__builtin_offsetof (struct pcn_control_data, pcd_rxdescs[(((x)))])), (sizeof(struct lermd)), ((0x01|0x04)));} while( 0) \ |
| 365 | do { \ |
| 366 | struct pcn_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)]; \ |
| 367 | struct lermd *__rmd = &(sc)->sc_rxdescssc_control_data->pcd_rxdescs[(x)]; \ |
| 368 | struct mbuf *__m = __rxs->rxs_mbuf; \ |
| 369 | \ |
| 370 | /* \ |
| 371 | * Note: We scoot the packet forward 2 bytes in the buffer \ |
| 372 | * so that the payload after the Ethernet header is aligned \ |
| 373 | * to a 4-byte boundary. \ |
| 374 | */ \ |
| 375 | __m->m_datam_hdr.mh_data = __m->m_extM_dat.MH.MH_dat.MH_ext.ext_buf + 2; \ |
| 376 | \ |
| 377 | if ((sc)->sc_swstyle == LE_B20_SSTYLE_PCNETPCI33) { \ |
| 378 | __rmd->rmd2 = \ |
| 379 | htole32(__rxs->rxs_dmamap->dm_segs[0].ds_addr + 2)((__uint32_t)(__rxs->rxs_dmamap->dm_segs[0].ds_addr + 2 )); \ |
| 380 | __rmd->rmd0 = 0; \ |
| 381 | } else { \ |
| 382 | __rmd->rmd2 = 0; \ |
| 383 | __rmd->rmd0 = \ |
| 384 | htole32(__rxs->rxs_dmamap->dm_segs[0].ds_addr + 2)((__uint32_t)(__rxs->rxs_dmamap->dm_segs[0].ds_addr + 2 )); \ |
| 385 | } \ |
| 386 | __rmd->rmd1 = htole32(LE_R1_OWN|LE_R1_ONES| \((__uint32_t)((1<<31)|(0xf<<12)| ((~((1 << 11 ) - 2) + 1) & (0xfff)))) |
| 387 | (LE_BCNT(MCLBYTES - 2) & LE_R1_BCNT_MASK))((__uint32_t)((1<<31)|(0xf<<12)| ((~((1 << 11 ) - 2) + 1) & (0xfff)))); \ |
| 388 | PCN_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)(*(((sc))->sc_dmat)->_dmamap_sync)((((sc))->sc_dmat) , (((sc))->sc_cddmamap), (__builtin_offsetof(struct pcn_control_data , pcd_rxdescs[(((x)))])), (sizeof(struct lermd)), ((0x01|0x04 )));\ |
| 389 | } while(/*CONSTCOND*/0) |
| 390 | |
| 391 | void pcn_start(struct ifnet *); |
| 392 | void pcn_watchdog(struct ifnet *); |
| 393 | int pcn_ioctl(struct ifnet *, u_long, caddr_t); |
| 394 | int pcn_init(struct ifnet *); |
| 395 | void pcn_stop(struct ifnet *, int); |
| 396 | |
| 397 | void pcn_reset(struct pcn_softc *); |
| 398 | void pcn_rxdrain(struct pcn_softc *); |
| 399 | int pcn_add_rxbuf(struct pcn_softc *, int); |
| 400 | void pcn_tick(void *); |
| 401 | |
| 402 | void pcn_spnd(struct pcn_softc *); |
| 403 | |
| 404 | void pcn_set_filter(struct pcn_softc *); |
| 405 | |
| 406 | int pcn_intr(void *); |
| 407 | void pcn_txintr(struct pcn_softc *); |
| 408 | int pcn_rxintr(struct pcn_softc *); |
| 409 | |
| 410 | int pcn_mii_readreg(struct device *, int, int); |
| 411 | void pcn_mii_writereg(struct device *, int, int, int); |
| 412 | void pcn_mii_statchg(struct device *); |
| 413 | |
| 414 | void pcn_79c970_mediainit(struct pcn_softc *); |
| 415 | int pcn_79c970_mediachange(struct ifnet *); |
| 416 | void pcn_79c970_mediastatus(struct ifnet *, struct ifmediareq *); |
| 417 | |
| 418 | void pcn_79c971_mediainit(struct pcn_softc *); |
| 419 | int pcn_79c971_mediachange(struct ifnet *); |
| 420 | void pcn_79c971_mediastatus(struct ifnet *, struct ifmediareq *); |
| 421 | |
| 422 | /* |
| 423 | * Description of a PCnet-PCI variant. Used to select media access |
| 424 | * method, mostly, and to print a nice description of the chip. |
| 425 | */ |
| 426 | static const struct pcn_variant { |
| 427 | const char *pcv_desc; |
| 428 | void (*pcv_mediainit)(struct pcn_softc *); |
| 429 | uint16_t pcv_chipid; |
| 430 | } pcn_variants[] = { |
| 431 | { "Am79c970", |
| 432 | pcn_79c970_mediainit, |
| 433 | PARTID_Am79c9700x2430 }, |
| 434 | |
| 435 | { "Am79c970A", |
| 436 | pcn_79c970_mediainit, |
| 437 | PARTID_Am79c970A0x2621 }, |
| 438 | |
| 439 | { "Am79c971", |
| 440 | pcn_79c971_mediainit, |
| 441 | PARTID_Am79c9710x2623 }, |
| 442 | |
| 443 | { "Am79c972", |
| 444 | pcn_79c971_mediainit, |
| 445 | PARTID_Am79c9720x2624 }, |
| 446 | |
| 447 | { "Am79c973", |
| 448 | pcn_79c971_mediainit, |
| 449 | PARTID_Am79c9730x2625 }, |
| 450 | |
| 451 | { "Am79c975", |
| 452 | pcn_79c971_mediainit, |
| 453 | PARTID_Am79c9750x2627 }, |
| 454 | |
| 455 | { "Am79c976", |
| 456 | pcn_79c971_mediainit, |
| 457 | PARTID_Am79c9760x2628 }, |
| 458 | |
| 459 | { "Am79c978", |
| 460 | pcn_79c971_mediainit, |
| 461 | PARTID_Am79c9780x2626 }, |
| 462 | |
| 463 | { "Unknown", |
| 464 | pcn_79c971_mediainit, |
| 465 | 0 }, |
| 466 | }; |
| 467 | |
| 468 | int pcn_copy_small = 0; |
| 469 | |
| 470 | int pcn_match(struct device *, void *, void *); |
| 471 | void pcn_attach(struct device *, struct device *, void *); |
| 472 | |
| 473 | struct cfattach pcn_ca = { |
| 474 | sizeof(struct pcn_softc), pcn_match, pcn_attach, |
| 475 | }; |
| 476 | |
| 477 | const struct pci_matchid pcn_devices[] = { |
| 478 | { PCI_VENDOR_AMD0x1022, PCI_PRODUCT_AMD_PCNET_PCI0x2000 }, |
| 479 | { PCI_VENDOR_AMD0x1022, PCI_PRODUCT_AMD_PCHOME_PCI0x2001 } |
| 480 | }; |
| 481 | |
| 482 | struct cfdriver pcn_cd = { |
| 483 | NULL((void *)0), "pcn", DV_IFNET |
| 484 | }; |
| 485 | |
| 486 | /* |
| 487 | * Routines to read and write the PCnet-PCI CSR/BCR space. |
| 488 | */ |
| 489 | |
| 490 | static __inline uint32_t |
| 491 | pcn_csr_read(struct pcn_softc *sc, int reg) |
| 492 | { |
| 493 | |
| 494 | bus_space_write_4(sc->sc_st, sc->sc_sh, PCN32_RAP, reg)((sc->sc_st)->write_4((sc->sc_sh), (0x14), (reg))); |
| 495 | return (bus_space_read_4(sc->sc_st, sc->sc_sh, PCN32_RDP)((sc->sc_st)->read_4((sc->sc_sh), (0x10)))); |
| 496 | } |
| 497 | |
| 498 | static __inline void |
| 499 | pcn_csr_write(struct pcn_softc *sc, int reg, uint32_t val) |
| 500 | { |
| 501 | |
| 502 | bus_space_write_4(sc->sc_st, sc->sc_sh, PCN32_RAP, reg)((sc->sc_st)->write_4((sc->sc_sh), (0x14), (reg))); |
| 503 | bus_space_write_4(sc->sc_st, sc->sc_sh, PCN32_RDP, val)((sc->sc_st)->write_4((sc->sc_sh), (0x10), (val))); |
| 504 | } |
| 505 | |
| 506 | static __inline uint32_t |
| 507 | pcn_bcr_read(struct pcn_softc *sc, int reg) |
| 508 | { |
| 509 | |
| 510 | bus_space_write_4(sc->sc_st, sc->sc_sh, PCN32_RAP, reg)((sc->sc_st)->write_4((sc->sc_sh), (0x14), (reg))); |
| 511 | return (bus_space_read_4(sc->sc_st, sc->sc_sh, PCN32_BDP)((sc->sc_st)->read_4((sc->sc_sh), (0x1c)))); |
| 512 | } |
| 513 | |
| 514 | static __inline void |
| 515 | pcn_bcr_write(struct pcn_softc *sc, int reg, uint32_t val) |
| 516 | { |
| 517 | |
| 518 | bus_space_write_4(sc->sc_st, sc->sc_sh, PCN32_RAP, reg)((sc->sc_st)->write_4((sc->sc_sh), (0x14), (reg))); |
| 519 | bus_space_write_4(sc->sc_st, sc->sc_sh, PCN32_BDP, val)((sc->sc_st)->write_4((sc->sc_sh), (0x1c), (val))); |
| 520 | } |
| 521 | |
| 522 | static const struct pcn_variant * |
| 523 | pcn_lookup_variant(uint16_t chipid) |
| 524 | { |
| 525 | const struct pcn_variant *pcv; |
| 526 | |
| 527 | for (pcv = pcn_variants; pcv->pcv_chipid != 0; pcv++) { |
| 528 | if (chipid == pcv->pcv_chipid) |
| 529 | return (pcv); |
| 530 | } |
| 531 | |
| 532 | /* |
| 533 | * This covers unknown chips, which we simply treat like |
| 534 | * a generic PCnet-FAST. |
| 535 | */ |
| 536 | return (pcv); |
| 537 | } |
| 538 | |
| 539 | int |
| 540 | pcn_match(struct device *parent, void *match, void *aux) |
| 541 | { |
| 542 | struct pci_attach_args *pa = aux; |
| 543 | |
| 544 | /* |
| 545 | * IBM makes a PCI variant of this card which shows up as a |
| 546 | * Trident Microsystems 4DWAVE DX (ethernet network, revision 0x25) |
| 547 | * this card is truly a pcn card, so we have a special case match for |
| 548 | * it. |
| 549 | */ |
| 550 | if (PCI_VENDOR(pa->pa_id)(((pa->pa_id) >> 0) & 0xffff) == PCI_VENDOR_TRIDENT0x1023 && |
| 551 | PCI_PRODUCT(pa->pa_id)(((pa->pa_id) >> 16) & 0xffff) == PCI_PRODUCT_TRIDENT_4DWAVE_DX0x2000 && |
| 552 | PCI_CLASS(pa->pa_class)(((pa->pa_class) >> 24) & 0xff) == PCI_CLASS_NETWORK0x02) |
| 553 | return(1); |
| 554 | |
| 555 | return (pci_matchbyid((struct pci_attach_args *)aux, pcn_devices, |
| 556 | nitems(pcn_devices)(sizeof((pcn_devices)) / sizeof((pcn_devices)[0])))); |
| 557 | } |
| 558 | |
| 559 | void |
| 560 | pcn_attach(struct device *parent, struct device *self, void *aux) |
| 561 | { |
| 562 | struct pcn_softc *sc = (struct pcn_softc *) self; |
| 563 | struct pci_attach_args *pa = aux; |
| 564 | struct ifnet *ifp = &sc->sc_arpcom.ac_if; |
Value stored to 'ifp' during its initialization is never read | |
| 565 | pci_chipset_tag_t pc = pa->pa_pc; |
| 566 | pci_intr_handle_t ih; |
| 567 | const char *intrstr = NULL((void *)0); |
| 568 | bus_space_tag_t iot, memt; |
| 569 | bus_space_handle_t ioh, memh; |
| 570 | bus_dma_segment_t seg; |
| 571 | int ioh_valid, memh_valid; |
| 572 | int i, rseg, error; |
| 573 | uint32_t chipid, reg; |
| 574 | uint8_t enaddr[ETHER_ADDR_LEN6]; |
| 575 | |
| 576 | timeout_set(&sc->sc_tick_timeout, pcn_tick, sc); |
| 577 | |
| 578 | /* |
| 579 | * Map the device. |
| 580 | */ |
| 581 | ioh_valid = (pci_mapreg_map(pa, PCN_PCI_CBIO(0x10 + 0x00), PCI_MAPREG_TYPE_IO0x00000001, 0, |
| 582 | &iot, &ioh, NULL((void *)0), NULL((void *)0), 0) == 0); |
| 583 | memh_valid = (pci_mapreg_map(pa, PCN_PCI_CBMEM(0x10 + 0x04), |
| 584 | PCI_MAPREG_TYPE_MEM0x00000000|PCI_MAPREG_MEM_TYPE_32BIT0x00000000, 0, |
| 585 | &memt, &memh, NULL((void *)0), NULL((void *)0), 0) == 0); |
| 586 | |
| 587 | if (memh_valid) { |
| 588 | sc->sc_st = memt; |
| 589 | sc->sc_sh = memh; |
| 590 | } else if (ioh_valid) { |
| 591 | sc->sc_st = iot; |
| 592 | sc->sc_sh = ioh; |
| 593 | } else { |
| 594 | printf(": unable to map device registers\n"); |
| 595 | return; |
| 596 | } |
| 597 | |
| 598 | sc->sc_dmat = pa->pa_dmat; |
| 599 | |
| 600 | /* Get it out of power save mode, if needed. */ |
| 601 | pci_set_powerstate(pc, pa->pa_tag, PCI_PMCSR_STATE_D00x0000); |
| 602 | |
| 603 | /* |
| 604 | * Reset the chip to a known state. This also puts the |
| 605 | * chip into 32-bit mode. |
| 606 | */ |
| 607 | pcn_reset(sc); |
| 608 | |
| 609 | #if !defined(PCN_NO_PROM) |
| 610 | |
| 611 | /* |
| 612 | * Read the Ethernet address from the EEPROM. |
| 613 | */ |
| 614 | for (i = 0; i < ETHER_ADDR_LEN6; i++) |
| 615 | enaddr[i] = bus_space_read_1(sc->sc_st, sc->sc_sh,((sc->sc_st)->read_1((sc->sc_sh), (0x00 + i))) |
| 616 | PCN32_APROM + i)((sc->sc_st)->read_1((sc->sc_sh), (0x00 + i))); |
| 617 | #else |
| 618 | /* |
| 619 | * The PROM is not used; instead we assume that the MAC address |
| 620 | * has been programmed into the device's physical address |
| 621 | * registers by the boot firmware |
| 622 | */ |
| 623 | |
| 624 | for (i=0; i < 3; i++) { |
| 625 | uint32_t val; |
| 626 | val = pcn_csr_read(sc, LE_CSR120x000c + i); |
| 627 | enaddr[2*i] = val & 0x0ff; |
| 628 | enaddr[2*i+1] = (val >> 8) & 0x0ff; |
| 629 | } |
| 630 | #endif |
| 631 | |
| 632 | /* |
| 633 | * Now that the device is mapped, attempt to figure out what |
| 634 | * kind of chip we have. Note that IDL has all 32 bits of |
| 635 | * the chip ID when we're in 32-bit mode. |
| 636 | */ |
| 637 | chipid = pcn_csr_read(sc, LE_CSR880x0058); |
| 638 | sc->sc_variant = pcn_lookup_variant(CHIPID_PARTID(chipid)(((chipid) >> 12) & 0xffff)); |
| 639 | |
| 640 | /* |
| 641 | * Map and establish our interrupt. |
| 642 | */ |
| 643 | if (pci_intr_map(pa, &ih)) { |
| 644 | printf(": unable to map interrupt\n"); |
| 645 | return; |
| 646 | } |
| 647 | intrstr = pci_intr_string(pc, ih); |
| 648 | sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET0x7, pcn_intr, sc, |
| 649 | self->dv_xname); |
| 650 | if (sc->sc_ih == NULL((void *)0)) { |
| 651 | printf(": unable to establish interrupt"); |
| 652 | if (intrstr != NULL((void *)0)) |
| 653 | printf(" at %s", intrstr); |
| 654 | printf("\n"); |
| 655 | return; |
| 656 | } |
| 657 | |
| 658 | /* |
| 659 | * Allocate the control data structures, and create and load the |
| 660 | * DMA map for it. |
| 661 | */ |
| 662 | if ((error = bus_dmamem_alloc(sc->sc_dmat,(*(sc->sc_dmat)->_dmamem_alloc)((sc->sc_dmat), (sizeof (struct pcn_control_data)), ((1 << 12)), (0), (&seg ), (1), (&rseg), (0)) |
| 663 | sizeof(struct pcn_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,(*(sc->sc_dmat)->_dmamem_alloc)((sc->sc_dmat), (sizeof (struct pcn_control_data)), ((1 << 12)), (0), (&seg ), (1), (&rseg), (0)) |
| 664 | 0)(*(sc->sc_dmat)->_dmamem_alloc)((sc->sc_dmat), (sizeof (struct pcn_control_data)), ((1 << 12)), (0), (&seg ), (1), (&rseg), (0))) != 0) { |
| 665 | printf(": unable to allocate control data, error = %d\n", |
| 666 | error); |
| 667 | return; |
| 668 | } |
| 669 | |
| 670 | if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,(*(sc->sc_dmat)->_dmamem_map)((sc->sc_dmat), (&seg ), (rseg), (sizeof(struct pcn_control_data)), ((caddr_t *)& sc->sc_control_data), (0x0004)) |
| 671 | sizeof(struct pcn_control_data), (caddr_t *)&sc->sc_control_data,(*(sc->sc_dmat)->_dmamem_map)((sc->sc_dmat), (&seg ), (rseg), (sizeof(struct pcn_control_data)), ((caddr_t *)& sc->sc_control_data), (0x0004)) |
| 672 | BUS_DMA_COHERENT)(*(sc->sc_dmat)->_dmamem_map)((sc->sc_dmat), (&seg ), (rseg), (sizeof(struct pcn_control_data)), ((caddr_t *)& sc->sc_control_data), (0x0004))) != 0) { |
| 673 | printf(": unable to map control data, error = %d\n", |
| 674 | error); |
| 675 | goto fail_1; |
| 676 | } |
| 677 | |
| 678 | if ((error = bus_dmamap_create(sc->sc_dmat,(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), (sizeof (struct pcn_control_data)), (1), (sizeof(struct pcn_control_data )), (0), (0), (&sc->sc_cddmamap)) |
| 679 | sizeof(struct pcn_control_data), 1,(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), (sizeof (struct pcn_control_data)), (1), (sizeof(struct pcn_control_data )), (0), (0), (&sc->sc_cddmamap)) |
| 680 | sizeof(struct pcn_control_data), 0, 0, &sc->sc_cddmamap)(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), (sizeof (struct pcn_control_data)), (1), (sizeof(struct pcn_control_data )), (0), (0), (&sc->sc_cddmamap))) != 0) { |
| 681 | printf(": unable to create control data DMA map, " |
| 682 | "error = %d\n", error); |
| 683 | goto fail_2; |
| 684 | } |
| 685 | |
| 686 | if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,(*(sc->sc_dmat)->_dmamap_load)((sc->sc_dmat), (sc-> sc_cddmamap), (sc->sc_control_data), (sizeof(struct pcn_control_data )), (((void *)0)), (0)) |
| 687 | sc->sc_control_data, sizeof(struct pcn_control_data), NULL,(*(sc->sc_dmat)->_dmamap_load)((sc->sc_dmat), (sc-> sc_cddmamap), (sc->sc_control_data), (sizeof(struct pcn_control_data )), (((void *)0)), (0)) |
| 688 | 0)(*(sc->sc_dmat)->_dmamap_load)((sc->sc_dmat), (sc-> sc_cddmamap), (sc->sc_control_data), (sizeof(struct pcn_control_data )), (((void *)0)), (0))) != 0) { |
| 689 | printf(": unable to load control data DMA map, error = %d\n", |
| 690 | error); |
| 691 | goto fail_3; |
| 692 | } |
| 693 | |
| 694 | /* Create the transmit buffer DMA maps. */ |
| 695 | for (i = 0; i < PCN_TXQUEUELEN128; i++) { |
| 696 | if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), ((1 << 11)), (16), ((1 << 11)), (0), (0), (&sc->sc_txsoft [i].txs_dmamap)) |
| 697 | PCN_NTXSEGS, MCLBYTES, 0, 0,(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), ((1 << 11)), (16), ((1 << 11)), (0), (0), (&sc->sc_txsoft [i].txs_dmamap)) |
| 698 | &sc->sc_txsoft[i].txs_dmamap)(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), ((1 << 11)), (16), ((1 << 11)), (0), (0), (&sc->sc_txsoft [i].txs_dmamap))) != 0) { |
| 699 | printf(": unable to create tx DMA map %d, " |
| 700 | "error = %d\n", i, error); |
| 701 | goto fail_4; |
| 702 | } |
| 703 | } |
| 704 | |
| 705 | /* Create the receive buffer DMA maps. */ |
| 706 | for (i = 0; i < PCN_NRXDESC128; i++) { |
| 707 | if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), ((1 << 11)), (1), ((1 << 11)), (0), (0), (&sc->sc_rxsoft [i].rxs_dmamap)) |
| 708 | MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), ((1 << 11)), (1), ((1 << 11)), (0), (0), (&sc->sc_rxsoft [i].rxs_dmamap))) != 0) { |
| 709 | printf(": unable to create rx DMA map %d, " |
| 710 | "error = %d\n", i, error); |
| 711 | goto fail_5; |
| 712 | } |
| 713 | sc->sc_rxsoft[i].rxs_mbuf = NULL((void *)0); |
| 714 | } |
| 715 | |
| 716 | printf(", %s, rev %d: %s, address %s\n", sc->sc_variant->pcv_desc, |
| 717 | CHIPID_VER(chipid)(((chipid) >> 28) & 0x7), intrstr, ether_sprintf(enaddr)); |
| 718 | |
| 719 | /* Initialize our media structures. */ |
| 720 | (*sc->sc_variant->pcv_mediainit)(sc); |
| 721 | |
| 722 | /* |
| 723 | * Initialize FIFO watermark info. |
| 724 | */ |
| 725 | switch (sc->sc_variant->pcv_chipid) { |
| 726 | case PARTID_Am79c9700x2430: |
| 727 | case PARTID_Am79c970A0x2621: |
| 728 | sc->sc_rcvfw_desc = pcn_79c970_rcvfw; |
| 729 | sc->sc_xmtsp_desc = pcn_79c970_xmtsp; |
| 730 | sc->sc_xmtfw_desc = pcn_79c970_xmtfw; |
| 731 | break; |
| 732 | |
| 733 | default: |
| 734 | sc->sc_rcvfw_desc = pcn_79c971_rcvfw; |
| 735 | /* |
| 736 | * Read BCR25 to determine how much SRAM is |
| 737 | * on the board. If > 0, then we the chip |
| 738 | * uses different Start Point thresholds. |
| 739 | * |
| 740 | * Note BCR25 and BCR26 are loaded from the |
| 741 | * EEPROM on RST, and unaffected by S_RESET, |
| 742 | * so we don't really have to worry about |
| 743 | * them except for this. |
| 744 | */ |
| 745 | reg = pcn_bcr_read(sc, LE_BCR250x0019) & 0x00ff; |
| 746 | if (reg != 0) |
| 747 | sc->sc_xmtsp_desc = pcn_79c971_xmtsp_sram; |
| 748 | else |
| 749 | sc->sc_xmtsp_desc = pcn_79c971_xmtsp; |
| 750 | sc->sc_xmtfw_desc = pcn_79c971_xmtfw; |
| 751 | break; |
| 752 | } |
| 753 | |
| 754 | /* |
| 755 | * Set up defaults -- see the tables above for what these |
| 756 | * values mean. |
| 757 | * |
| 758 | * XXX How should we tune RCVFW and XMTFW? |
| 759 | */ |
| 760 | sc->sc_rcvfw = 1; /* minimum for full-duplex */ |
| 761 | sc->sc_xmtsp = 1; |
| 762 | sc->sc_xmtfw = 0; |
| 763 | |
| 764 | ifp = &sc->sc_arpcom.ac_if; |
| 765 | bcopy(enaddr, sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN6); |
| 766 | bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ16); |
| 767 | ifp->if_softc = sc; |
| 768 | ifp->if_flags = IFF_BROADCAST0x2 | IFF_SIMPLEX0x800 | IFF_MULTICAST0x8000; |
| 769 | ifp->if_ioctl = pcn_ioctl; |
| 770 | ifp->if_start = pcn_start; |
| 771 | ifp->if_watchdog = pcn_watchdog; |
| 772 | ifq_set_maxlen(&ifp->if_snd, PCN_NTXDESC -1)((&ifp->if_snd)->ifq_maxlen = (512 -1)); |
| 773 | |
| 774 | /* Attach the interface. */ |
| 775 | if_attach(ifp); |
| 776 | ether_ifattach(ifp); |
| 777 | return; |
| 778 | |
| 779 | /* |
| 780 | * Free any resources we've allocated during the failed attach |
| 781 | * attempt. Do this in reverse order and fall through. |
| 782 | */ |
| 783 | fail_5: |
| 784 | for (i = 0; i < PCN_NRXDESC128; i++) { |
| 785 | if (sc->sc_rxsoft[i].rxs_dmamap != NULL((void *)0)) |
| 786 | bus_dmamap_destroy(sc->sc_dmat,(*(sc->sc_dmat)->_dmamap_destroy)((sc->sc_dmat), (sc ->sc_rxsoft[i].rxs_dmamap)) |
| 787 | sc->sc_rxsoft[i].rxs_dmamap)(*(sc->sc_dmat)->_dmamap_destroy)((sc->sc_dmat), (sc ->sc_rxsoft[i].rxs_dmamap)); |
| 788 | } |
| 789 | fail_4: |
| 790 | for (i = 0; i < PCN_TXQUEUELEN128; i++) { |
| 791 | if (sc->sc_txsoft[i].txs_dmamap != NULL((void *)0)) |
| 792 | bus_dmamap_destroy(sc->sc_dmat,(*(sc->sc_dmat)->_dmamap_destroy)((sc->sc_dmat), (sc ->sc_txsoft[i].txs_dmamap)) |
| 793 | sc->sc_txsoft[i].txs_dmamap)(*(sc->sc_dmat)->_dmamap_destroy)((sc->sc_dmat), (sc ->sc_txsoft[i].txs_dmamap)); |
| 794 | } |
| 795 | bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap)(*(sc->sc_dmat)->_dmamap_unload)((sc->sc_dmat), (sc-> sc_cddmamap)); |
| 796 | fail_3: |
| 797 | bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap)(*(sc->sc_dmat)->_dmamap_destroy)((sc->sc_dmat), (sc ->sc_cddmamap)); |
| 798 | fail_2: |
| 799 | bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,(*(sc->sc_dmat)->_dmamem_unmap)((sc->sc_dmat), ((caddr_t )sc->sc_control_data), (sizeof(struct pcn_control_data))) |
| 800 | sizeof(struct pcn_control_data))(*(sc->sc_dmat)->_dmamem_unmap)((sc->sc_dmat), ((caddr_t )sc->sc_control_data), (sizeof(struct pcn_control_data))); |
| 801 | fail_1: |
| 802 | bus_dmamem_free(sc->sc_dmat, &seg, rseg)(*(sc->sc_dmat)->_dmamem_free)((sc->sc_dmat), (& seg), (rseg)); |
| 803 | } |
| 804 | |
/*
 * pcn_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.  Dequeues packets
 *	from ifp->if_snd, maps them for DMA, fills in transmit
 *	descriptors, and kicks the chip with a TDMD write.
 *
 *	Called from the network stack and from pcn_intr()/pcn_watchdog().
 */
void
pcn_start(struct ifnet *ifp)
{
	struct pcn_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct pcn_txsoft *txs;
	bus_dmamap_t dmamap;
	int error, nexttx, lasttx = -1, ofree, seg;

	/* Nothing to do if the interface is down or already saturated. */
	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
		return;

	/*
	 * Remember the previous number of free descriptors and
	 * the first descriptor we'll use.
	 */
	ofree = sc->sc_txfree;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	for (;;) {
		/* Grab a packet off the queue. */
		m0 = ifq_deq_begin(&ifp->if_snd);
		if (m0 == NULL)
			break;
		m = NULL;	/* non-NULL only if we copied into a fresh mbuf */

		/* Get a work queue entry. */
		if (sc->sc_txsfree == 0) {
			ifq_deq_rollback(&ifp->if_snd, m0);
			break;
		}

		txs = &sc->sc_txsoft[sc->sc_txsnext];
		dmamap = txs->txs_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
		 * were short on resources.  In this case, we'll copy
		 * and try again.
		 */
		if (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				ifq_deq_rollback(&ifp->if_snd, m0);
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					ifq_deq_rollback(&ifp->if_snd, m0);
					m_freem(m);
					break;
				}
			}
			/* Linearize the packet into the single new mbuf. */
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
			    m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
			if (error) {
				ifq_deq_rollback(&ifp->if_snd, m0);
				break;
			}
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.  Note, we always reserve one descriptor
		 * at the end of the ring as a termination point, to
		 * prevent wrap-around.
		 */
		if (dmamap->dm_nsegs > (sc->sc_txfree - 1)) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are not more slots left.
			 *
			 * XXX We could allocate an mbuf and copy, but
			 * XXX is it worth it?
			 */
			ifq_set_oactive(&ifp->if_snd);
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			m_freem(m);	/* m_freem(NULL) is a no-op */
			ifq_deq_rollback(&ifp->if_snd, m0);
			break;
		}

		ifq_deq_commit(&ifp->if_snd, m0);
		if (m != NULL) {
			/* We transmit the copy; the original is done. */
			m_freem(m0);
			m0 = m;
		}

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Initialize the transmit descriptors.  The two arms
		 * differ only in descriptor layout: with software style
		 * PCnet-PCI (style 3) the buffer address lives in tmd2,
		 * otherwise in tmd0.
		 */
		if (sc->sc_swstyle == LE_B20_SSTYLE_PCNETPCI3) {
			for (nexttx = sc->sc_txnext, seg = 0;
			     seg < dmamap->dm_nsegs;
			     seg++, nexttx = PCN_NEXTTX(nexttx)) {
				/*
				 * If this is the first descriptor we're
				 * enqueueing, don't set the OWN bit just
				 * yet.  That could cause a race condition.
				 * We'll do it below.
				 */
				sc->sc_txdescs[nexttx].tmd0 = 0;
				sc->sc_txdescs[nexttx].tmd2 =
				    htole32(dmamap->dm_segs[seg].ds_addr);
				sc->sc_txdescs[nexttx].tmd1 =
				    htole32(LE_T1_ONES |
				    (nexttx == sc->sc_txnext ? 0 : LE_T1_OWN) |
				    (LE_BCNT(dmamap->dm_segs[seg].ds_len) &
				     LE_T1_BCNT_MASK));
				lasttx = nexttx;
			}
		} else {
			for (nexttx = sc->sc_txnext, seg = 0;
			     seg < dmamap->dm_nsegs;
			     seg++, nexttx = PCN_NEXTTX(nexttx)) {
				/*
				 * If this is the first descriptor we're
				 * enqueueing, don't set the OWN bit just
				 * yet.  That could cause a race condition.
				 * We'll do it below.
				 */
				sc->sc_txdescs[nexttx].tmd0 =
				    htole32(dmamap->dm_segs[seg].ds_addr);
				sc->sc_txdescs[nexttx].tmd2 = 0;
				sc->sc_txdescs[nexttx].tmd1 =
				    htole32(LE_T1_ONES |
				    (nexttx == sc->sc_txnext ? 0 : LE_T1_OWN) |
				    (LE_BCNT(dmamap->dm_segs[seg].ds_len) &
				     LE_T1_BCNT_MASK));
				lasttx = nexttx;
			}
		}

		KASSERT(lasttx != -1);
		/* Interrupt on the packet, if appropriate. */
		if ((sc->sc_txsnext & PCN_TXINTR_MASK) == 0)
			sc->sc_txdescs[lasttx].tmd1 |= htole32(LE_T1_LTINT);

		/*
		 * Set `start of packet' and `end of packet' appropriately.
		 * The OWN bit on the first descriptor is set last, after
		 * all other descriptors are visible to the chip.
		 */
		sc->sc_txdescs[lasttx].tmd1 |= htole32(LE_T1_ENP);
		sc->sc_txdescs[sc->sc_txnext].tmd1 |=
		    htole32(LE_T1_OWN|LE_T1_STP);

		/* Sync the descriptors we're using. */
		PCN_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Kick the transmitter. */
		pcn_csr_write(sc, LE_CSR0, LE_C0_INEA|LE_C0_TDMD);

		/*
		 * Store a pointer to the packet so we can free it later,
		 * and remember what txdirty will be once the packet is
		 * done.
		 */
		txs->txs_mbuf = m0;
		txs->txs_firstdesc = sc->sc_txnext;
		txs->txs_lastdesc = lasttx;

		/* Advance the tx pointer. */
		sc->sc_txfree -= dmamap->dm_nsegs;
		sc->sc_txnext = nexttx;

		sc->sc_txsfree--;
		sc->sc_txsnext = PCN_NEXTTXS(sc->sc_txsnext);

#if NBPFILTER > 0
		/* Pass the packet to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0, BPF_DIRECTION_OUT);
#endif /* NBPFILTER > 0 */
	}

	if (sc->sc_txsfree == 0 || sc->sc_txfree == 0) {
		/* No more slots left; notify upper layer. */
		ifq_set_oactive(&ifp->if_snd);
	}

	if (sc->sc_txfree != ofree) {
		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}
| 1013 | |
/*
 * pcn_watchdog:	[ifnet interface function]
 *
 *	Watchdog timer handler.  Runs when ifp->if_timer (armed in
 *	pcn_start()) expires with transmissions still outstanding.
 *	Reaps completed descriptors first so a late-but-successful
 *	transmit is not misreported as a timeout.
 */
void
pcn_watchdog(struct ifnet *ifp)
{
	struct pcn_softc *sc = ifp->if_softc;

	/*
	 * Since we're not interrupting every packet, sweep
	 * up before we report an error.
	 */
	pcn_txintr(sc);

	/* txfree below the full ring size means packets are still stuck. */
	if (sc->sc_txfree != PCN_NTXDESC) {
		printf("%s: device timeout (txfree %d txsfree %d)\n",
		    sc->sc_dev.dv_xname, sc->sc_txfree, sc->sc_txsfree);
		ifp->if_oerrors++;

		/* Reset the interface. */
		(void) pcn_init(ifp);
	}

	/* Try to get more packets going. */
	pcn_start(ifp);
}
| 1042 | |
/*
 * pcn_ioctl:		[ifnet interface function]
 *
 *	Handle control requests from the operator.
 *
 *	Runs at splnet().  Returns 0 on success or an errno; ENETRESET
 *	from a sub-handler is translated into a re-init of the chip
 *	(or silently succeeds if the interface is down).
 */
int
pcn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct pcn_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			pcn_init(ifp);
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/* Already running: reprogram below via ENETRESET. */
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				pcn_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				pcn_stop(ifp, 1);
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;

	default:
		/* Multicast and other generic Ethernet requests. */
		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			error = pcn_init(ifp);
		else
			error = 0;
	}

	splx(s);
	return (error);
}
| 1095 | |
/*
 * pcn_intr:
 *
 *	Interrupt service routine.  Loops reading CSR0 until no
 *	interrupt cause remains, acknowledging each cause as it is
 *	handled.  Returns non-zero iff the interrupt was ours.
 *	If a fatal condition is seen (memory error, RX/TX engine
 *	stopped), the chip is re-initialized before returning.
 */
int
pcn_intr(void *arg)
{
	struct pcn_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t csr0;
	int wantinit, handled = 0;

	/* wantinit != 0 terminates the loop and forces a re-init below. */
	for (wantinit = 0; wantinit == 0;) {
		csr0 = pcn_csr_read(sc, LE_CSR0);
		if ((csr0 & LE_C0_INTR) == 0)
			break;

		/* ACK the bits and re-enable interrupts. */
		pcn_csr_write(sc, LE_CSR0, csr0 &
		    (LE_C0_INEA|LE_C0_BABL|LE_C0_MISS|LE_C0_MERR|LE_C0_RINT|
		     LE_C0_TINT|LE_C0_IDON));

		handled = 1;

		if (csr0 & LE_C0_RINT)
			wantinit = pcn_rxintr(sc);

		if (csr0 & LE_C0_TINT)
			pcn_txintr(sc);

		if (csr0 & LE_C0_ERR) {
			if (csr0 & LE_C0_BABL)
				ifp->if_oerrors++;
			if (csr0 & LE_C0_MISS)
				ifp->if_ierrors++;
			if (csr0 & LE_C0_MERR) {
				printf("%s: memory error\n",
				    sc->sc_dev.dv_xname);
				wantinit = 1;
				break;
			}
		}

		if ((csr0 & LE_C0_RXON) == 0) {
			printf("%s: receiver disabled\n",
			    sc->sc_dev.dv_xname);
			ifp->if_ierrors++;
			wantinit = 1;
		}

		if ((csr0 & LE_C0_TXON) == 0) {
			printf("%s: transmitter disabled\n",
			    sc->sc_dev.dv_xname);
			ifp->if_oerrors++;
			wantinit = 1;
		}
	}

	if (handled) {
		if (wantinit)
			pcn_init(ifp);

		/* Try to get more packets going. */
		pcn_start(ifp);
	}

	return (handled);
}
| 1165 | |
/*
 * pcn_spnd:
 *
 *	Suspend the chip.  Sets the SPND bit in CSR5 and busy-waits
 *	(up to 10000 * 5us = 50ms) for the chip to acknowledge by
 *	reading the bit back.  On timeout a warning is printed but
 *	the caller proceeds anyway.
 */
void
pcn_spnd(struct pcn_softc *sc)
{
	int i;

	pcn_csr_write(sc, LE_CSR5, sc->sc_csr5 | LE_C5_SPND);

	/* Poll for the chip to enter the suspended state. */
	for (i = 0; i < 10000; i++) {
		if (pcn_csr_read(sc, LE_CSR5) & LE_C5_SPND)
			return;
		delay(5);
	}

	printf("%s: WARNING: chip failed to enter suspended state\n",
	    sc->sc_dev.dv_xname);
}
| 1187 | |
/*
 * pcn_txintr:
 *
 *	Helper; handle transmit interrupts.  Walks the software
 *	transmit job queue from sc_txsdirty, reclaiming descriptors
 *	and mbufs for every frame the chip has finished (OWN bit
 *	clear on the last descriptor), updating error and collision
 *	counters along the way.
 */
void
pcn_txintr(struct pcn_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct pcn_txsoft *txs;
	uint32_t tmd1, tmd2, tmd;
	int i, j;

	/* We are about to free slots, so the queue is no longer full. */
	ifq_clr_oactive(&ifp->if_snd);

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	for (i = sc->sc_txsdirty; sc->sc_txsfree != PCN_TXQUEUELEN;
	     i = PCN_NEXTTXS(i), sc->sc_txsfree++) {
		txs = &sc->sc_txsoft[i];

		PCN_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_dmamap->dm_nsegs,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		/* Chip still owns the last descriptor: frame not done yet. */
		tmd1 = letoh32(sc->sc_txdescs[txs->txs_lastdesc].tmd1);
		if (tmd1 & LE_T1_OWN)
			break;

		/*
		 * Slightly annoying -- we have to loop through the
		 * descriptors we've used looking for ERR, since it
		 * can appear on any descriptor in the chain.
		 */
		for (j = txs->txs_firstdesc;; j = PCN_NEXTTX(j)) {
			tmd = letoh32(sc->sc_txdescs[j].tmd1);
			if (tmd & LE_T1_ERR) {
				ifp->if_oerrors++;
				/*
				 * Descriptor layout depends on software
				 * style: status word is in tmd0 for
				 * PCnet-PCI style, tmd2 otherwise.
				 */
				if (sc->sc_swstyle == LE_B20_SSTYLE_PCNETPCI3)
					tmd2 = letoh32(sc->sc_txdescs[j].tmd0);
				else
					tmd2 = letoh32(sc->sc_txdescs[j].tmd2);
				if (tmd2 & LE_T2_UFLO) {
					/*
					 * FIFO underrun: bump the transmit
					 * start point threshold (if not
					 * already at max) and reprogram
					 * CSR80 while the chip is suspended.
					 */
					if (sc->sc_xmtsp < LE_C80_XMTSP_MAX) {
						sc->sc_xmtsp++;
						printf("%s: transmit "
						    "underrun; new threshold: "
						    "%s\n",
						    sc->sc_dev.dv_xname,
						    sc->sc_xmtsp_desc[
						    sc->sc_xmtsp]);
						pcn_spnd(sc);
						pcn_csr_write(sc, LE_CSR80,
						    LE_C80_RCVFW(sc->sc_rcvfw) |
						    LE_C80_XMTSP(sc->sc_xmtsp) |
						    LE_C80_XMTFW(sc->sc_xmtfw));
						/* Clears SPND: resumes chip. */
						pcn_csr_write(sc, LE_CSR5,
						    sc->sc_csr5);
					} else {
						printf("%s: transmit "
						    "underrun\n",
						    sc->sc_dev.dv_xname);
					}
				} else if (tmd2 & LE_T2_BUFF) {
					printf("%s: transmit buffer error\n",
					    sc->sc_dev.dv_xname);
				}
				if (tmd2 & LE_T2_LCOL)
					ifp->if_collisions++;
				if (tmd2 & LE_T2_RTRY)
					ifp->if_collisions += 16;
				goto next_packet;
			}
			if (j == txs->txs_lastdesc)
				break;
		}
		if (tmd1 & LE_T1_ONE)
			ifp->if_collisions++;
		else if (tmd & LE_T1_MORE) {
			/* Real number is unknown. */
			ifp->if_collisions += 2;
		}
	next_packet:
		/* Return the descriptors and the mbuf to the free pools. */
		sc->sc_txfree += txs->txs_dmamap->dm_nsegs;
		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
		m_freem(txs->txs_mbuf);
		txs->txs_mbuf = NULL;
	}

	/* Update the dirty transmit buffer pointer. */
	sc->sc_txsdirty = i;

	/*
	 * If there are no more pending transmissions, cancel the watchdog
	 * timer.
	 */
	if (sc->sc_txsfree == PCN_TXQUEUELEN)
		ifp->if_timer = 0;
}
| 1290 | |
| 1291 | /* |
| 1292 | * pcn_rxintr: |
| 1293 | * |
| 1294 | * Helper; handle receive interrupts. |
| 1295 | */ |
| 1296 | int |
| 1297 | pcn_rxintr(struct pcn_softc *sc) |
| 1298 | { |
| 1299 | struct ifnet *ifp = &sc->sc_arpcom.ac_if; |
| 1300 | struct pcn_rxsoft *rxs; |
| 1301 | struct mbuf *m; |
| 1302 | struct mbuf_list ml = MBUF_LIST_INITIALIZER(){ ((void *)0), ((void *)0), 0 }; |
| 1303 | uint32_t rmd1; |
| 1304 | int i, len; |
| 1305 | int rv = 0; |
| 1306 | |
| 1307 | for (i = sc->sc_rxptr;; i = PCN_NEXTRX(i)(((i) + 1) & (128 - 1))) { |
| 1308 | rxs = &sc->sc_rxsoft[i]; |
| 1309 | |
| 1310 | PCN_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)(*((sc)->sc_dmat)->_dmamap_sync)(((sc)->sc_dmat), (( sc)->sc_cddmamap), (__builtin_offsetof(struct pcn_control_data , pcd_rxdescs[((i))])), (sizeof(struct lermd)), ((0x02|0x08)) ); |
| 1311 | |
| 1312 | rmd1 = letoh32(sc->sc_rxdescs[i].rmd1)((__uint32_t)(sc->sc_control_data->pcd_rxdescs[i].rmd1) ); |
| 1313 | |
| 1314 | if (rmd1 & LE_R1_OWN(1<<31)) |
| 1315 | break; |
| 1316 | |
| 1317 | /* |
| 1318 | * Check for errors and make sure the packet fit into |
| 1319 | * a single buffer. We have structured this block of |
| 1320 | * code the way it is in order to compress it into |
| 1321 | * one test in the common case (no error). |
| 1322 | */ |
| 1323 | if (__predict_false((rmd1 & (LE_R1_STP|LE_R1_ENP|LE_R1_ERR)) !=__builtin_expect((((rmd1 & ((1<<25)|(1<<24)|( 1<<30))) != ((1<<25)|(1<<24))) != 0), 0) |
| 1324 | (LE_R1_STP|LE_R1_ENP))__builtin_expect((((rmd1 & ((1<<25)|(1<<24)|( 1<<30))) != ((1<<25)|(1<<24))) != 0), 0)) { |
| 1325 | /* Make sure the packet is in a single buffer. */ |
| 1326 | if ((rmd1 & (LE_R1_STP(1<<25)|LE_R1_ENP(1<<24))) != |
| 1327 | (LE_R1_STP(1<<25)|LE_R1_ENP(1<<24))) { |
| 1328 | printf("%s: packet spilled into next buffer\n", |
| 1329 | sc->sc_dev.dv_xname); |
| 1330 | rv = 1; /* pcn_intr() will re-init */ |
| 1331 | goto done; |
| 1332 | } |
| 1333 | |
| 1334 | /* |
| 1335 | * If the packet had an error, simple recycle the |
| 1336 | * buffer. |
| 1337 | */ |
| 1338 | if (rmd1 & LE_R1_ERR(1<<30)) { |
| 1339 | ifp->if_ierrorsif_data.ifi_ierrors++; |
| 1340 | /* |
| 1341 | * If we got an overflow error, chances |
| 1342 | * are there will be a CRC error. In |
| 1343 | * this case, just print the overflow |
| 1344 | * error, and skip the others. |
| 1345 | */ |
| 1346 | if (rmd1 & LE_R1_OFLO(1<<28)) |
| 1347 | printf("%s: overflow error\n", |
| 1348 | sc->sc_dev.dv_xname); |
| 1349 | else { |
| 1350 | #define PRINTIT(x, str) \ |
| 1351 | if (rmd1 & (x)) \ |
| 1352 | printf("%s: %s\n", \ |
| 1353 | sc->sc_dev.dv_xname, str); |
| 1354 | PRINTIT(LE_R1_FRAM(1<<29), "framing error"); |
| 1355 | PRINTIT(LE_R1_CRC(1<<27), "CRC error"); |
| 1356 | PRINTIT(LE_R1_BUFF(1<<26), "buffer error"); |
| 1357 | } |
| 1358 | #undef PRINTIT |
| 1359 | PCN_INIT_RXDESC(sc, i)do { struct pcn_rxsoft *__rxs = &(sc)->sc_rxsoft[(i)]; struct lermd *__rmd = &(sc)->sc_control_data->pcd_rxdescs [(i)]; struct mbuf *__m = __rxs->rxs_mbuf; __m->m_hdr.mh_data = __m->M_dat.MH.MH_dat.MH_ext.ext_buf + 2; if ((sc)->sc_swstyle == 3) { __rmd->rmd2 = ((__uint32_t)(__rxs->rxs_dmamap-> dm_segs[0].ds_addr + 2)); __rmd->rmd0 = 0; } else { __rmd-> rmd2 = 0; __rmd->rmd0 = ((__uint32_t)(__rxs->rxs_dmamap ->dm_segs[0].ds_addr + 2)); } __rmd->rmd1 = ((__uint32_t )((1<<31)|(0xf<<12)| ((~((1 << 11) - 2) + 1 ) & (0xfff)))); (*(((sc))->sc_dmat)->_dmamap_sync)( (((sc))->sc_dmat), (((sc))->sc_cddmamap), (__builtin_offsetof (struct pcn_control_data, pcd_rxdescs[(((i)))])), (sizeof(struct lermd)), ((0x01|0x04)));} while( 0); |
| 1360 | continue; |
| 1361 | } |
| 1362 | } |
| 1363 | |
| 1364 | bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (rxs-> rxs_dmamap), (0), (rxs->rxs_dmamap->dm_mapsize), (0x02) ) |
| 1365 | rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (rxs-> rxs_dmamap), (0), (rxs->rxs_dmamap->dm_mapsize), (0x02) ); |
| 1366 | |
| 1367 | /* |
| 1368 | * No errors; receive the packet. |
| 1369 | */ |
| 1370 | if (sc->sc_swstyle == LE_B20_SSTYLE_PCNETPCI33) |
| 1371 | len = letoh32(sc->sc_rxdescs[i].rmd0)((__uint32_t)(sc->sc_control_data->pcd_rxdescs[i].rmd0) ) & LE_R1_BCNT_MASK(0xfff); |
| 1372 | else |
| 1373 | len = letoh32(sc->sc_rxdescs[i].rmd2)((__uint32_t)(sc->sc_control_data->pcd_rxdescs[i].rmd2) ) & LE_R1_BCNT_MASK(0xfff); |
| 1374 | |
| 1375 | /* |
| 1376 | * The LANCE family includes the CRC with every packet; |
| 1377 | * trim it off here. |
| 1378 | */ |
| 1379 | len -= ETHER_CRC_LEN4; |
| 1380 | |
| 1381 | /* |
| 1382 | * If the packet is small enough to fit in a |
| 1383 | * single header mbuf, allocate one and copy |
| 1384 | * the data into it. This greatly reduces |
| 1385 | * memory consumption when we receive lots |
| 1386 | * of small packets. |
| 1387 | * |
| 1388 | * Otherwise, we add a new buffer to the receive |
| 1389 | * chain. If this fails, we drop the packet and |
| 1390 | * recycle the old buffer. |
| 1391 | */ |
| 1392 | if (pcn_copy_small != 0 && len <= (MHLEN((256 - sizeof(struct m_hdr)) - sizeof(struct pkthdr)) - 2)) { |
| 1393 | MGETHDR(m, M_DONTWAIT, MT_DATA)m = m_gethdr((0x0002), (1)); |
| 1394 | if (m == NULL((void *)0)) |
| 1395 | goto dropit; |
| 1396 | m->m_datam_hdr.mh_data += 2; |
| 1397 | memcpy(mtod(m, caddr_t),__builtin_memcpy((((caddr_t)((m)->m_hdr.mh_data))), (((caddr_t )((rxs->rxs_mbuf)->m_hdr.mh_data))), (len)) |
| 1398 | mtod(rxs->rxs_mbuf, caddr_t), len)__builtin_memcpy((((caddr_t)((m)->m_hdr.mh_data))), (((caddr_t )((rxs->rxs_mbuf)->m_hdr.mh_data))), (len)); |
| 1399 | PCN_INIT_RXDESC(sc, i)do { struct pcn_rxsoft *__rxs = &(sc)->sc_rxsoft[(i)]; struct lermd *__rmd = &(sc)->sc_control_data->pcd_rxdescs [(i)]; struct mbuf *__m = __rxs->rxs_mbuf; __m->m_hdr.mh_data = __m->M_dat.MH.MH_dat.MH_ext.ext_buf + 2; if ((sc)->sc_swstyle == 3) { __rmd->rmd2 = ((__uint32_t)(__rxs->rxs_dmamap-> dm_segs[0].ds_addr + 2)); __rmd->rmd0 = 0; } else { __rmd-> rmd2 = 0; __rmd->rmd0 = ((__uint32_t)(__rxs->rxs_dmamap ->dm_segs[0].ds_addr + 2)); } __rmd->rmd1 = ((__uint32_t )((1<<31)|(0xf<<12)| ((~((1 << 11) - 2) + 1 ) & (0xfff)))); (*(((sc))->sc_dmat)->_dmamap_sync)( (((sc))->sc_dmat), (((sc))->sc_cddmamap), (__builtin_offsetof (struct pcn_control_data, pcd_rxdescs[(((i)))])), (sizeof(struct lermd)), ((0x01|0x04)));} while( 0); |
| 1400 | bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (rxs-> rxs_dmamap), (0), (rxs->rxs_dmamap->dm_mapsize), (0x01) ) |
| 1401 | rxs->rxs_dmamap->dm_mapsize,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (rxs-> rxs_dmamap), (0), (rxs->rxs_dmamap->dm_mapsize), (0x01) ) |
| 1402 | BUS_DMASYNC_PREREAD)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (rxs-> rxs_dmamap), (0), (rxs->rxs_dmamap->dm_mapsize), (0x01) ); |
| 1403 | } else { |
| 1404 | m = rxs->rxs_mbuf; |
| 1405 | if (pcn_add_rxbuf(sc, i) != 0) { |
| 1406 | dropit: |
| 1407 | ifp->if_ierrorsif_data.ifi_ierrors++; |
| 1408 | PCN_INIT_RXDESC(sc, i)do { struct pcn_rxsoft *__rxs = &(sc)->sc_rxsoft[(i)]; struct lermd *__rmd = &(sc)->sc_control_data->pcd_rxdescs [(i)]; struct mbuf *__m = __rxs->rxs_mbuf; __m->m_hdr.mh_data = __m->M_dat.MH.MH_dat.MH_ext.ext_buf + 2; if ((sc)->sc_swstyle == 3) { __rmd->rmd2 = ((__uint32_t)(__rxs->rxs_dmamap-> dm_segs[0].ds_addr + 2)); __rmd->rmd0 = 0; } else { __rmd-> rmd2 = 0; __rmd->rmd0 = ((__uint32_t)(__rxs->rxs_dmamap ->dm_segs[0].ds_addr + 2)); } __rmd->rmd1 = ((__uint32_t )((1<<31)|(0xf<<12)| ((~((1 << 11) - 2) + 1 ) & (0xfff)))); (*(((sc))->sc_dmat)->_dmamap_sync)( (((sc))->sc_dmat), (((sc))->sc_cddmamap), (__builtin_offsetof (struct pcn_control_data, pcd_rxdescs[(((i)))])), (sizeof(struct lermd)), ((0x01|0x04)));} while( 0); |
| 1409 | bus_dmamap_sync(sc->sc_dmat,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (rxs-> rxs_dmamap), (0), (rxs->rxs_dmamap->dm_mapsize), (0x01) ) |
| 1410 | rxs->rxs_dmamap, 0,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (rxs-> rxs_dmamap), (0), (rxs->rxs_dmamap->dm_mapsize), (0x01) ) |
| 1411 | rxs->rxs_dmamap->dm_mapsize,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (rxs-> rxs_dmamap), (0), (rxs->rxs_dmamap->dm_mapsize), (0x01) ) |
| 1412 | BUS_DMASYNC_PREREAD)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (rxs-> rxs_dmamap), (0), (rxs->rxs_dmamap->dm_mapsize), (0x01) ); |
| 1413 | continue; |
| 1414 | } |
| 1415 | } |
| 1416 | |
| 1417 | m->m_pkthdrM_dat.MH.MH_pkthdr.len = m->m_lenm_hdr.mh_len = len; |
| 1418 | |
| 1419 | ml_enqueue(&ml, m); |
| 1420 | } |
| 1421 | |
| 1422 | /* Update the receive pointer. */ |
| 1423 | sc->sc_rxptr = i; |
| 1424 | done: |
| 1425 | if_input(ifp, &ml); |
| 1426 | return (rv); |
| 1427 | } |
| 1428 | |
| 1429 | /* |
| 1430 | * pcn_tick: |
| 1431 | * |
| 1432 | * One second timer, used to tick the MII. |
| 1433 | */ |
| 1434 | void |
| 1435 | pcn_tick(void *arg) |
| 1436 | { |
| 1437 | struct pcn_softc *sc = arg; |
| 1438 | int s; |
| 1439 | |
| 1440 | s = splnet()splraise(0x7); |
| 1441 | mii_tick(&sc->sc_mii); |
| 1442 | splx(s)spllower(s); |
| 1443 | |
| 1444 | timeout_add_sec(&sc->sc_tick_timeout, 1); |
| 1445 | } |
| 1446 | |
| 1447 | /* |
| 1448 | * pcn_reset: |
| 1449 | * |
| 1450 | * Perform a soft reset on the PCnet-PCI. |
| 1451 | */ |
| 1452 | void |
| 1453 | pcn_reset(struct pcn_softc *sc) |
| 1454 | { |
| 1455 | |
| 1456 | /* |
| 1457 | * The PCnet-PCI chip is reset by reading from the |
| 1458 | * RESET register. Note that while the NE2100 LANCE |
| 1459 | * boards require a write after the read, the PCnet-PCI |
| 1460 | * chips do not require this. |
| 1461 | * |
| 1462 | * Since we don't know if we're in 16-bit or 32-bit |
| 1463 | * mode right now, issue both (it's safe) in the |
| 1464 | * hopes that one will succeed. |
| 1465 | */ |
| 1466 | (void) bus_space_read_2(sc->sc_st, sc->sc_sh, PCN16_RESET)((sc->sc_st)->read_2((sc->sc_sh), (0x14))); |
| 1467 | (void) bus_space_read_4(sc->sc_st, sc->sc_sh, PCN32_RESET)((sc->sc_st)->read_4((sc->sc_sh), (0x18))); |
| 1468 | |
| 1469 | /* Wait 1ms for it to finish. */ |
| 1470 | delay(1000)(*delay_func)(1000); |
| 1471 | |
| 1472 | /* |
| 1473 | * Select 32-bit I/O mode by issuing a 32-bit write to the |
| 1474 | * RDP. Since the RAP is 0 after a reset, writing a 0 |
| 1475 | * to RDP is safe (since it simply clears CSR0). |
| 1476 | */ |
| 1477 | bus_space_write_4(sc->sc_st, sc->sc_sh, PCN32_RDP, 0)((sc->sc_st)->write_4((sc->sc_sh), (0x10), (0))); |
| 1478 | } |
| 1479 | |
| 1480 | /* |
| 1481 | * pcn_init: [ifnet interface function] |
| 1482 | * |
| 1483 | * Initialize the interface. Must be called at splnet(). |
| 1484 | */ |
| 1485 | int |
| 1486 | pcn_init(struct ifnet *ifp) |
| 1487 | { |
| 1488 | struct pcn_softc *sc = ifp->if_softc; |
| 1489 | struct pcn_rxsoft *rxs; |
| 1490 | uint8_t *enaddr = LLADDR(ifp->if_sadl)((caddr_t)((ifp->if_sadl)->sdl_data + (ifp->if_sadl) ->sdl_nlen)); |
| 1491 | int i, error = 0; |
| 1492 | uint32_t reg; |
| 1493 | |
| 1494 | /* Cancel any pending I/O. */ |
| 1495 | pcn_stop(ifp, 0); |
| 1496 | |
| 1497 | /* Reset the chip to a known state. */ |
| 1498 | pcn_reset(sc); |
| 1499 | |
| 1500 | /* |
| 1501 | * On the Am79c970, select SSTYLE 2, and SSTYLE 3 on everything |
| 1502 | * else. |
| 1503 | * |
| 1504 | * XXX It'd be really nice to use SSTYLE 2 on all the chips, |
| 1505 | * because the structure layout is compatible with ILACC, |
| 1506 | * but the burst mode is only available in SSTYLE 3, and |
| 1507 | * burst mode should provide some performance enhancement. |
| 1508 | */ |
| 1509 | if (sc->sc_variant->pcv_chipid == PARTID_Am79c9700x2430) |
| 1510 | sc->sc_swstyle = LE_B20_SSTYLE_PCNETPCI22; |
| 1511 | else |
| 1512 | sc->sc_swstyle = LE_B20_SSTYLE_PCNETPCI33; |
| 1513 | pcn_bcr_write(sc, LE_BCR200x0014, sc->sc_swstyle); |
| 1514 | |
| 1515 | /* Initialize the transmit descriptor ring. */ |
| 1516 | memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs))__builtin_memset((sc->sc_control_data->pcd_txdescs), (0 ), (sizeof(sc->sc_control_data->pcd_txdescs))); |
| 1517 | PCN_CDTXSYNC(sc, 0, PCN_NTXDESC,do { int __x, __n; __x = (0); __n = (512); if ((__x + __n) > 512) { (*((sc)->sc_dmat)->_dmamap_sync)(((sc)->sc_dmat ), ((sc)->sc_cddmamap), (__builtin_offsetof(struct pcn_control_data , pcd_txdescs[(__x)])), (sizeof(struct letmd) * (512 - __x)), ((0x01|0x04))); __n -= (512 - __x); __x = 0; } (*((sc)->sc_dmat )->_dmamap_sync)(((sc)->sc_dmat), ((sc)->sc_cddmamap ), (__builtin_offsetof(struct pcn_control_data, pcd_txdescs[( __x)])), (sizeof(struct letmd) * __n), ((0x01|0x04))); } while ( 0) |
| 1518 | BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)do { int __x, __n; __x = (0); __n = (512); if ((__x + __n) > 512) { (*((sc)->sc_dmat)->_dmamap_sync)(((sc)->sc_dmat ), ((sc)->sc_cddmamap), (__builtin_offsetof(struct pcn_control_data , pcd_txdescs[(__x)])), (sizeof(struct letmd) * (512 - __x)), ((0x01|0x04))); __n -= (512 - __x); __x = 0; } (*((sc)->sc_dmat )->_dmamap_sync)(((sc)->sc_dmat), ((sc)->sc_cddmamap ), (__builtin_offsetof(struct pcn_control_data, pcd_txdescs[( __x)])), (sizeof(struct letmd) * __n), ((0x01|0x04))); } while ( 0); |
| 1519 | sc->sc_txfree = PCN_NTXDESC512; |
| 1520 | sc->sc_txnext = 0; |
| 1521 | |
| 1522 | /* Initialize the transmit job descriptors. */ |
| 1523 | for (i = 0; i < PCN_TXQUEUELEN128; i++) |
| 1524 | sc->sc_txsoft[i].txs_mbuf = NULL((void *)0); |
| 1525 | sc->sc_txsfree = PCN_TXQUEUELEN128; |
| 1526 | sc->sc_txsnext = 0; |
| 1527 | sc->sc_txsdirty = 0; |
| 1528 | |
| 1529 | /* |
| 1530 | * Initialize the receive descriptor and receive job |
| 1531 | * descriptor rings. |
| 1532 | */ |
| 1533 | for (i = 0; i < PCN_NRXDESC128; i++) { |
| 1534 | rxs = &sc->sc_rxsoft[i]; |
| 1535 | if (rxs->rxs_mbuf == NULL((void *)0)) { |
| 1536 | if ((error = pcn_add_rxbuf(sc, i)) != 0) { |
| 1537 | printf("%s: unable to allocate or map rx " |
| 1538 | "buffer %d, error = %d\n", |
| 1539 | sc->sc_dev.dv_xname, i, error); |
| 1540 | /* |
| 1541 | * XXX Should attempt to run with fewer receive |
| 1542 | * XXX buffers instead of just failing. |
| 1543 | */ |
| 1544 | pcn_rxdrain(sc); |
| 1545 | goto out; |
| 1546 | } |
| 1547 | } else |
| 1548 | PCN_INIT_RXDESC(sc, i)do { struct pcn_rxsoft *__rxs = &(sc)->sc_rxsoft[(i)]; struct lermd *__rmd = &(sc)->sc_control_data->pcd_rxdescs [(i)]; struct mbuf *__m = __rxs->rxs_mbuf; __m->m_hdr.mh_data = __m->M_dat.MH.MH_dat.MH_ext.ext_buf + 2; if ((sc)->sc_swstyle == 3) { __rmd->rmd2 = ((__uint32_t)(__rxs->rxs_dmamap-> dm_segs[0].ds_addr + 2)); __rmd->rmd0 = 0; } else { __rmd-> rmd2 = 0; __rmd->rmd0 = ((__uint32_t)(__rxs->rxs_dmamap ->dm_segs[0].ds_addr + 2)); } __rmd->rmd1 = ((__uint32_t )((1<<31)|(0xf<<12)| ((~((1 << 11) - 2) + 1 ) & (0xfff)))); (*(((sc))->sc_dmat)->_dmamap_sync)( (((sc))->sc_dmat), (((sc))->sc_cddmamap), (__builtin_offsetof (struct pcn_control_data, pcd_rxdescs[(((i)))])), (sizeof(struct lermd)), ((0x01|0x04)));} while( 0); |
| 1549 | } |
| 1550 | sc->sc_rxptr = 0; |
| 1551 | |
| 1552 | /* Initialize MODE for the initialization block. */ |
| 1553 | sc->sc_mode = 0; |
| 1554 | |
| 1555 | /* |
| 1556 | * If we have MII, simply select MII in the MODE register, |
| 1557 | * and clear ASEL. Otherwise, let ASEL stand (for now), |
| 1558 | * and leave PORTSEL alone (it is ignored with ASEL is set). |
| 1559 | */ |
| 1560 | if (sc->sc_flags & PCN_F_HAS_MII0x0001) { |
| 1561 | pcn_bcr_write(sc, LE_BCR20x0002, |
| 1562 | pcn_bcr_read(sc, LE_BCR20x0002) & ~LE_B2_ASEL0x0002); |
| 1563 | sc->sc_mode |= LE_C15_PORTSEL(PORTSEL_MII)((3) << 7); |
| 1564 | |
| 1565 | /* |
| 1566 | * Disable MII auto-negotiation. We handle that in |
| 1567 | * our own MII layer. |
| 1568 | */ |
| 1569 | pcn_bcr_write(sc, LE_BCR320x0020, |
| 1570 | pcn_bcr_read(sc, LE_BCR320x0020) | LE_B32_DANAS0x0080); |
| 1571 | } |
| 1572 | |
| 1573 | /* Set the multicast filter in the init block. */ |
| 1574 | pcn_set_filter(sc); |
| 1575 | |
| 1576 | /* |
| 1577 | * Set the Tx and Rx descriptor ring addresses in the init |
| 1578 | * block, the TLEN and RLEN other fields of the init block |
| 1579 | * MODE register. |
| 1580 | */ |
| 1581 | sc->sc_initblocksc_control_data->pcd_initblock.init_rdra = htole32(PCN_CDRXADDR(sc, 0))((__uint32_t)(((sc)->sc_cddmamap->dm_segs[0].ds_addr + __builtin_offsetof (struct pcn_control_data, pcd_rxdescs[((0))])))); |
| 1582 | sc->sc_initblocksc_control_data->pcd_initblock.init_tdra = htole32(PCN_CDTXADDR(sc, 0))((__uint32_t)(((sc)->sc_cddmamap->dm_segs[0].ds_addr + __builtin_offsetof (struct pcn_control_data, pcd_txdescs[((0))])))); |
| 1583 | sc->sc_initblocksc_control_data->pcd_initblock.init_mode = htole32(sc->sc_mode |((__uint32_t)(sc->sc_mode | ((ffs(512) - 1) << 28) | ((ffs(128) - 1) << 20))) |
| 1584 | ((ffs(PCN_NTXDESC) - 1) << 28) |((__uint32_t)(sc->sc_mode | ((ffs(512) - 1) << 28) | ((ffs(128) - 1) << 20))) |
| 1585 | ((ffs(PCN_NRXDESC) - 1) << 20))((__uint32_t)(sc->sc_mode | ((ffs(512) - 1) << 28) | ((ffs(128) - 1) << 20))); |
| 1586 | |
| 1587 | /* Set the station address in the init block. */ |
| 1588 | sc->sc_initblocksc_control_data->pcd_initblock.init_padr[0] = htole32(enaddr[0] |((__uint32_t)(enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) | (enaddr[3] << 24))) |
| 1589 | (enaddr[1] << 8) | (enaddr[2] << 16) | (enaddr[3] << 24))((__uint32_t)(enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) | (enaddr[3] << 24))); |
| 1590 | sc->sc_initblocksc_control_data->pcd_initblock.init_padr[1] = htole32(enaddr[4] |((__uint32_t)(enaddr[4] | (enaddr[5] << 8))) |
| 1591 | (enaddr[5] << 8))((__uint32_t)(enaddr[4] | (enaddr[5] << 8))); |
| 1592 | |
| 1593 | /* Initialize CSR3. */ |
| 1594 | pcn_csr_write(sc, LE_CSR30x0003, LE_C3_MISSM0x1000|LE_C3_IDONM0x0100|LE_C3_DXSUFLO0x0040); |
| 1595 | |
| 1596 | /* Initialize CSR4. */ |
| 1597 | pcn_csr_write(sc, LE_CSR40x0004, LE_C4_DMAPLUS0x4000|LE_C4_APAD_XMT0x0800| |
| 1598 | LE_C4_MFCOM0x0100|LE_C4_RCVCCOM0x0010|LE_C4_TXSTRTM0x0004); |
| 1599 | |
| 1600 | /* Initialize CSR5. */ |
| 1601 | sc->sc_csr5 = LE_C5_LTINTEN0x4000|LE_C5_SINTE0x0400; |
| 1602 | pcn_csr_write(sc, LE_CSR50x0005, sc->sc_csr5); |
| 1603 | |
| 1604 | /* |
| 1605 | * If we have an Am79c971 or greater, initialize CSR7. |
| 1606 | * |
| 1607 | * XXX Might be nice to use the MII auto-poll interrupt someday. |
| 1608 | */ |
| 1609 | switch (sc->sc_variant->pcv_chipid) { |
| 1610 | case PARTID_Am79c9700x2430: |
| 1611 | case PARTID_Am79c970A0x2621: |
| 1612 | /* Not available on these chips. */ |
| 1613 | break; |
| 1614 | |
| 1615 | default: |
| 1616 | pcn_csr_write(sc, LE_CSR70x0007, LE_C7_FASTSPNDE0x8000); |
| 1617 | break; |
| 1618 | } |
| 1619 | |
| 1620 | /* |
| 1621 | * On the Am79c970A and greater, initialize BCR18 to |
| 1622 | * enable burst mode. |
| 1623 | * |
| 1624 | * Also enable the "no underflow" option on the Am79c971 and |
| 1625 | * higher, which prevents the chip from generating transmit |
| 1626 | * underflows, yet sill provides decent performance. Note if |
| 1627 | * chip is not connected to external SRAM, then we still have |
| 1628 | * to handle underflow errors (the NOUFLO bit is ignored in |
| 1629 | * that case). |
| 1630 | */ |
| 1631 | reg = pcn_bcr_read(sc, LE_BCR180x0012); |
| 1632 | switch (sc->sc_variant->pcv_chipid) { |
| 1633 | case PARTID_Am79c9700x2430: |
| 1634 | break; |
| 1635 | |
| 1636 | case PARTID_Am79c970A0x2621: |
| 1637 | reg |= LE_B18_BREADE0x0040|LE_B18_BWRITE0x0020; |
| 1638 | break; |
| 1639 | |
| 1640 | default: |
| 1641 | reg |= LE_B18_BREADE0x0040|LE_B18_BWRITE0x0020|LE_B18_NOUFLO0x0800; |
| 1642 | break; |
| 1643 | } |
| 1644 | pcn_bcr_write(sc, LE_BCR180x0012, reg); |
| 1645 | |
| 1646 | /* |
| 1647 | * Initialize CSR80 (FIFO thresholds for Tx and Rx). |
| 1648 | */ |
| 1649 | pcn_csr_write(sc, LE_CSR800x0050, LE_C80_RCVFW(sc->sc_rcvfw)((sc->sc_rcvfw) << 12) | |
| 1650 | LE_C80_XMTSP(sc->sc_xmtsp)((sc->sc_xmtsp) << 10) | LE_C80_XMTFW(sc->sc_xmtfw)((sc->sc_xmtfw) << 8)); |
| 1651 | |
| 1652 | /* |
| 1653 | * Send the init block to the chip, and wait for it |
| 1654 | * to be processed. |
| 1655 | */ |
| 1656 | PCN_CDINITSYNC(sc, BUS_DMASYNC_PREWRITE)(*((sc)->sc_dmat)->_dmamap_sync)(((sc)->sc_dmat), (( sc)->sc_cddmamap), (__builtin_offsetof(struct pcn_control_data , pcd_initblock)), (sizeof(struct leinit)), ((0x04))); |
| 1657 | pcn_csr_write(sc, LE_CSR10x0001, PCN_CDINITADDR(sc)((sc)->sc_cddmamap->dm_segs[0].ds_addr + __builtin_offsetof (struct pcn_control_data, pcd_initblock)) & 0xffff); |
| 1658 | pcn_csr_write(sc, LE_CSR20x0002, (PCN_CDINITADDR(sc)((sc)->sc_cddmamap->dm_segs[0].ds_addr + __builtin_offsetof (struct pcn_control_data, pcd_initblock)) >> 16) & 0xffff); |
| 1659 | pcn_csr_write(sc, LE_CSR00x0000, LE_C0_INIT0x0001); |
| 1660 | delay(100)(*delay_func)(100); |
| 1661 | for (i = 0; i < 10000; i++) { |
| 1662 | if (pcn_csr_read(sc, LE_CSR00x0000) & LE_C0_IDON0x0100) |
| 1663 | break; |
| 1664 | delay(10)(*delay_func)(10); |
| 1665 | } |
| 1666 | PCN_CDINITSYNC(sc, BUS_DMASYNC_POSTWRITE)(*((sc)->sc_dmat)->_dmamap_sync)(((sc)->sc_dmat), (( sc)->sc_cddmamap), (__builtin_offsetof(struct pcn_control_data , pcd_initblock)), (sizeof(struct leinit)), ((0x08))); |
| 1667 | if (i == 10000) { |
| 1668 | printf("%s: timeout processing init block\n", |
| 1669 | sc->sc_dev.dv_xname); |
| 1670 | error = EIO5; |
| 1671 | goto out; |
| 1672 | } |
| 1673 | |
| 1674 | /* Set the media. */ |
| 1675 | (void) (*sc->sc_mii.mii_media.ifm_change)(ifp); |
| 1676 | |
| 1677 | /* Enable interrupts and external activity (and ACK IDON). */ |
| 1678 | pcn_csr_write(sc, LE_CSR00x0000, LE_C0_INEA0x0040|LE_C0_STRT0x0002|LE_C0_IDON0x0100); |
| 1679 | |
| 1680 | if (sc->sc_flags & PCN_F_HAS_MII0x0001) { |
| 1681 | /* Start the one second MII clock. */ |
| 1682 | timeout_add_sec(&sc->sc_tick_timeout, 1); |
| 1683 | } |
| 1684 | |
| 1685 | /* ...all done! */ |
| 1686 | ifp->if_flags |= IFF_RUNNING0x40; |
| 1687 | ifq_clr_oactive(&ifp->if_snd); |
| 1688 | |
| 1689 | out: |
| 1690 | if (error) |
| 1691 | printf("%s: interface not running\n", sc->sc_dev.dv_xname); |
| 1692 | return (error); |
| 1693 | } |
| 1694 | |
| 1695 | /* |
| 1696 | * pcn_rxdrain: |
| 1697 | * |
| 1698 | * Drain the receive queue. |
| 1699 | */ |
| 1700 | void |
| 1701 | pcn_rxdrain(struct pcn_softc *sc) |
| 1702 | { |
| 1703 | struct pcn_rxsoft *rxs; |
| 1704 | int i; |
| 1705 | |
| 1706 | for (i = 0; i < PCN_NRXDESC128; i++) { |
| 1707 | rxs = &sc->sc_rxsoft[i]; |
| 1708 | if (rxs->rxs_mbuf != NULL((void *)0)) { |
| 1709 | bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap)(*(sc->sc_dmat)->_dmamap_unload)((sc->sc_dmat), (rxs ->rxs_dmamap)); |
| 1710 | m_freem(rxs->rxs_mbuf); |
| 1711 | rxs->rxs_mbuf = NULL((void *)0); |
| 1712 | } |
| 1713 | } |
| 1714 | } |
| 1715 | |
| 1716 | /* |
| 1717 | * pcn_stop: [ifnet interface function] |
| 1718 | * |
| 1719 | * Stop transmission on the interface. |
| 1720 | */ |
| 1721 | void |
| 1722 | pcn_stop(struct ifnet *ifp, int disable) |
| 1723 | { |
| 1724 | struct pcn_softc *sc = ifp->if_softc; |
| 1725 | struct pcn_txsoft *txs; |
| 1726 | int i; |
| 1727 | |
| 1728 | if (sc->sc_flags & PCN_F_HAS_MII0x0001) { |
| 1729 | /* Stop the one second clock. */ |
| 1730 | timeout_del(&sc->sc_tick_timeout); |
| 1731 | |
| 1732 | /* Down the MII. */ |
| 1733 | mii_down(&sc->sc_mii); |
| 1734 | } |
| 1735 | |
| 1736 | /* Mark the interface as down and cancel the watchdog timer. */ |
| 1737 | ifp->if_flags &= ~IFF_RUNNING0x40; |
| 1738 | ifq_clr_oactive(&ifp->if_snd); |
| 1739 | ifp->if_timer = 0; |
| 1740 | |
| 1741 | /* Stop the chip. */ |
| 1742 | pcn_csr_write(sc, LE_CSR00x0000, LE_C0_STOP0x0004); |
| 1743 | |
| 1744 | /* Release any queued transmit buffers. */ |
| 1745 | for (i = 0; i < PCN_TXQUEUELEN128; i++) { |
| 1746 | txs = &sc->sc_txsoft[i]; |
| 1747 | if (txs->txs_mbuf != NULL((void *)0)) { |
| 1748 | bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap)(*(sc->sc_dmat)->_dmamap_unload)((sc->sc_dmat), (txs ->txs_dmamap)); |
| 1749 | m_freem(txs->txs_mbuf); |
| 1750 | txs->txs_mbuf = NULL((void *)0); |
| 1751 | } |
| 1752 | } |
| 1753 | |
| 1754 | if (disable) |
| 1755 | pcn_rxdrain(sc); |
| 1756 | } |
| 1757 | |
| 1758 | /* |
| 1759 | * pcn_add_rxbuf: |
| 1760 | * |
| 1761 | * Add a receive buffer to the indicated descriptor. |
| 1762 | */ |
| 1763 | int |
| 1764 | pcn_add_rxbuf(struct pcn_softc *sc, int idx) |
| 1765 | { |
| 1766 | struct pcn_rxsoft *rxs = &sc->sc_rxsoft[idx]; |
| 1767 | struct mbuf *m; |
| 1768 | int error; |
| 1769 | |
| 1770 | MGETHDR(m, M_DONTWAIT, MT_DATA)m = m_gethdr((0x0002), (1)); |
| 1771 | if (m == NULL((void *)0)) |
| 1772 | return (ENOBUFS55); |
| 1773 | |
| 1774 | MCLGET(m, M_DONTWAIT)(void) m_clget((m), (0x0002), (1 << 11)); |
| 1775 | if ((m->m_flagsm_hdr.mh_flags & M_EXT0x0001) == 0) { |
| 1776 | m_freem(m); |
| 1777 | return (ENOBUFS55); |
| 1778 | } |
| 1779 | |
| 1780 | if (rxs->rxs_mbuf != NULL((void *)0)) |
| 1781 | bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap)(*(sc->sc_dmat)->_dmamap_unload)((sc->sc_dmat), (rxs ->rxs_dmamap)); |
| 1782 | |
| 1783 | rxs->rxs_mbuf = m; |
| 1784 | |
| 1785 | error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap,(*(sc->sc_dmat)->_dmamap_load)((sc->sc_dmat), (rxs-> rxs_dmamap), (m->M_dat.MH.MH_dat.MH_ext.ext_buf), (m->M_dat .MH.MH_dat.MH_ext.ext_size), (((void *)0)), (0x0200|0x0001)) |
| 1786 | m->m_ext.ext_buf, m->m_ext.ext_size, NULL,(*(sc->sc_dmat)->_dmamap_load)((sc->sc_dmat), (rxs-> rxs_dmamap), (m->M_dat.MH.MH_dat.MH_ext.ext_buf), (m->M_dat .MH.MH_dat.MH_ext.ext_size), (((void *)0)), (0x0200|0x0001)) |
| 1787 | BUS_DMA_READ|BUS_DMA_NOWAIT)(*(sc->sc_dmat)->_dmamap_load)((sc->sc_dmat), (rxs-> rxs_dmamap), (m->M_dat.MH.MH_dat.MH_ext.ext_buf), (m->M_dat .MH.MH_dat.MH_ext.ext_size), (((void *)0)), (0x0200|0x0001)); |
| 1788 | if (error) { |
| 1789 | printf("%s: can't load rx DMA map %d, error = %d\n", |
| 1790 | sc->sc_dev.dv_xname, idx, error); |
| 1791 | panic("pcn_add_rxbuf"); |
| 1792 | } |
| 1793 | |
| 1794 | bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (rxs-> rxs_dmamap), (0), (rxs->rxs_dmamap->dm_mapsize), (0x01) ) |
| 1795 | rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (rxs-> rxs_dmamap), (0), (rxs->rxs_dmamap->dm_mapsize), (0x01) ); |
| 1796 | |
| 1797 | PCN_INIT_RXDESC(sc, idx)do { struct pcn_rxsoft *__rxs = &(sc)->sc_rxsoft[(idx) ]; struct lermd *__rmd = &(sc)->sc_control_data->pcd_rxdescs [(idx)]; struct mbuf *__m = __rxs->rxs_mbuf; __m->m_hdr .mh_data = __m->M_dat.MH.MH_dat.MH_ext.ext_buf + 2; if ((sc )->sc_swstyle == 3) { __rmd->rmd2 = ((__uint32_t)(__rxs ->rxs_dmamap->dm_segs[0].ds_addr + 2)); __rmd->rmd0 = 0; } else { __rmd->rmd2 = 0; __rmd->rmd0 = ((__uint32_t )(__rxs->rxs_dmamap->dm_segs[0].ds_addr + 2)); } __rmd-> rmd1 = ((__uint32_t)((1<<31)|(0xf<<12)| ((~((1 << 11) - 2) + 1) & (0xfff)))); (*(((sc))->sc_dmat)->_dmamap_sync )((((sc))->sc_dmat), (((sc))->sc_cddmamap), (__builtin_offsetof (struct pcn_control_data, pcd_rxdescs[(((idx)))])), (sizeof(struct lermd)), ((0x01|0x04)));} while( 0); |
| 1798 | |
| 1799 | return (0); |
| 1800 | } |
| 1801 | |
| 1802 | /* |
| 1803 | * pcn_set_filter: |
| 1804 | * |
| 1805 | * Set up the receive filter. |
| 1806 | */ |
| 1807 | void |
| 1808 | pcn_set_filter(struct pcn_softc *sc) |
| 1809 | { |
| 1810 | struct arpcom *ac = &sc->sc_arpcom; |
| 1811 | struct ifnet *ifp = &sc->sc_arpcom.ac_if; |
| 1812 | struct ether_multi *enm; |
| 1813 | struct ether_multistep step; |
| 1814 | uint32_t crc; |
| 1815 | |
| 1816 | ifp->if_flags &= ~IFF_ALLMULTI0x200; |
| 1817 | |
| 1818 | if (ifp->if_flags & IFF_PROMISC0x100 || ac->ac_multirangecnt > 0) { |
| 1819 | ifp->if_flags |= IFF_ALLMULTI0x200; |
| 1820 | if (ifp->if_flags & IFF_PROMISC0x100) |
| 1821 | sc->sc_mode |= LE_C15_PROM0x8000; |
| 1822 | sc->sc_initblocksc_control_data->pcd_initblock.init_ladrf[0] = |
| 1823 | sc->sc_initblocksc_control_data->pcd_initblock.init_ladrf[1] = |
| 1824 | sc->sc_initblocksc_control_data->pcd_initblock.init_ladrf[2] = |
| 1825 | sc->sc_initblocksc_control_data->pcd_initblock.init_ladrf[3] = 0xffff; |
| 1826 | } else { |
| 1827 | sc->sc_initblocksc_control_data->pcd_initblock.init_ladrf[0] = |
| 1828 | sc->sc_initblocksc_control_data->pcd_initblock.init_ladrf[1] = |
| 1829 | sc->sc_initblocksc_control_data->pcd_initblock.init_ladrf[2] = |
| 1830 | sc->sc_initblocksc_control_data->pcd_initblock.init_ladrf[3] = 0; |
| 1831 | |
| 1832 | /* |
| 1833 | * Set up the multicast address filter by passing all multicast |
| 1834 | * addresses through a CRC generator, and then using the high |
| 1835 | * order 6 bits as an index into the 64-bit logical address |
| 1836 | * filter. The high order bits select the word, while the rest |
| 1837 | * of the bits select the bit within the word. |
| 1838 | */ |
| 1839 | ETHER_FIRST_MULTI(step, ac, enm)do { (step).e_enm = ((&(ac)->ac_multiaddrs)->lh_first ); do { if ((((enm)) = ((step)).e_enm) != ((void *)0)) ((step )).e_enm = ((((enm)))->enm_list.le_next); } while ( 0); } while ( 0); |
| 1840 | while (enm != NULL((void *)0)) { |
| 1841 | crc = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN6); |
| 1842 | |
| 1843 | /* Just want the 6 most significant bits. */ |
| 1844 | crc >>= 26; |
| 1845 | |
| 1846 | /* Set the corresponding bit in the filter. */ |
| 1847 | sc->sc_initblocksc_control_data->pcd_initblock.init_ladrf[crc >> 4] |= |
| 1848 | htole16(1 << (crc & 0xf))((__uint16_t)(1 << (crc & 0xf))); |
| 1849 | |
| 1850 | ETHER_NEXT_MULTI(step, enm)do { if (((enm) = (step).e_enm) != ((void *)0)) (step).e_enm = (((enm))->enm_list.le_next); } while ( 0); |
| 1851 | } |
| 1852 | } |
| 1853 | } |
| 1854 | |
| 1855 | /* |
| 1856 | * pcn_79c970_mediainit: |
| 1857 | * |
| 1858 | * Initialize media for the Am79c970. |
| 1859 | */ |
| 1860 | void |
| 1861 | pcn_79c970_mediainit(struct pcn_softc *sc) |
| 1862 | { |
| 1863 | ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK0xff00000000000000ULL, pcn_79c970_mediachange, |
| 1864 | pcn_79c970_mediastatus); |
| 1865 | |
| 1866 | ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER0x0000000000000100ULL|IFM_10_55, |
| 1867 | PORTSEL_AUI0, NULL((void *)0)); |
| 1868 | if (sc->sc_variant->pcv_chipid == PARTID_Am79c970A0x2621) |
| 1869 | ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER0x0000000000000100ULL|IFM_10_55|IFM_FDX0x0000010000000000ULL, |
| 1870 | PORTSEL_AUI0, NULL((void *)0)); |
| 1871 | |
| 1872 | ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER0x0000000000000100ULL|IFM_10_T3, |
| 1873 | PORTSEL_10T1, NULL((void *)0)); |
| 1874 | if (sc->sc_variant->pcv_chipid == PARTID_Am79c970A0x2621) |
| 1875 | ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER0x0000000000000100ULL|IFM_10_T3|IFM_FDX0x0000010000000000ULL, |
| 1876 | PORTSEL_10T1, NULL((void *)0)); |
| 1877 | |
| 1878 | ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER0x0000000000000100ULL|IFM_AUTO0ULL, |
| 1879 | 0, NULL((void *)0)); |
| 1880 | if (sc->sc_variant->pcv_chipid == PARTID_Am79c970A0x2621) |
| 1881 | ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER0x0000000000000100ULL|IFM_AUTO0ULL|IFM_FDX0x0000010000000000ULL, |
| 1882 | 0, NULL((void *)0)); |
| 1883 | |
| 1884 | ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER0x0000000000000100ULL|IFM_AUTO0ULL); |
| 1885 | } |
| 1886 | |
| 1887 | /* |
| 1888 | * pcn_79c970_mediastatus: [ifmedia interface function] |
| 1889 | * |
| 1890 | * Get the current interface media status (Am79c970 version). |
| 1891 | */ |
| 1892 | void |
| 1893 | pcn_79c970_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) |
| 1894 | { |
| 1895 | struct pcn_softc *sc = ifp->if_softc; |
| 1896 | |
| 1897 | /* |
| 1898 | * The currently selected media is always the active media. |
| 1899 | * Note: We have no way to determine what media the AUTO |
| 1900 | * process picked. |
| 1901 | */ |
| 1902 | ifmr->ifm_active = sc->sc_mii.mii_media.ifm_media; |
| 1903 | } |
| 1904 | |
| 1905 | /* |
| 1906 | * pcn_79c970_mediachange: [ifmedia interface function] |
| 1907 | * |
| 1908 | * Set hardware to newly-selected media (Am79c970 version). |
| 1909 | */ |
| 1910 | int |
| 1911 | pcn_79c970_mediachange(struct ifnet *ifp) |
| 1912 | { |
| 1913 | struct pcn_softc *sc = ifp->if_softc; |
| 1914 | uint32_t reg; |
| 1915 | |
| 1916 | if (IFM_SUBTYPE(sc->sc_mii.mii_media.ifm_media)((sc->sc_mii.mii_media.ifm_media) & 0x00000000000000ffULL ) == IFM_AUTO0ULL) { |
| 1917 | /* |
| 1918 | * CSR15:PORTSEL doesn't matter. Just set BCR2:ASEL. |
| 1919 | */ |
| 1920 | reg = pcn_bcr_read(sc, LE_BCR20x0002); |
| 1921 | reg |= LE_B2_ASEL0x0002; |
| 1922 | pcn_bcr_write(sc, LE_BCR20x0002, reg); |
| 1923 | } else { |
| 1924 | /* |
| 1925 | * Clear BCR2:ASEL and set the new CSR15:PORTSEL value. |
| 1926 | */ |
| 1927 | reg = pcn_bcr_read(sc, LE_BCR20x0002); |
| 1928 | reg &= ~LE_B2_ASEL0x0002; |
| 1929 | pcn_bcr_write(sc, LE_BCR20x0002, reg); |
| 1930 | |
| 1931 | reg = pcn_csr_read(sc, LE_CSR150x000f); |
| 1932 | reg = (reg & ~LE_C15_PORTSEL(PORTSEL_MASK)((3) << 7)) | |
| 1933 | LE_C15_PORTSEL(sc->sc_mii.mii_media.ifm_cur->ifm_data)((sc->sc_mii.mii_media.ifm_cur->ifm_data) << 7); |
| 1934 | pcn_csr_write(sc, LE_CSR150x000f, reg); |
| 1935 | } |
| 1936 | |
| 1937 | if ((sc->sc_mii.mii_media.ifm_media & IFM_FDX0x0000010000000000ULL) != 0) { |
| 1938 | reg = LE_B9_FDEN0x0001; |
| 1939 | if (IFM_SUBTYPE(sc->sc_mii.mii_media.ifm_media)((sc->sc_mii.mii_media.ifm_media) & 0x00000000000000ffULL ) == IFM_10_55) |
| 1940 | reg |= LE_B9_AUIFD0x0002; |
| 1941 | pcn_bcr_write(sc, LE_BCR90x0009, reg); |
| 1942 | } else |
| 1943 | pcn_bcr_write(sc, LE_BCR90x0009, 0); |
| 1944 | |
| 1945 | return (0); |
| 1946 | } |
| 1947 | |
| 1948 | /* |
| 1949 | * pcn_79c971_mediainit: |
| 1950 | * |
| 1951 | * Initialize media for the Am79c971. |
| 1952 | */ |
| 1953 | void |
| 1954 | pcn_79c971_mediainit(struct pcn_softc *sc) |
| 1955 | { |
| 1956 | struct ifnet *ifp = &sc->sc_arpcom.ac_if; |
| 1957 | |
| 1958 | /* We have MII. */ |
| 1959 | sc->sc_flags |= PCN_F_HAS_MII0x0001; |
| 1960 | |
| 1961 | /* |
| 1962 | * The built-in 10BASE-T interface is mapped to the MII |
| 1963 | * on the PCNet-FAST. Unfortunately, there's no EEPROM |
| 1964 | * word that tells us which PHY to use. |
| 1965 | * This driver used to ignore all but the first PHY to |
| 1966 | * answer, but this code was removed to support multiple |
| 1967 | * external PHYs. As the default instance will be the first |
| 1968 | * one to answer, no harm is done by letting the possibly |
| 1969 | * non-connected internal PHY show up. |
| 1970 | */ |
| 1971 | |
| 1972 | /* Initialize our media structures and probe the MII. */ |
| 1973 | sc->sc_mii.mii_ifp = ifp; |
| 1974 | sc->sc_mii.mii_readreg = pcn_mii_readreg; |
| 1975 | sc->sc_mii.mii_writereg = pcn_mii_writereg; |
| 1976 | sc->sc_mii.mii_statchg = pcn_mii_statchg; |
| 1977 | ifmedia_init(&sc->sc_mii.mii_media, 0, pcn_79c971_mediachange, |
| 1978 | pcn_79c971_mediastatus); |
| 1979 | |
| 1980 | mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY-1, |
| 1981 | MII_OFFSET_ANY-1, 0); |
| 1982 | if (LIST_FIRST(&sc->sc_mii.mii_phys)((&sc->sc_mii.mii_phys)->lh_first) == NULL((void *)0)) { |
| 1983 | ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER0x0000000000000100ULL|IFM_NONE2ULL, 0, NULL((void *)0)); |
| 1984 | ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER0x0000000000000100ULL|IFM_NONE2ULL); |
| 1985 | } else |
| 1986 | ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER0x0000000000000100ULL|IFM_AUTO0ULL); |
| 1987 | } |
| 1988 | |
| 1989 | /* |
| 1990 | * pcn_79c971_mediastatus: [ifmedia interface function] |
| 1991 | * |
| 1992 | * Get the current interface media status (Am79c971 version). |
| 1993 | */ |
| 1994 | void |
| 1995 | pcn_79c971_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) |
| 1996 | { |
| 1997 | struct pcn_softc *sc = ifp->if_softc; |
| 1998 | |
| 1999 | mii_pollstat(&sc->sc_mii); |
| 2000 | ifmr->ifm_status = sc->sc_mii.mii_media_status; |
| 2001 | ifmr->ifm_active = sc->sc_mii.mii_media_active; |
| 2002 | } |
| 2003 | |
| 2004 | /* |
| 2005 | * pcn_79c971_mediachange: [ifmedia interface function] |
| 2006 | * |
| 2007 | * Set hardware to newly-selected media (Am79c971 version). |
| 2008 | */ |
| 2009 | int |
| 2010 | pcn_79c971_mediachange(struct ifnet *ifp) |
| 2011 | { |
| 2012 | struct pcn_softc *sc = ifp->if_softc; |
| 2013 | |
| 2014 | if (ifp->if_flags & IFF_UP0x1) |
| 2015 | mii_mediachg(&sc->sc_mii); |
| 2016 | return (0); |
| 2017 | } |
| 2018 | |
| 2019 | /* |
| 2020 | * pcn_mii_readreg: [mii interface function] |
| 2021 | * |
| 2022 | * Read a PHY register on the MII. |
| 2023 | */ |
| 2024 | int |
| 2025 | pcn_mii_readreg(struct device *self, int phy, int reg) |
| 2026 | { |
| 2027 | struct pcn_softc *sc = (void *) self; |
| 2028 | uint32_t rv; |
| 2029 | |
| 2030 | pcn_bcr_write(sc, LE_BCR330x0021, reg | (phy << PHYAD_SHIFT5)); |
| 2031 | rv = pcn_bcr_read(sc, LE_BCR340x0022) & LE_B34_MIIMD0xffff; |
| 2032 | if (rv == 0xffff) |
| 2033 | return (0); |
| 2034 | |
| 2035 | return (rv); |
| 2036 | } |
| 2037 | |
| 2038 | /* |
| 2039 | * pcn_mii_writereg: [mii interface function] |
| 2040 | * |
| 2041 | * Write a PHY register on the MII. |
| 2042 | */ |
| 2043 | void |
| 2044 | pcn_mii_writereg(struct device *self, int phy, int reg, int val) |
| 2045 | { |
| 2046 | struct pcn_softc *sc = (void *) self; |
| 2047 | |
| 2048 | pcn_bcr_write(sc, LE_BCR330x0021, reg | (phy << PHYAD_SHIFT5)); |
| 2049 | pcn_bcr_write(sc, LE_BCR340x0022, val); |
| 2050 | } |
| 2051 | |
| 2052 | /* |
| 2053 | * pcn_mii_statchg: [mii interface function] |
| 2054 | * |
| 2055 | * Callback from MII layer when media changes. |
| 2056 | */ |
| 2057 | void |
| 2058 | pcn_mii_statchg(struct device *self) |
| 2059 | { |
| 2060 | struct pcn_softc *sc = (void *) self; |
| 2061 | |
| 2062 | if ((sc->sc_mii.mii_media_active & IFM_FDX0x0000010000000000ULL) != 0) |
| 2063 | pcn_bcr_write(sc, LE_BCR90x0009, LE_B9_FDEN0x0001); |
| 2064 | else |
| 2065 | pcn_bcr_write(sc, LE_BCR90x0009, 0); |
| 2066 | } |