File: dev/ic/gem.c
Warning: line 1777, column 3: The right operand of '|' is a garbage value
1 | /* $OpenBSD: gem.c,v 1.126 2020/12/12 11:48:52 jan Exp $ */ | |||
2 | /* $NetBSD: gem.c,v 1.1 2001/09/16 00:11:43 eeh Exp $ */ | |||
3 | ||||
4 | /* | |||
5 | * | |||
6 | * Copyright (C) 2001 Eduardo Horvath. | |||
7 | * All rights reserved. | |||
8 | * | |||
9 | * | |||
10 | * Redistribution and use in source and binary forms, with or without | |||
11 | * modification, are permitted provided that the following conditions | |||
12 | * are met: | |||
13 | * 1. Redistributions of source code must retain the above copyright | |||
14 | * notice, this list of conditions and the following disclaimer. | |||
15 | * 2. Redistributions in binary form must reproduce the above copyright | |||
16 | * notice, this list of conditions and the following disclaimer in the | |||
17 | * documentation and/or other materials provided with the distribution. | |||
18 | * | |||
19 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND | |||
20 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | |||
21 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | |||
22 | * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE | |||
23 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | |||
24 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | |||
25 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | |||
26 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | |||
27 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY | |||
28 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | |||
29 | * SUCH DAMAGE. | |||
30 | * | |||
31 | */ | |||
32 | ||||
33 | /* | |||
34 | * Driver for Sun GEM ethernet controllers. | |||
35 | */ | |||
36 | ||||
37 | #include "bpfilter.h" | |||
38 | ||||
39 | #include <sys/param.h> | |||
40 | #include <sys/systm.h> | |||
41 | #include <sys/timeout.h> | |||
42 | #include <sys/mbuf.h> | |||
43 | #include <sys/syslog.h> | |||
44 | #include <sys/malloc.h> | |||
45 | #include <sys/kernel.h> | |||
46 | #include <sys/socket.h> | |||
47 | #include <sys/ioctl.h> | |||
48 | #include <sys/errno.h> | |||
49 | #include <sys/device.h> | |||
50 | #include <sys/endian.h> | |||
51 | #include <sys/atomic.h> | |||
52 | ||||
53 | #include <net/if.h> | |||
54 | #include <net/if_media.h> | |||
55 | ||||
56 | #include <netinet/in.h> | |||
57 | #include <netinet/if_ether.h> | |||
58 | ||||
59 | #if NBPFILTER > 0
60 | #include <net/bpf.h> | |||
61 | #endif | |||
62 | ||||
63 | #include <machine/bus.h> | |||
64 | #include <machine/intr.h> | |||
65 | ||||
66 | #include <dev/mii/mii.h> | |||
67 | #include <dev/mii/miivar.h> | |||
68 | ||||
69 | #include <dev/ic/gemreg.h> | |||
70 | #include <dev/ic/gemvar.h> | |||
71 | ||||
72 | #define TRIES 10000
73 | ||||
74 | struct cfdriver gem_cd = { | |||
75 | NULL((void *)0), "gem", DV_IFNET | |||
76 | }; | |||
77 | ||||
78 | void gem_start(struct ifqueue *); | |||
79 | void gem_stop(struct ifnet *, int); | |||
80 | int gem_ioctl(struct ifnet *, u_long, caddr_t); | |||
81 | void gem_tick(void *); | |||
82 | void gem_watchdog(struct ifnet *); | |||
83 | int gem_init(struct ifnet *); | |||
84 | void gem_init_regs(struct gem_softc *); | |||
85 | int gem_ringsize(int); | |||
86 | int gem_meminit(struct gem_softc *); | |||
87 | void gem_mifinit(struct gem_softc *); | |||
88 | int gem_bitwait(struct gem_softc *, bus_space_handle_t, int, | |||
89 | u_int32_t, u_int32_t); | |||
90 | void gem_reset(struct gem_softc *); | |||
91 | int gem_reset_rx(struct gem_softc *); | |||
92 | int gem_reset_tx(struct gem_softc *); | |||
93 | int gem_disable_rx(struct gem_softc *); | |||
94 | int gem_disable_tx(struct gem_softc *); | |||
95 | void gem_rx_watchdog(void *); | |||
96 | void gem_rxdrain(struct gem_softc *); | |||
97 | void gem_fill_rx_ring(struct gem_softc *); | |||
98 | int gem_add_rxbuf(struct gem_softc *, int idx); | |||
99 | int gem_load_mbuf(struct gem_softc *, struct gem_sxd *, | |||
100 | struct mbuf *); | |||
101 | void gem_iff(struct gem_softc *); | |||
102 | ||||
103 | /* MII methods & callbacks */ | |||
104 | int gem_mii_readreg(struct device *, int, int); | |||
105 | void gem_mii_writereg(struct device *, int, int, int); | |||
106 | void gem_mii_statchg(struct device *); | |||
107 | int gem_pcs_readreg(struct device *, int, int); | |||
108 | void gem_pcs_writereg(struct device *, int, int, int); | |||
109 | ||||
110 | int gem_mediachange(struct ifnet *); | |||
111 | void gem_mediastatus(struct ifnet *, struct ifmediareq *); | |||
112 | ||||
113 | int gem_eint(struct gem_softc *, u_int); | |||
114 | int gem_rint(struct gem_softc *); | |||
115 | int gem_tint(struct gem_softc *, u_int32_t); | |||
116 | int gem_pint(struct gem_softc *); | |||
117 | ||||
118 | #ifdef GEM_DEBUG | |||
119 | #define DPRINTF(sc, x) if ((sc)->sc_arpcom.ac_if.if_flags & IFF_DEBUG) \
120 | printf x | |||
121 | #else | |||
122 | #define DPRINTF(sc, x) /* nothing */ | |||
123 | #endif | |||
124 | ||||
125 | /* | |||
126 | * Attach a Gem interface to the system. | |||
127 | */ | |||
128 | void | |||
129 | gem_config(struct gem_softc *sc) | |||
130 | { | |||
131 | struct ifnet *ifp = &sc->sc_arpcom.ac_if; | |||
132 | struct mii_data *mii = &sc->sc_mii; | |||
133 | struct mii_softc *child; | |||
134 | int i, error, mii_flags, phyad; | |||
135 | struct ifmedia_entry *ifm; | |||
136 | ||||
137 | /* Make sure the chip is stopped. */ | |||
138 | ifp->if_softc = sc; | |||
139 | gem_reset(sc); | |||
140 | ||||
141 | /* | |||
142 | * Allocate the control data structures, and create and load the | |||
143 | * DMA map for it. | |||
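 * The control data (struct gem_control_data) groups the TX and RX
 * descriptor rings into a single allocation that is mapped
 * BUS_DMA_COHERENT and loaded into sc_cddmamap below.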
144 | */ | |||
145 | if ((error = bus_dmamem_alloc(sc->sc_dmatag,
146 |     sizeof(struct gem_control_data), PAGE_SIZE, 0, &sc->sc_cdseg,
147 |     1, &sc->sc_cdnseg, 0)) != 0) {
148 | printf("\n%s: unable to allocate control data, error = %d\n", | |||
149 | sc->sc_dev.dv_xname, error); | |||
150 | goto fail_0; | |||
151 | } | |||
152 | ||||
153 | /* XXX should map this in with correct endianness */ | |||
154 | if ((error = bus_dmamem_map(sc->sc_dmatag, &sc->sc_cdseg, sc->sc_cdnseg,
155 |     sizeof(struct gem_control_data), (caddr_t *)&sc->sc_control_data,
156 |     BUS_DMA_COHERENT)) != 0) {
157 | printf("\n%s: unable to map control data, error = %d\n", | |||
158 | sc->sc_dev.dv_xname, error); | |||
159 | goto fail_1; | |||
160 | } | |||
161 | ||||
162 | if ((error = bus_dmamap_create(sc->sc_dmatag,
163 |     sizeof(struct gem_control_data), 1,
164 |     sizeof(struct gem_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
165 | printf("\n%s: unable to create control data DMA map, " | |||
166 | "error = %d\n", sc->sc_dev.dv_xname, error); | |||
167 | goto fail_2; | |||
168 | } | |||
169 | ||||
170 | if ((error = bus_dmamap_load(sc->sc_dmatag, sc->sc_cddmamap,
171 |     sc->sc_control_data, sizeof(struct gem_control_data), NULL,
172 |     0)) != 0) {
173 | printf("\n%s: unable to load control data DMA map, error = %d\n", | |||
174 | sc->sc_dev.dv_xname, error); | |||
175 | goto fail_3; | |||
176 | } | |||
177 | ||||
178 | /* | |||
179 | * Create the receive buffer DMA maps. | |||
180 | */ | |||
181 | for (i = 0; i < GEM_NRXDESC; i++) {
182 |     if ((error = bus_dmamap_create(sc->sc_dmatag, MCLBYTES, 1,
183 |         MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
184 | printf("\n%s: unable to create rx DMA map %d, " | |||
185 | "error = %d\n", sc->sc_dev.dv_xname, i, error); | |||
186 | goto fail_5; | |||
187 | } | |||
188 | sc->sc_rxsoft[i].rxs_mbuf = NULL((void *)0); | |||
189 | } | |||
190 | /* | |||
191 | * Create the transmit buffer DMA maps. | |||
192 | */ | |||
193 | for (i = 0; i < GEM_NTXDESC; i++) {
194 |     if ((error = bus_dmamap_create(sc->sc_dmatag, MCLBYTES,
195 |         GEM_NTXSEGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
196 |         &sc->sc_txd[i].sd_map)) != 0) {
197 | printf("\n%s: unable to create tx DMA map %d, " | |||
198 | "error = %d\n", sc->sc_dev.dv_xname, i, error); | |||
199 | goto fail_6; | |||
200 | } | |||
201 | sc->sc_txd[i].sd_mbuf = NULL((void *)0); | |||
202 | } | |||
203 | ||||
204 | /* | |||
205 | * From this point forward, the attachment cannot fail. A failure | |||
206 | * before this point releases all resources that may have been | |||
207 | * allocated. | |||
208 | */ | |||
209 | ||||
210 | /* Announce ourselves. */ | |||
211 | printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr)); | |||
212 | ||||
213 | /* Get RX FIFO size */ | |||
214 | sc->sc_rxfifosize = 64 * | |||
215 | bus_space_read_4(sc->sc_bustag, sc->sc_h1, GEM_RX_FIFO_SIZE)((sc->sc_bustag)->read_4((sc->sc_h1), (0x4120))); | |||
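/* The RX FIFO size register is in 64-byte units; sc_rxfifosize is kept in bytes. */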
216 | ||||
217 | /* Initialize ifnet structure. */ | |||
218 | strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, sizeof ifp->if_xname); | |||
219 | ifp->if_softc = sc; | |||
220 | ifp->if_flags = IFF_BROADCAST0x2 | IFF_SIMPLEX0x800 | IFF_MULTICAST0x8000; | |||
221 | ifp->if_xflags = IFXF_MPSAFE0x1; | |||
222 | ifp->if_qstart = gem_start; | |||
223 | ifp->if_ioctl = gem_ioctl; | |||
224 | ifp->if_watchdog = gem_watchdog; | |||
225 | ifq_set_maxlen(&ifp->if_snd, GEM_NTXDESC - 1)((&ifp->if_snd)->ifq_maxlen = ((64 * 16) - 1)); | |||
226 | ||||
227 | ifp->if_capabilitiesif_data.ifi_capabilities = IFCAP_VLAN_MTU0x00000010; | |||
228 | ||||
229 | /* Initialize ifmedia structures and MII info */ | |||
230 | mii->mii_ifp = ifp; | |||
231 | mii->mii_readreg = gem_mii_readreg; | |||
232 | mii->mii_writereg = gem_mii_writereg; | |||
233 | mii->mii_statchg = gem_mii_statchg; | |||
234 | ||||
235 | ifmedia_init(&mii->mii_media, 0, gem_mediachange, gem_mediastatus); | |||
236 | ||||
237 | /* Bad things will happen if we touch this register on ERI. */ | |||
238 | if (sc->sc_variant != GEM_SUN_ERI2) | |||
239 | bus_space_write_4(sc->sc_bustag, sc->sc_h1,((sc->sc_bustag)->write_4((sc->sc_h1), (0x9050), (0) )) | |||
240 | GEM_MII_DATAPATH_MODE, 0)((sc->sc_bustag)->write_4((sc->sc_h1), (0x9050), (0) )); | |||
241 | ||||
242 | gem_mifinit(sc); | |||
243 | ||||
244 | mii_flags = MIIF_DOPAUSE0x0100; | |||
245 | ||||
246 | /* | |||
247 | * Look for an external PHY. | |||
248 | */ | |||
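/* GEM_MIF_CONFIG_MDI1 is the MIF's attach-sense bit for the external MDIO bus. */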
249 | if (sc->sc_mif_config & GEM_MIF_CONFIG_MDI10x00000200) { | |||
250 | sc->sc_mif_config |= GEM_MIF_CONFIG_PHY_SEL0x00000001; | |||
251 | bus_space_write_4(sc->sc_bustag, sc->sc_h1,((sc->sc_bustag)->write_4((sc->sc_h1), (0x6210), (sc ->sc_mif_config))) | |||
252 | GEM_MIF_CONFIG, sc->sc_mif_config)((sc->sc_bustag)->write_4((sc->sc_h1), (0x6210), (sc ->sc_mif_config))); | |||
253 | ||||
254 | switch (sc->sc_variant) { | |||
255 | case GEM_SUN_ERI2: | |||
256 | phyad = GEM_PHYAD_EXTERNAL0; | |||
257 | break; | |||
258 | default: | |||
259 | phyad = MII_PHY_ANY-1; | |||
260 | break; | |||
261 | } | |||
262 | ||||
263 | mii_attach(&sc->sc_dev, mii, 0xffffffff, phyad, | |||
264 | MII_OFFSET_ANY-1, mii_flags); | |||
265 | } | |||
266 | ||||
267 | /* | |||
268 | * Fall back on an internal PHY if no external PHY was found. | |||
269 | * Note that with Apple (K2) GMACs GEM_MIF_CONFIG_MDI0 can't be | |||
270 | * trusted when the firmware has powered down the chip | |||
271 | */ | |||
272 | child = LIST_FIRST(&mii->mii_phys)((&mii->mii_phys)->lh_first); | |||
273 | if (child == NULL((void *)0) && | |||
274 | (sc->sc_mif_config & GEM_MIF_CONFIG_MDI00x00000100 || GEM_IS_APPLE(sc)((sc)->sc_variant == 3 || (sc)->sc_variant == 4))) { | |||
275 | sc->sc_mif_config &= ~GEM_MIF_CONFIG_PHY_SEL0x00000001; | |||
276 | bus_space_write_4(sc->sc_bustag, sc->sc_h1,((sc->sc_bustag)->write_4((sc->sc_h1), (0x6210), (sc ->sc_mif_config))) | |||
277 | GEM_MIF_CONFIG, sc->sc_mif_config)((sc->sc_bustag)->write_4((sc->sc_h1), (0x6210), (sc ->sc_mif_config))); | |||
278 | ||||
279 | switch (sc->sc_variant) { | |||
280 | case GEM_SUN_ERI2: | |||
281 | case GEM_APPLE_K2_GMAC4: | |||
282 | phyad = GEM_PHYAD_INTERNAL1; | |||
283 | break; | |||
284 | case GEM_APPLE_GMAC3: | |||
285 | phyad = GEM_PHYAD_EXTERNAL0; | |||
286 | break; | |||
287 | default: | |||
288 | phyad = MII_PHY_ANY-1; | |||
289 | break; | |||
290 | } | |||
291 | ||||
292 | mii_attach(&sc->sc_dev, mii, 0xffffffff, phyad, | |||
293 | MII_OFFSET_ANY-1, mii_flags); | |||
294 | } | |||
295 | ||||
296 | /* | |||
297 | * Try the external PCS SERDES if we didn't find any MII | |||
298 | * devices. | |||
299 | */ | |||
300 | child = LIST_FIRST(&mii->mii_phys)((&mii->mii_phys)->lh_first); | |||
301 | if (child == NULL((void *)0) && sc->sc_variant != GEM_SUN_ERI2) { | |||
302 | bus_space_write_4(sc->sc_bustag, sc->sc_h1,((sc->sc_bustag)->write_4((sc->sc_h1), (0x9050), (0x00000002 ))) | |||
303 | GEM_MII_DATAPATH_MODE, GEM_MII_DATAPATH_SERDES)((sc->sc_bustag)->write_4((sc->sc_h1), (0x9050), (0x00000002 ))); | |||
304 | ||||
305 | bus_space_write_4(sc->sc_bustag, sc->sc_h1,((sc->sc_bustag)->write_4((sc->sc_h1), (0x9054), (0x00000001 |0x00000002))) | |||
306 | GEM_MII_SLINK_CONTROL,((sc->sc_bustag)->write_4((sc->sc_h1), (0x9054), (0x00000001 |0x00000002))) | |||
307 | GEM_MII_SLINK_LOOPBACK|GEM_MII_SLINK_EN_SYNC_D)((sc->sc_bustag)->write_4((sc->sc_h1), (0x9054), (0x00000001 |0x00000002))); | |||
308 | ||||
309 | bus_space_write_4(sc->sc_bustag, sc->sc_h1,((sc->sc_bustag)->write_4((sc->sc_h1), (0x9010), (0x00000001 ))) | |||
310 | GEM_MII_CONFIG, GEM_MII_CONFIG_ENABLE)((sc->sc_bustag)->write_4((sc->sc_h1), (0x9010), (0x00000001 ))); | |||
311 | ||||
312 | mii->mii_readreg = gem_pcs_readreg; | |||
313 | mii->mii_writereg = gem_pcs_writereg; | |||
314 | ||||
315 | mii_flags |= MIIF_NOISOLATE0x0002; | |||
316 | ||||
317 | mii_attach(&sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY-1, | |||
318 | MII_OFFSET_ANY-1, mii_flags); | |||
319 | } | |||
320 | ||||
321 | child = LIST_FIRST(&mii->mii_phys)((&mii->mii_phys)->lh_first); | |||
322 | if (child == NULL((void *)0)) { | |||
323 | /* No PHY attached */ | |||
324 | ifmedia_add(&sc->sc_mediasc_mii.mii_media, IFM_ETHER0x0000000000000100ULL|IFM_MANUAL1ULL, 0, NULL((void *)0)); | |||
325 | ifmedia_set(&sc->sc_mediasc_mii.mii_media, IFM_ETHER0x0000000000000100ULL|IFM_MANUAL1ULL); | |||
326 | } else { | |||
327 | /* | |||
328 | * XXX - we can really do the following ONLY if the | |||
329 | * phy indeed has the auto negotiation capability!! | |||
330 | */ | |||
331 | ifmedia_set(&sc->sc_mediasc_mii.mii_media, IFM_ETHER0x0000000000000100ULL|IFM_AUTO0ULL); | |||
332 | } | |||
333 | ||||
334 | /* Check if we support GigE media. */ | |||
335 | TAILQ_FOREACH(ifm, &sc->sc_media.ifm_list, ifm_list)for((ifm) = ((&sc->sc_mii.mii_media.ifm_list)->tqh_first ); (ifm) != ((void *)0); (ifm) = ((ifm)->ifm_list.tqe_next )) { | |||
336 | if (IFM_SUBTYPE(ifm->ifm_media)((ifm->ifm_media) & 0x00000000000000ffULL) == IFM_1000_T16 || | |||
337 | IFM_SUBTYPE(ifm->ifm_media)((ifm->ifm_media) & 0x00000000000000ffULL) == IFM_1000_SX11 || | |||
338 | IFM_SUBTYPE(ifm->ifm_media)((ifm->ifm_media) & 0x00000000000000ffULL) == IFM_1000_LX14 || | |||
339 | IFM_SUBTYPE(ifm->ifm_media)((ifm->ifm_media) & 0x00000000000000ffULL) == IFM_1000_CX15) { | |||
340 | sc->sc_flags |= GEM_GIGABIT0x0001; | |||
341 | break; | |||
342 | } | |||
343 | } | |||
344 | ||||
345 | /* Attach the interface. */ | |||
346 | if_attach(ifp); | |||
347 | ether_ifattach(ifp); | |||
348 | ||||
349 | timeout_set(&sc->sc_tick_ch, gem_tick, sc); | |||
350 | timeout_set(&sc->sc_rx_watchdog, gem_rx_watchdog, sc); | |||
351 | return; | |||
352 | ||||
353 | /* | |||
354 | * Free any resources we've allocated during the failed attach | |||
355 | * attempt. Do this in reverse order and fall through. | |||
356 | */ | |||
357 | fail_6: | |||
358 | for (i = 0; i < GEM_NTXDESC(64 * 16); i++) { | |||
359 | if (sc->sc_txd[i].sd_map != NULL((void *)0)) | |||
360 | bus_dmamap_destroy(sc->sc_dmatag,(*(sc->sc_dmatag)->_dmamap_destroy)((sc->sc_dmatag), (sc->sc_txd[i].sd_map)) | |||
361 | sc->sc_txd[i].sd_map)(*(sc->sc_dmatag)->_dmamap_destroy)((sc->sc_dmatag), (sc->sc_txd[i].sd_map)); | |||
362 | } | |||
363 | fail_5: | |||
364 | for (i = 0; i < GEM_NRXDESC128; i++) { | |||
365 | if (sc->sc_rxsoft[i].rxs_dmamap != NULL((void *)0)) | |||
366 | bus_dmamap_destroy(sc->sc_dmatag,(*(sc->sc_dmatag)->_dmamap_destroy)((sc->sc_dmatag), (sc->sc_rxsoft[i].rxs_dmamap)) | |||
367 | sc->sc_rxsoft[i].rxs_dmamap)(*(sc->sc_dmatag)->_dmamap_destroy)((sc->sc_dmatag), (sc->sc_rxsoft[i].rxs_dmamap)); | |||
368 | } | |||
369 | bus_dmamap_unload(sc->sc_dmatag, sc->sc_cddmamap)(*(sc->sc_dmatag)->_dmamap_unload)((sc->sc_dmatag), ( sc->sc_cddmamap)); | |||
370 | fail_3: | |||
371 | bus_dmamap_destroy(sc->sc_dmatag, sc->sc_cddmamap)(*(sc->sc_dmatag)->_dmamap_destroy)((sc->sc_dmatag), (sc->sc_cddmamap)); | |||
372 | fail_2: | |||
373 | bus_dmamem_unmap(sc->sc_dmatag, (caddr_t)sc->sc_control_data,(*(sc->sc_dmatag)->_dmamem_unmap)((sc->sc_dmatag), ( (caddr_t)sc->sc_control_data), (sizeof(struct gem_control_data ))) | |||
374 | sizeof(struct gem_control_data))(*(sc->sc_dmatag)->_dmamem_unmap)((sc->sc_dmatag), ( (caddr_t)sc->sc_control_data), (sizeof(struct gem_control_data ))); | |||
375 | fail_1: | |||
376 | bus_dmamem_free(sc->sc_dmatag, &sc->sc_cdseg, sc->sc_cdnseg)(*(sc->sc_dmatag)->_dmamem_free)((sc->sc_dmatag), (& sc->sc_cdseg), (sc->sc_cdnseg)); | |||
377 | fail_0: | |||
378 | return; | |||
379 | } | |||
380 | ||||
381 | void | |||
382 | gem_unconfig(struct gem_softc *sc) | |||
383 | { | |||
384 | struct ifnet *ifp = &sc->sc_arpcom.ac_if; | |||
385 | int i; | |||
386 | ||||
387 | gem_stop(ifp, 1); | |||
388 | ||||
389 | for (i = 0; i < GEM_NTXDESC(64 * 16); i++) { | |||
390 | if (sc->sc_txd[i].sd_map != NULL((void *)0)) | |||
391 | bus_dmamap_destroy(sc->sc_dmatag,(*(sc->sc_dmatag)->_dmamap_destroy)((sc->sc_dmatag), (sc->sc_txd[i].sd_map)) | |||
392 | sc->sc_txd[i].sd_map)(*(sc->sc_dmatag)->_dmamap_destroy)((sc->sc_dmatag), (sc->sc_txd[i].sd_map)); | |||
393 | } | |||
394 | for (i = 0; i < GEM_NRXDESC128; i++) { | |||
395 | if (sc->sc_rxsoft[i].rxs_dmamap != NULL((void *)0)) | |||
396 | bus_dmamap_destroy(sc->sc_dmatag,(*(sc->sc_dmatag)->_dmamap_destroy)((sc->sc_dmatag), (sc->sc_rxsoft[i].rxs_dmamap)) | |||
397 | sc->sc_rxsoft[i].rxs_dmamap)(*(sc->sc_dmatag)->_dmamap_destroy)((sc->sc_dmatag), (sc->sc_rxsoft[i].rxs_dmamap)); | |||
398 | } | |||
399 | bus_dmamap_unload(sc->sc_dmatag, sc->sc_cddmamap)(*(sc->sc_dmatag)->_dmamap_unload)((sc->sc_dmatag), ( sc->sc_cddmamap)); | |||
400 | bus_dmamap_destroy(sc->sc_dmatag, sc->sc_cddmamap)(*(sc->sc_dmatag)->_dmamap_destroy)((sc->sc_dmatag), (sc->sc_cddmamap)); | |||
401 | bus_dmamem_unmap(sc->sc_dmatag, (caddr_t)sc->sc_control_data,(*(sc->sc_dmatag)->_dmamem_unmap)((sc->sc_dmatag), ( (caddr_t)sc->sc_control_data), (sizeof(struct gem_control_data ))) | |||
402 | sizeof(struct gem_control_data))(*(sc->sc_dmatag)->_dmamem_unmap)((sc->sc_dmatag), ( (caddr_t)sc->sc_control_data), (sizeof(struct gem_control_data ))); | |||
403 | bus_dmamem_free(sc->sc_dmatag, &sc->sc_cdseg, sc->sc_cdnseg)(*(sc->sc_dmatag)->_dmamem_free)((sc->sc_dmatag), (& sc->sc_cdseg), (sc->sc_cdnseg)); | |||
404 | ||||
405 | /* Detach all PHYs */ | |||
406 | mii_detach(&sc->sc_mii, MII_PHY_ANY-1, MII_OFFSET_ANY-1); | |||
407 | ||||
408 | /* Delete all remaining media. */ | |||
409 | ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY((uint64_t) -1)); | |||
410 | ||||
411 | ether_ifdetach(ifp); | |||
412 | if_detach(ifp); | |||
413 | } | |||
414 | ||||
415 | ||||
416 | void | |||
417 | gem_tick(void *arg) | |||
418 | { | |||
419 | struct gem_softc *sc = arg; | |||
420 | struct ifnet *ifp = &sc->sc_arpcom.ac_if; | |||
421 | bus_space_tag_t t = sc->sc_bustag; | |||
422 | bus_space_handle_t mac = sc->sc_h1; | |||
423 | int s; | |||
424 | u_int32_t v; | |||
425 | ||||
426 | s = splnet()splraise(0x7); | |||
427 | /* unload collisions counters */ | |||
428 | v = bus_space_read_4(t, mac, GEM_MAC_EXCESS_COLL_CNT)((t)->read_4((mac), (0x6108))) + | |||
429 | bus_space_read_4(t, mac, GEM_MAC_LATE_COLL_CNT)((t)->read_4((mac), (0x610c))); | |||
430 | ifp->if_collisionsif_data.ifi_collisions += v + | |||
431 | bus_space_read_4(t, mac, GEM_MAC_NORM_COLL_CNT)((t)->read_4((mac), (0x6100))) + | |||
432 | bus_space_read_4(t, mac, GEM_MAC_FIRST_COLL_CNT)((t)->read_4((mac), (0x6104))); | |||
433 | ifp->if_oerrorsif_data.ifi_oerrors += v; | |||
434 | ||||
435 | /* read error counters */ | |||
436 | ifp->if_ierrorsif_data.ifi_ierrors += | |||
437 | bus_space_read_4(t, mac, GEM_MAC_RX_LEN_ERR_CNT)((t)->read_4((mac), (0x611c))) + | |||
438 | bus_space_read_4(t, mac, GEM_MAC_RX_ALIGN_ERR)((t)->read_4((mac), (0x6120))) + | |||
439 | bus_space_read_4(t, mac, GEM_MAC_RX_CRC_ERR_CNT)((t)->read_4((mac), (0x6124))) + | |||
440 | bus_space_read_4(t, mac, GEM_MAC_RX_CODE_VIOL)((t)->read_4((mac), (0x6128))); | |||
441 | ||||
442 | /* clear the hardware counters */ | |||
443 | bus_space_write_4(t, mac, GEM_MAC_NORM_COLL_CNT, 0)((t)->write_4((mac), (0x6100), (0))); | |||
444 | bus_space_write_4(t, mac, GEM_MAC_FIRST_COLL_CNT, 0)((t)->write_4((mac), (0x6104), (0))); | |||
445 | bus_space_write_4(t, mac, GEM_MAC_EXCESS_COLL_CNT, 0)((t)->write_4((mac), (0x6108), (0))); | |||
446 | bus_space_write_4(t, mac, GEM_MAC_LATE_COLL_CNT, 0)((t)->write_4((mac), (0x610c), (0))); | |||
447 | bus_space_write_4(t, mac, GEM_MAC_RX_LEN_ERR_CNT, 0)((t)->write_4((mac), (0x611c), (0))); | |||
448 | bus_space_write_4(t, mac, GEM_MAC_RX_ALIGN_ERR, 0)((t)->write_4((mac), (0x6120), (0))); | |||
449 | bus_space_write_4(t, mac, GEM_MAC_RX_CRC_ERR_CNT, 0)((t)->write_4((mac), (0x6124), (0))); | |||
450 | bus_space_write_4(t, mac, GEM_MAC_RX_CODE_VIOL, 0)((t)->write_4((mac), (0x6128), (0))); | |||
451 | ||||
452 | /* | |||
453 | * If buffer allocation fails, the receive ring may become | |||
454 | * empty. There is no receive interrupt to recover from that. | |||
455 | */ | |||
456 | if (if_rxr_inuse(&sc->sc_rx_ring)((&sc->sc_rx_ring)->rxr_alive) == 0) { | |||
457 | gem_fill_rx_ring(sc); | |||
458 | bus_space_write_4(t, mac, GEM_RX_KICK, sc->sc_rx_prod)((t)->write_4((mac), (0x4100), (sc->sc_rx_prod))); | |||
459 | } | |||
460 | ||||
461 | mii_tick(&sc->sc_mii); | |||
462 | splx(s)spllower(s); | |||
463 | ||||
464 | timeout_add_sec(&sc->sc_tick_ch, 1); | |||
465 | } | |||
466 | ||||
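/*
 * gem_bitwait() polls register `r' until the bits in `clr' have cleared
 * and the bits in `set' are set, checking every 100us for up to TRIES
 * (10000) iterations, i.e. roughly one second.  It returns 1 on success
 * and 0 on timeout.
 */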
467 | int | |||
468 | gem_bitwait(struct gem_softc *sc, bus_space_handle_t h, int r, | |||
469 | u_int32_t clr, u_int32_t set) | |||
470 | { | |||
471 | int i; | |||
472 | u_int32_t reg; | |||
473 | ||||
474 | for (i = TRIES10000; i--; DELAY(100)(*delay_func)(100)) { | |||
475 | reg = bus_space_read_4(sc->sc_bustag, h, r)((sc->sc_bustag)->read_4((h), (r))); | |||
476 | if ((reg & clr) == 0 && (reg & set) == set) | |||
477 | return (1); | |||
478 | } | |||
479 | ||||
480 | return (0); | |||
481 | } | |||
482 | ||||
483 | void | |||
484 | gem_reset(struct gem_softc *sc) | |||
485 | { | |||
486 | bus_space_tag_t t = sc->sc_bustag; | |||
487 | bus_space_handle_t h = sc->sc_h2; | |||
488 | int s; | |||
489 | ||||
490 | s = splnet()splraise(0x7); | |||
491 | DPRINTF(sc, ("%s: gem_reset\n", sc->sc_dev.dv_xname)); | |||
492 | gem_reset_rx(sc); | |||
493 | gem_reset_tx(sc); | |||
494 | ||||
495 | /* Do a full reset */ | |||
496 | bus_space_write_4(t, h, GEM_RESET, GEM_RESET_RX|GEM_RESET_TX)((t)->write_4((h), (0x0010), (0x000000002|0x000000001))); | |||
497 | if (!gem_bitwait(sc, h, GEM_RESET0x0010, GEM_RESET_RX0x000000002 | GEM_RESET_TX0x000000001, 0)) | |||
498 | printf("%s: cannot reset device\n", sc->sc_dev.dv_xname); | |||
499 | splx(s)spllower(s); | |||
500 | } | |||
501 | ||||
502 | ||||
503 | /* | |||
504 | * Drain the receive queue. | |||
505 | */ | |||
506 | void | |||
507 | gem_rxdrain(struct gem_softc *sc) | |||
508 | { | |||
509 | struct gem_rxsoft *rxs; | |||
510 | int i; | |||
511 | ||||
512 | for (i = 0; i < GEM_NRXDESC128; i++) { | |||
513 | rxs = &sc->sc_rxsoft[i]; | |||
514 | if (rxs->rxs_mbuf != NULL((void *)0)) { | |||
515 | bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0,(*(sc->sc_dmatag)->_dmamap_sync)((sc->sc_dmatag), (rxs ->rxs_dmamap), (0), (rxs->rxs_dmamap->dm_mapsize), ( 0x02)) | |||
516 | rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD)(*(sc->sc_dmatag)->_dmamap_sync)((sc->sc_dmatag), (rxs ->rxs_dmamap), (0), (rxs->rxs_dmamap->dm_mapsize), ( 0x02)); | |||
517 | bus_dmamap_unload(sc->sc_dmatag, rxs->rxs_dmamap)(*(sc->sc_dmatag)->_dmamap_unload)((sc->sc_dmatag), ( rxs->rxs_dmamap)); | |||
518 | m_freem(rxs->rxs_mbuf); | |||
519 | rxs->rxs_mbuf = NULL((void *)0); | |||
520 | } | |||
521 | } | |||
522 | sc->sc_rx_prod = sc->sc_rx_cons = 0; | |||
523 | } | |||
524 | ||||
525 | /* | |||
526 | * Reset the whole thing. | |||
527 | */ | |||
528 | void | |||
529 | gem_stop(struct ifnet *ifp, int softonly) | |||
530 | { | |||
531 | struct gem_softc *sc = (struct gem_softc *)ifp->if_softc; | |||
532 | struct gem_sxd *sd; | |||
533 | u_int32_t i; | |||
534 | ||||
535 | DPRINTF(sc, ("%s: gem_stop\n", sc->sc_dev.dv_xname)); | |||
536 | ||||
537 | timeout_del(&sc->sc_tick_ch); | |||
538 | ||||
539 | /* | |||
540 | * Mark the interface down and cancel the watchdog timer. | |||
541 | */ | |||
542 | ifp->if_flags &= ~IFF_RUNNING0x40; | |||
543 | ifq_clr_oactive(&ifp->if_snd); | |||
544 | ifp->if_timer = 0; | |||
545 | ||||
546 | if (!softonly) { | |||
547 | mii_down(&sc->sc_mii); | |||
548 | ||||
549 | gem_reset_rx(sc); | |||
550 | gem_reset_tx(sc); | |||
551 | } | |||
552 | ||||
553 | intr_barrier(sc->sc_ih); | |||
554 | ifq_barrier(&ifp->if_snd); | |||
555 | ||||
556 | KASSERT((ifp->if_flags & IFF_RUNNING) == 0)(((ifp->if_flags & 0x40) == 0) ? (void)0 : __assert("diagnostic " , "/usr/src/sys/dev/ic/gem.c", 556, "(ifp->if_flags & IFF_RUNNING) == 0" )); | |||
557 | ||||
558 | /* | |||
559 | * Release any queued transmit buffers. | |||
560 | */ | |||
561 | for (i = 0; i < GEM_NTXDESC(64 * 16); i++) { | |||
562 | sd = &sc->sc_txd[i]; | |||
563 | if (sd->sd_mbuf != NULL((void *)0)) { | |||
564 | bus_dmamap_sync(sc->sc_dmatag, sd->sd_map, 0,(*(sc->sc_dmatag)->_dmamap_sync)((sc->sc_dmatag), (sd ->sd_map), (0), (sd->sd_map->dm_mapsize), (0x08)) | |||
565 | sd->sd_map->dm_mapsize, BUS_DMASYNC_POSTWRITE)(*(sc->sc_dmatag)->_dmamap_sync)((sc->sc_dmatag), (sd ->sd_map), (0), (sd->sd_map->dm_mapsize), (0x08)); | |||
566 | bus_dmamap_unload(sc->sc_dmatag, sd->sd_map)(*(sc->sc_dmatag)->_dmamap_unload)((sc->sc_dmatag), ( sd->sd_map)); | |||
567 | m_freem(sd->sd_mbuf); | |||
568 | sd->sd_mbuf = NULL((void *)0); | |||
569 | } | |||
570 | } | |||
571 | sc->sc_tx_cnt = sc->sc_tx_prod = sc->sc_tx_cons = 0; | |||
572 | ||||
573 | gem_rxdrain(sc); | |||
574 | } | |||
575 | ||||
576 | ||||
577 | /* | |||
578 | * Reset the receiver | |||
579 | */ | |||
580 | int | |||
581 | gem_reset_rx(struct gem_softc *sc) | |||
582 | { | |||
583 | bus_space_tag_t t = sc->sc_bustag; | |||
584 | bus_space_handle_t h = sc->sc_h1, h2 = sc->sc_h2; | |||
585 | ||||
586 | /* | |||
587 | * Resetting while DMA is in progress can cause a bus hang, so we | |||
588 | * disable DMA first. | |||
589 | */ | |||
590 | gem_disable_rx(sc); | |||
591 | bus_space_write_4(t, h, GEM_RX_CONFIG, 0)((t)->write_4((h), (0x4000), (0))); | |||
592 | /* Wait till it finishes */ | |||
593 | if (!gem_bitwait(sc, h, GEM_RX_CONFIG0x4000, 1, 0)) | |||
594 | printf("%s: cannot disable rx dma\n", sc->sc_dev.dv_xname); | |||
595 | /* Wait 5ms extra. */ | |||
596 | delay(5000)(*delay_func)(5000); | |||
597 | ||||
598 | /* Finally, reset the ERX */ | |||
599 | bus_space_write_4(t, h2, GEM_RESET, GEM_RESET_RX)((t)->write_4((h2), (0x0010), (0x000000002))); | |||
600 | /* Wait till it finishes */ | |||
601 | if (!gem_bitwait(sc, h2, GEM_RESET0x0010, GEM_RESET_RX0x000000002, 0)) { | |||
602 | printf("%s: cannot reset receiver\n", sc->sc_dev.dv_xname); | |||
603 | return (1); | |||
604 | } | |||
605 | return (0); | |||
606 | } | |||
607 | ||||
608 | ||||
609 | /* | |||
610 | * Reset the transmitter | |||
611 | */ | |||
612 | int | |||
613 | gem_reset_tx(struct gem_softc *sc) | |||
614 | { | |||
615 | bus_space_tag_t t = sc->sc_bustag; | |||
616 | bus_space_handle_t h = sc->sc_h1, h2 = sc->sc_h2; | |||
617 | ||||
618 | /* | |||
619 | * Resetting while DMA is in progress can cause a bus hang, so we | |||
620 | * disable DMA first. | |||
621 | */ | |||
622 | gem_disable_tx(sc); | |||
623 | bus_space_write_4(t, h, GEM_TX_CONFIG, 0)((t)->write_4((h), (0x2004), (0))); | |||
624 | /* Wait till it finishes */ | |||
625 | if (!gem_bitwait(sc, h, GEM_TX_CONFIG0x2004, 1, 0)) | |||
626 | printf("%s: cannot disable tx dma\n", sc->sc_dev.dv_xname); | |||
627 | /* Wait 5ms extra. */ | |||
628 | delay(5000)(*delay_func)(5000); | |||
629 | ||||
630 | /* Finally, reset the ETX */ | |||
631 | bus_space_write_4(t, h2, GEM_RESET, GEM_RESET_TX)((t)->write_4((h2), (0x0010), (0x000000001))); | |||
632 | /* Wait till it finishes */ | |||
633 | if (!gem_bitwait(sc, h2, GEM_RESET0x0010, GEM_RESET_TX0x000000001, 0)) { | |||
634 | printf("%s: cannot reset transmitter\n", | |||
635 | sc->sc_dev.dv_xname); | |||
636 | return (1); | |||
637 | } | |||
638 | return (0); | |||
639 | } | |||
640 | ||||
641 | /* | |||
642 | * Disable receiver. | |||
643 | */ | |||
644 | int | |||
645 | gem_disable_rx(struct gem_softc *sc) | |||
646 | { | |||
647 | bus_space_tag_t t = sc->sc_bustag; | |||
648 | bus_space_handle_t h = sc->sc_h1; | |||
649 | u_int32_t cfg; | |||
650 | ||||
651 | /* Flip the enable bit */ | |||
652 | cfg = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG)((t)->read_4((h), (0x6034))); | |||
653 | cfg &= ~GEM_MAC_RX_ENABLE0x00000001; | |||
654 | bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, cfg)((t)->write_4((h), (0x6034), (cfg))); | |||
655 | ||||
656 | /* Wait for it to finish */ | |||
657 | return (gem_bitwait(sc, h, GEM_MAC_RX_CONFIG0x6034, GEM_MAC_RX_ENABLE0x00000001, 0)); | |||
658 | } | |||
659 | ||||
660 | /* | |||
661 | * Disable transmitter. | |||
662 | */ | |||
663 | int | |||
664 | gem_disable_tx(struct gem_softc *sc) | |||
665 | { | |||
666 | bus_space_tag_t t = sc->sc_bustag; | |||
667 | bus_space_handle_t h = sc->sc_h1; | |||
668 | u_int32_t cfg; | |||
669 | ||||
670 | /* Flip the enable bit */ | |||
671 | cfg = bus_space_read_4(t, h, GEM_MAC_TX_CONFIG)((t)->read_4((h), (0x6030))); | |||
672 | cfg &= ~GEM_MAC_TX_ENABLE0x00000001; | |||
673 | bus_space_write_4(t, h, GEM_MAC_TX_CONFIG, cfg)((t)->write_4((h), (0x6030), (cfg))); | |||
674 | ||||
675 | /* Wait for it to finish */ | |||
676 | return (gem_bitwait(sc, h, GEM_MAC_TX_CONFIG0x6030, GEM_MAC_TX_ENABLE0x00000001, 0)); | |||
677 | } | |||
678 | ||||
679 | /* | |||
680 | * Initialize interface. | |||
681 | */ | |||
682 | int | |||
683 | gem_meminit(struct gem_softc *sc) | |||
684 | { | |||
685 | int i; | |||
686 | ||||
687 | /* | |||
688 | * Initialize the transmit descriptor ring. | |||
689 | */ | |||
690 | for (i = 0; i < GEM_NTXDESC(64 * 16); i++) { | |||
691 | sc->sc_txdescssc_control_data->gcd_txdescs[i].gd_flags = 0; | |||
692 | sc->sc_txdescssc_control_data->gcd_txdescs[i].gd_addr = 0; | |||
693 | } | |||
694 | GEM_CDTXSYNC(sc, 0, GEM_NTXDESC,
695 |     BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
696 | ||||
697 | /* | |||
698 | * Initialize the receive descriptor and receive job | |||
699 | * descriptor rings. | |||
700 | */ | |||
701 | for (i = 0; i < GEM_NRXDESC128; i++) { | |||
702 | sc->sc_rxdescssc_control_data->gcd_rxdescs[i].gd_flags = 0; | |||
703 | sc->sc_rxdescssc_control_data->gcd_rxdescs[i].gd_addr = 0; | |||
704 | } | |||
705 | /* Hardware reads RX descriptors in multiples of four. */ | |||
706 | if_rxr_init(&sc->sc_rx_ring, 4, GEM_NRXDESC128 - 4); | |||
707 | gem_fill_rx_ring(sc); | |||
708 | ||||
709 | return (0); | |||
710 | } | |||
711 | ||||
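/*
 * gem_ringsize() encodes a descriptor ring size as the value expected in
 * the ring-size field of the TX/RX configuration registers; gem_init()
 * ORs the result into GEM_TX_CONFIG and GEM_RX_CONFIG.
 */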
712 | int | |||
713 | gem_ringsize(int sz) | |||
714 | { | |||
715 | switch (sz) { | |||
716 | case 32: | |||
717 | return GEM_RING_SZ_32(0<<1); | |||
718 | case 64: | |||
719 | return GEM_RING_SZ_64(1<<1); | |||
720 | case 128: | |||
721 | return GEM_RING_SZ_128(2<<1); | |||
722 | case 256: | |||
723 | return GEM_RING_SZ_256(3<<1); | |||
724 | case 512: | |||
725 | return GEM_RING_SZ_512(4<<1); | |||
726 | case 1024: | |||
727 | return GEM_RING_SZ_1024(5<<1); | |||
728 | case 2048: | |||
729 | return GEM_RING_SZ_2048(6<<1); | |||
730 | case 4096: | |||
731 | return GEM_RING_SZ_4096(7<<1); | |||
732 | case 8192: | |||
733 | return GEM_RING_SZ_8192(8<<1); | |||
734 | default: | |||
735 | printf("gem: invalid Receive Descriptor ring size %d\n", sz); | |||
736 | return GEM_RING_SZ_32(0<<1); | |||
737 | } | |||
738 | } | |||
739 | ||||
740 | /* | |||
741 | * Initialization of interface; set up initialization block | |||
742 | * and transmit/receive descriptor rings. | |||
743 | */ | |||
744 | int | |||
745 | gem_init(struct ifnet *ifp) | |||
746 | { | |||
747 | ||||
748 | struct gem_softc *sc = (struct gem_softc *)ifp->if_softc; | |||
749 | bus_space_tag_t t = sc->sc_bustag; | |||
750 | bus_space_handle_t h = sc->sc_h1; | |||
751 | int s; | |||
752 | u_int32_t v; | |||
753 | ||||
754 | s = splnet()splraise(0x7); | |||
755 | ||||
756 | DPRINTF(sc, ("%s: gem_init: calling stop\n", sc->sc_dev.dv_xname)); | |||
757 | /* | |||
758 | * Initialization sequence. The numbered steps below correspond | |||
759 | * to the sequence outlined in section 6.3.5.1 in the Ethernet | |||
760 | * Channel Engine manual (part of the PCIO manual). | |||
761 | * See also the STP2002-STQ document from Sun Microsystems. | |||
762 | */ | |||
763 | ||||
764 | /* step 1 & 2. Reset the Ethernet Channel */ | |||
765 | gem_stop(ifp, 0); | |||
766 | gem_reset(sc); | |||
767 | DPRINTF(sc, ("%s: gem_init: restarting\n", sc->sc_dev.dv_xname)); | |||
768 | ||||
769 | /* Re-initialize the MIF */ | |||
770 | gem_mifinit(sc); | |||
771 | ||||
772 | /* Call MI reset function if any */ | |||
773 | if (sc->sc_hwreset) | |||
774 | (*sc->sc_hwreset)(sc); | |||
775 | ||||
776 | /* step 3. Setup data structures in host memory */ | |||
777 | gem_meminit(sc); | |||
778 | ||||
779 | /* step 4. TX MAC registers & counters */ | |||
780 | gem_init_regs(sc); | |||
781 | ||||
782 | /* step 5. RX MAC registers & counters */ | |||
783 | gem_iff(sc); | |||
784 | ||||
785 | /* step 6 & 7. Program Descriptor Ring Base Addresses */ | |||
786 | bus_space_write_4(t, h, GEM_TX_RING_PTR_HI,
787 |     (((uint64_t)GEM_CDTXADDR(sc,0)) >> 32));
788 | bus_space_write_4(t, h, GEM_TX_RING_PTR_LO, GEM_CDTXADDR(sc, 0));
789 |
790 | bus_space_write_4(t, h, GEM_RX_RING_PTR_HI,
791 |     (((uint64_t)GEM_CDRXADDR(sc,0)) >> 32));
792 | bus_space_write_4(t, h, GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0));
793 | ||||
794 | /* step 8. Global Configuration & Interrupt Mask */ | |||
795 | bus_space_write_4(t, h, GEM_INTMASK,
796 |     ~(GEM_INTR_TX_INTME|
797 |       GEM_INTR_TX_EMPTY|
798 |       GEM_INTR_RX_DONE|GEM_INTR_RX_NOBUF|
799 |       GEM_INTR_RX_TAG_ERR|GEM_INTR_PCS|
800 |       GEM_INTR_MAC_CONTROL|GEM_INTR_MIF|
801 |       GEM_INTR_BERR));
802 | bus_space_write_4(t, h, GEM_MAC_RX_MASK,
803 |     GEM_MAC_RX_DONE|GEM_MAC_RX_FRAME_CNT);
804 | bus_space_write_4(t, h, GEM_MAC_TX_MASK, 0xffff); /* XXXX */
805 | bus_space_write_4(t, h, GEM_MAC_CONTROL_MASK, 0); /* XXXX */
806 | ||||
807 | /* step 9. ETX Configuration: use mostly default values */ | |||
808 | ||||
809 | /* Enable DMA */ | |||
810 | v = gem_ringsize(GEM_NTXDESC /*XXX*/);
811 | v |= ((sc->sc_variant == GEM_SUN_ERI ? 0x100 : 0x04ff) << 10) &
812 |     GEM_TX_CONFIG_TXFIFO_TH;
813 | bus_space_write_4(t, h, GEM_TX_CONFIG, v | GEM_TX_CONFIG_TXDMA_EN);
814 | bus_space_write_4(t, h, GEM_TX_KICK, 0);
815 | ||||
816 | /* step 10. ERX Configuration */ | |||
817 | ||||
818 | /* Encode Receive Descriptor ring size: four possible values */ | |||
819 | v = gem_ringsize(GEM_NRXDESC /*XXX*/);
820 | /* Enable DMA */
821 | bus_space_write_4(t, h, GEM_RX_CONFIG,
822 |     v|(GEM_THRSH_1024<<GEM_RX_CONFIG_FIFO_THRS_SHIFT)|
823 |     (2<<GEM_RX_CONFIG_FBOFF_SHFT)|GEM_RX_CONFIG_RXDMA_EN|
824 |     (0<<GEM_RX_CONFIG_CXM_START_SHFT));
825 | /* | |||
826 | * The following value is for an OFF Threshold of about 3/4 full | |||
827 | * and an ON Threshold of 1/4 full. | |||
828 | */ | |||
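/*
 * For example, with a 4KB RX FIFO (sc_rxfifosize = 4096) this writes an
 * OFF threshold of 3 * 4096 / 256 = 48 and an ON threshold of
 * 4096 / 256 = 16, i.e. 48/64 and 16/64 of the FIFO.
 */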
829 | bus_space_write_4(t, h, GEM_RX_PAUSE_THRESH,
830 |     (3 * sc->sc_rxfifosize / 256) |
831 |     ((sc->sc_rxfifosize / 256) << 12));
832 | bus_space_write_4(t, h, GEM_RX_BLANKING, (6 << 12) | 6);
833 | ||||
834 | /* step 11. Configure Media */ | |||
835 | mii_mediachg(&sc->sc_mii); | |||
836 | ||||
837 | /* step 12. RX_MAC Configuration Register */ | |||
838 | v = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG)((t)->read_4((h), (0x6034))); | |||
839 | v |= GEM_MAC_RX_ENABLE0x00000001 | GEM_MAC_RX_STRIP_CRC0x00000004; | |||
840 | bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, v)((t)->write_4((h), (0x6034), (v))); | |||
841 | ||||
842 | /* step 14. Issue Transmit Pending command */ | |||
843 | ||||
844 | /* Call MI initialization function if any */ | |||
845 | if (sc->sc_hwinit) | |||
846 | (*sc->sc_hwinit)(sc); | |||
847 | ||||
848 | /* step 15. Give the receiver a swift kick */ | |||
849 | bus_space_write_4(t, h, GEM_RX_KICK, sc->sc_rx_prod)((t)->write_4((h), (0x4100), (sc->sc_rx_prod))); | |||
850 | ||||
851 | /* Start the one second timer. */ | |||
852 | timeout_add_sec(&sc->sc_tick_ch, 1); | |||
853 | ||||
854 | ifp->if_flags |= IFF_RUNNING0x40; | |||
855 | ifq_clr_oactive(&ifp->if_snd); | |||
856 | ||||
857 | splx(s)spllower(s); | |||
858 | ||||
859 | return (0); | |||
860 | } | |||
861 | ||||
862 | void | |||
863 | gem_init_regs(struct gem_softc *sc) | |||
864 | { | |||
865 | bus_space_tag_t t = sc->sc_bustag; | |||
866 | bus_space_handle_t h = sc->sc_h1; | |||
867 | u_int32_t v; | |||
868 | ||||
869 | /* These regs are not cleared on reset */ | |||
870 | sc->sc_inited = 0; | |||
871 | if (!sc->sc_inited) { | |||
872 | /* Load recommended values */ | |||
873 | bus_space_write_4(t, h, GEM_MAC_IPG0, 0x00)((t)->write_4((h), (0x6040), (0x00))); | |||
874 | bus_space_write_4(t, h, GEM_MAC_IPG1, 0x08)((t)->write_4((h), (0x6044), (0x08))); | |||
875 | bus_space_write_4(t, h, GEM_MAC_IPG2, 0x04)((t)->write_4((h), (0x6048), (0x04))); | |||
876 | ||||
877 | bus_space_write_4(t, h, GEM_MAC_MAC_MIN_FRAME, ETHER_MIN_LEN)((t)->write_4((h), (0x6050), (64))); | |||
878 | /* Max frame and max burst size */ | |||
879 | bus_space_write_4(t, h, GEM_MAC_MAC_MAX_FRAME,((t)->write_4((h), (0x6054), ((1518 + 4) | (0x2000 << 16)))) | |||
880 | (ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN) | (0x2000 << 16))((t)->write_4((h), (0x6054), ((1518 + 4) | (0x2000 << 16)))); | |||
881 | ||||
882 | bus_space_write_4(t, h, GEM_MAC_PREAMBLE_LEN, 0x07)((t)->write_4((h), (0x6058), (0x07))); | |||
883 | bus_space_write_4(t, h, GEM_MAC_JAM_SIZE, 0x04)((t)->write_4((h), (0x605c), (0x04))); | |||
884 | bus_space_write_4(t, h, GEM_MAC_ATTEMPT_LIMIT, 0x10)((t)->write_4((h), (0x6060), (0x10))); | |||
885 | bus_space_write_4(t, h, GEM_MAC_CONTROL_TYPE, 0x8088)((t)->write_4((h), (0x6064), (0x8088))); | |||
886 | bus_space_write_4(t, h, GEM_MAC_RANDOM_SEED,((t)->write_4((h), (0x6130), (((sc->sc_arpcom.ac_enaddr [5]<<8)|sc->sc_arpcom.ac_enaddr[4])&0x3ff))) | |||
887 | ((sc->sc_arpcom.ac_enaddr[5]<<8)|sc->sc_arpcom.ac_enaddr[4])&0x3ff)((t)->write_4((h), (0x6130), (((sc->sc_arpcom.ac_enaddr [5]<<8)|sc->sc_arpcom.ac_enaddr[4])&0x3ff))); | |||
888 | ||||
889 | /* Secondary MAC addr set to 0:0:0:0:0:0 */ | |||
890 | bus_space_write_4(t, h, GEM_MAC_ADDR3, 0)((t)->write_4((h), (0x608c), (0))); | |||
891 | bus_space_write_4(t, h, GEM_MAC_ADDR4, 0)((t)->write_4((h), (0x6090), (0))); | |||
892 | bus_space_write_4(t, h, GEM_MAC_ADDR5, 0)((t)->write_4((h), (0x6094), (0))); | |||
893 | ||||
894 | /* MAC control addr set to 0:1:c2:0:1:80 */ | |||
895 | bus_space_write_4(t, h, GEM_MAC_ADDR6, 0x0001)((t)->write_4((h), (0x6098), (0x0001))); | |||
896 | bus_space_write_4(t, h, GEM_MAC_ADDR7, 0xc200)((t)->write_4((h), (0x609c), (0xc200))); | |||
897 | bus_space_write_4(t, h, GEM_MAC_ADDR8, 0x0180)((t)->write_4((h), (0x60a0), (0x0180))); | |||
898 | ||||
899 | /* MAC filter addr set to 0:0:0:0:0:0 */ | |||
900 | bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER0, 0)((t)->write_4((h), (0x60a4), (0))); | |||
901 | bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER1, 0)((t)->write_4((h), (0x60a8), (0))); | |||
902 | bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER2, 0)((t)->write_4((h), (0x60ac), (0))); | |||
903 | ||||
904 | bus_space_write_4(t, h, GEM_MAC_ADR_FLT_MASK1_2, 0)((t)->write_4((h), (0x60b0), (0))); | |||
905 | bus_space_write_4(t, h, GEM_MAC_ADR_FLT_MASK0, 0)((t)->write_4((h), (0x60b4), (0))); | |||
906 | ||||
907 | sc->sc_inited = 1; | |||
908 | } | |||
909 | ||||
910 | /* Counters need to be zeroed */ | |||
911 | bus_space_write_4(t, h, GEM_MAC_NORM_COLL_CNT, 0)((t)->write_4((h), (0x6100), (0))); | |||
912 | bus_space_write_4(t, h, GEM_MAC_FIRST_COLL_CNT, 0)((t)->write_4((h), (0x6104), (0))); | |||
913 | bus_space_write_4(t, h, GEM_MAC_EXCESS_COLL_CNT, 0)((t)->write_4((h), (0x6108), (0))); | |||
914 | bus_space_write_4(t, h, GEM_MAC_LATE_COLL_CNT, 0)((t)->write_4((h), (0x610c), (0))); | |||
915 | bus_space_write_4(t, h, GEM_MAC_DEFER_TMR_CNT, 0)((t)->write_4((h), (0x6110), (0))); | |||
916 | bus_space_write_4(t, h, GEM_MAC_PEAK_ATTEMPTS, 0)((t)->write_4((h), (0x6114), (0))); | |||
917 | bus_space_write_4(t, h, GEM_MAC_RX_FRAME_COUNT, 0)((t)->write_4((h), (0x6118), (0))); | |||
918 | bus_space_write_4(t, h, GEM_MAC_RX_LEN_ERR_CNT, 0)((t)->write_4((h), (0x611c), (0))); | |||
919 | bus_space_write_4(t, h, GEM_MAC_RX_ALIGN_ERR, 0)((t)->write_4((h), (0x6120), (0))); | |||
920 | bus_space_write_4(t, h, GEM_MAC_RX_CRC_ERR_CNT, 0)((t)->write_4((h), (0x6124), (0))); | |||
921 | bus_space_write_4(t, h, GEM_MAC_RX_CODE_VIOL, 0)((t)->write_4((h), (0x6128), (0))); | |||
922 | ||||
923 | /* Set XOFF PAUSE time */ | |||
924 | bus_space_write_4(t, h, GEM_MAC_SEND_PAUSE_CMD, 0x1bf0)((t)->write_4((h), (0x6008), (0x1bf0))); | |||
925 | ||||
926 | /* | |||
927 | * Set the internal arbitration to "infinite" bursts of the | |||
928 | * maximum length of 31 * 64 bytes so DMA transfers aren't | |||
929 | * split up in cache line size chunks. This greatly improves | |||
930 | * especially RX performance. | |||
931 | * Enable silicon bug workarounds for the Apple variants. | |||
932 | */ | |||
933 | v = GEM_CONFIG_TXDMA_LIMIT0x00000003e | GEM_CONFIG_RXDMA_LIMIT0x0000007c0; | |||
934 | if (sc->sc_pci) | |||
935 | v |= GEM_CONFIG_BURST_INF0x000000001; | |||
936 | else | |||
937 | v |= GEM_CONFIG_BURST_640x000000000; | |||
938 | if (sc->sc_variant != GEM_SUN_GEM1 && sc->sc_variant != GEM_SUN_ERI2) | |||
939 | v |= GEM_CONFIG_RONPAULBIT0x000000800 | GEM_CONFIG_BUG2FIX0x000001000; | |||
940 | bus_space_write_4(t, h, GEM_CONFIG, v)((t)->write_4((h), (0x0004), (v))); | |||
941 | ||||
942 | /* | |||
943 | * Set the station address. | |||
944 | */ | |||
945 | bus_space_write_4(t, h, GEM_MAC_ADDR0,((t)->write_4((h), (0x6080), ((sc->sc_arpcom.ac_enaddr[ 4]<<8) | sc->sc_arpcom.ac_enaddr[5]))) | |||
946 | (sc->sc_arpcom.ac_enaddr[4]<<8) | sc->sc_arpcom.ac_enaddr[5])((t)->write_4((h), (0x6080), ((sc->sc_arpcom.ac_enaddr[ 4]<<8) | sc->sc_arpcom.ac_enaddr[5]))); | |||
947 | bus_space_write_4(t, h, GEM_MAC_ADDR1,((t)->write_4((h), (0x6084), ((sc->sc_arpcom.ac_enaddr[ 2]<<8) | sc->sc_arpcom.ac_enaddr[3]))) | |||
948 | (sc->sc_arpcom.ac_enaddr[2]<<8) | sc->sc_arpcom.ac_enaddr[3])((t)->write_4((h), (0x6084), ((sc->sc_arpcom.ac_enaddr[ 2]<<8) | sc->sc_arpcom.ac_enaddr[3]))); | |||
949 | bus_space_write_4(t, h, GEM_MAC_ADDR2,((t)->write_4((h), (0x6088), ((sc->sc_arpcom.ac_enaddr[ 0]<<8) | sc->sc_arpcom.ac_enaddr[1]))) | |||
950 | (sc->sc_arpcom.ac_enaddr[0]<<8) | sc->sc_arpcom.ac_enaddr[1])((t)->write_4((h), (0x6088), ((sc->sc_arpcom.ac_enaddr[ 0]<<8) | sc->sc_arpcom.ac_enaddr[1]))); | |||
951 | } | |||
952 | ||||
953 | /* | |||
954 | * Receive interrupt. | |||
955 | */ | |||
956 | int | |||
957 | gem_rint(struct gem_softc *sc) | |||
958 | { | |||
959 | struct ifnet *ifp = &sc->sc_arpcom.ac_if; | |||
960 | bus_space_tag_t t = sc->sc_bustag; | |||
961 | bus_space_handle_t h = sc->sc_h1; | |||
962 | struct gem_rxsoft *rxs; | |||
963 | struct mbuf_list ml = MBUF_LIST_INITIALIZER(){ ((void *)0), ((void *)0), 0 }; | |||
964 | struct mbuf *m; | |||
965 | u_int64_t rxstat; | |||
966 | int i, len; | |||
967 | ||||
968 | if (if_rxr_inuse(&sc->sc_rx_ring)((&sc->sc_rx_ring)->rxr_alive) == 0) | |||
969 | return (0); | |||
970 | ||||
971 | for (i = sc->sc_rx_cons; if_rxr_inuse(&sc->sc_rx_ring)((&sc->sc_rx_ring)->rxr_alive) > 0; | |||
972 | i = GEM_NEXTRX(i)((i + 1) & (128 - 1))) { | |||
973 | rxs = &sc->sc_rxsoft[i]; | |||
974 | ||||
975 | GEM_CDRXSYNC(sc, i,
976 |     BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
977 | ||||
978 | rxstat = GEM_DMA_READ(sc, &sc->sc_rxdescs[i].gd_flags);
979 | ||||
980 | if (rxstat & GEM_RD_OWN0x0000000080000000LL) { | |||
981 | /* We have processed all of the receive buffers. */ | |||
982 | break; | |||
983 | } | |||
984 | ||||
985 | bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0,(*(sc->sc_dmatag)->_dmamap_sync)((sc->sc_dmatag), (rxs ->rxs_dmamap), (0), (rxs->rxs_dmamap->dm_mapsize), ( 0x02)) | |||
986 | rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD)(*(sc->sc_dmatag)->_dmamap_sync)((sc->sc_dmatag), (rxs ->rxs_dmamap), (0), (rxs->rxs_dmamap->dm_mapsize), ( 0x02)); | |||
987 | bus_dmamap_unload(sc->sc_dmatag, rxs->rxs_dmamap)(*(sc->sc_dmatag)->_dmamap_unload)((sc->sc_dmatag), ( rxs->rxs_dmamap)); | |||
988 | ||||
989 | m = rxs->rxs_mbuf; | |||
990 | rxs->rxs_mbuf = NULL((void *)0); | |||
991 | ||||
992 | if_rxr_put(&sc->sc_rx_ring, 1)do { (&sc->sc_rx_ring)->rxr_alive -= (1); } while ( 0); | |||
993 | ||||
994 | if (rxstat & GEM_RD_BAD_CRC0x4000000000000000LL) { | |||
995 | ifp->if_ierrorsif_data.ifi_ierrors++; | |||
996 | #ifdef GEM_DEBUG | |||
997 | printf("%s: receive error: CRC error\n", | |||
998 | sc->sc_dev.dv_xname); | |||
999 | #endif | |||
1000 | m_freem(m); | |||
1001 | continue; | |||
1002 | } | |||
1003 | ||||
1004 | #ifdef GEM_DEBUG | |||
1005 | if (ifp->if_flags & IFF_DEBUG0x4) { | |||
1006 | printf(" rxsoft %p descriptor %d: ", rxs, i); | |||
1007 | printf("gd_flags: 0x%016llx\t", (long long) | |||
1008 |     GEM_DMA_READ(sc, &sc->sc_rxdescs[i].gd_flags));
1009 | printf("gd_addr: 0x%016llx\n", (long long) | |||
1010 |     GEM_DMA_READ(sc, &sc->sc_rxdescs[i].gd_addr));
1011 | } | |||
1012 | #endif | |||
1013 | ||||
1014 | /* No errors; receive the packet. */ | |||
1015 | len = GEM_RD_BUFLEN(rxstat);
1016 | ||||
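		/*
		 * The RX DMA is (presumably) set up with a first-byte offset
		 * of 2 so the IP header ends up 4-byte aligned; advance
		 * m_data past that pad.
		 */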
1017 | m->m_datam_hdr.mh_data += 2; /* We're already off by two */ | |||
1018 | m->m_pkthdrM_dat.MH.MH_pkthdr.len = m->m_lenm_hdr.mh_len = len; | |||
1019 | ||||
1020 | ml_enqueue(&ml, m); | |||
1021 | } | |||
1022 | ||||
1023 | if (ifiq_input(&ifp->if_rcv, &ml)) | |||
1024 | if_rxr_livelocked(&sc->sc_rx_ring); | |||
1025 | ||||
1026 | /* Update the receive pointer. */ | |||
1027 | sc->sc_rx_cons = i; | |||
1028 | gem_fill_rx_ring(sc); | |||
1029 | bus_space_write_4(t, h, GEM_RX_KICK, sc->sc_rx_prod)((t)->write_4((h), (0x4100), (sc->sc_rx_prod))); | |||
1030 | ||||
1031 | DPRINTF(sc, ("gem_rint: done sc->sc_rx_cons %d, complete %d\n", | |||
1032 | sc->sc_rx_cons, bus_space_read_4(t, h, GEM_RX_COMPLETION))); | |||
1033 | ||||
1034 | return (1); | |||
1035 | } | |||
1036 | ||||
1037 | void | |||
1038 | gem_fill_rx_ring(struct gem_softc *sc) | |||
1039 | { | |||
1040 | u_int slots; | |||
1041 | ||||
1042 | for (slots = if_rxr_get(&sc->sc_rx_ring, GEM_NRXDESC128 - 4); | |||
1043 | slots > 0; slots--) { | |||
1044 | if (gem_add_rxbuf(sc, sc->sc_rx_prod)) | |||
1045 | break; | |||
1046 | } | |||
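	/* Hand any slots we could not fill back to the rxr accounting. */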
1047 | if_rxr_put(&sc->sc_rx_ring, slots)do { (&sc->sc_rx_ring)->rxr_alive -= (slots); } while (0); | |||
1048 | } | |||
1049 | ||||
1050 | /* | |||
1051 | * Add a receive buffer to the indicated descriptor. | |||
1052 | */ | |||
1053 | int | |||
1054 | gem_add_rxbuf(struct gem_softc *sc, int idx) | |||
1055 | { | |||
1056 | struct gem_rxsoft *rxs = &sc->sc_rxsoft[idx]; | |||
1057 | struct mbuf *m; | |||
1058 | int error; | |||
1059 | ||||
1060 | m = MCLGETL(NULL, M_DONTWAIT, MCLBYTES)m_clget((((void *)0)), (0x0002), ((1 << 11))); | |||
1061 | if (!m) | |||
1062 | return (ENOBUFS55); | |||
1063 | m->m_lenm_hdr.mh_len = m->m_pkthdrM_dat.MH.MH_pkthdr.len = MCLBYTES(1 << 11); | |||
1064 | ||||
1065 | #ifdef GEM_DEBUG | |||
1066 | /* bzero the packet to check dma */ | |||
1067 | memset(m->m_ext.ext_buf, 0, m->m_ext.ext_size)__builtin_memset((m->M_dat.MH.MH_dat.MH_ext.ext_buf), (0), (m->M_dat.MH.MH_dat.MH_ext.ext_size)); | |||
1068 | #endif | |||
1069 | ||||
1070 | rxs->rxs_mbuf = m; | |||
1071 | ||||
1072 | error = bus_dmamap_load_mbuf(sc->sc_dmatag, rxs->rxs_dmamap, m,(*(sc->sc_dmatag)->_dmamap_load_mbuf)((sc->sc_dmatag ), (rxs->rxs_dmamap), (m), (0x0200|0x0001)) | |||
1073 | BUS_DMA_READ|BUS_DMA_NOWAIT)(*(sc->sc_dmatag)->_dmamap_load_mbuf)((sc->sc_dmatag ), (rxs->rxs_dmamap), (m), (0x0200|0x0001)); | |||
1074 | if (error) { | |||
1075 | printf("%s: can't load rx DMA map %d, error = %d\n", | |||
1076 | sc->sc_dev.dv_xname, idx, error); | |||
1077 | panic("gem_add_rxbuf"); /* XXX */ | |||
1078 | } | |||
1079 | ||||
1080 | bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0,(*(sc->sc_dmatag)->_dmamap_sync)((sc->sc_dmatag), (rxs ->rxs_dmamap), (0), (rxs->rxs_dmamap->dm_mapsize), ( 0x01)) | |||
1081 | rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD)(*(sc->sc_dmatag)->_dmamap_sync)((sc->sc_dmatag), (rxs ->rxs_dmamap), (0), (rxs->rxs_dmamap->dm_mapsize), ( 0x01)); | |||
1082 | ||||
1083 | GEM_INIT_RXDESC(sc, idx)do { struct gem_rxsoft *__rxs = &sc->sc_rxsoft[(idx)]; struct gem_desc *__rxd = &sc->sc_control_data->gcd_rxdescs [(idx)]; struct mbuf *__m = __rxs->rxs_mbuf; ((((sc))-> sc_pci) ? (*(__uint64_t *)((&__rxd->gd_addr)) = ((__uint64_t )((__rxs->rxs_dmamap->dm_segs[0].ds_addr)))) : (*(__uint64_t *)((&__rxd->gd_addr)) = (__uint64_t)(__builtin_constant_p ((__rxs->rxs_dmamap->dm_segs[0].ds_addr)) ? (__uint64_t )((((__uint64_t)((__rxs->rxs_dmamap->dm_segs[0].ds_addr )) & 0xff) << 56) | ((__uint64_t)((__rxs->rxs_dmamap ->dm_segs[0].ds_addr)) & 0xff00ULL) << 40 | ((__uint64_t )((__rxs->rxs_dmamap->dm_segs[0].ds_addr)) & 0xff0000ULL ) << 24 | ((__uint64_t)((__rxs->rxs_dmamap->dm_segs [0].ds_addr)) & 0xff000000ULL) << 8 | ((__uint64_t) ((__rxs->rxs_dmamap->dm_segs[0].ds_addr)) & 0xff00000000ULL ) >> 8 | ((__uint64_t)((__rxs->rxs_dmamap->dm_segs [0].ds_addr)) & 0xff0000000000ULL) >> 24 | ((__uint64_t )((__rxs->rxs_dmamap->dm_segs[0].ds_addr)) & 0xff000000000000ULL ) >> 40 | ((__uint64_t)((__rxs->rxs_dmamap->dm_segs [0].ds_addr)) & 0xff00000000000000ULL) >> 56) : __swap64md ((__rxs->rxs_dmamap->dm_segs[0].ds_addr))))); ((((sc))-> sc_pci) ? (*(__uint64_t *)((&__rxd->gd_flags)) = ((__uint64_t )(((((__m->M_dat.MH.MH_dat.MH_ext.ext_size)<<16) & 0x000000007fff0000LL) | 0x0000000080000000LL)))) : (*(__uint64_t *)((&__rxd->gd_flags)) = (__uint64_t)(__builtin_constant_p (((((__m->M_dat.MH.MH_dat.MH_ext.ext_size)<<16) & 0x000000007fff0000LL) | 0x0000000080000000LL)) ? (__uint64_t )((((__uint64_t)(((((__m->M_dat.MH.MH_dat.MH_ext.ext_size) <<16) & 0x000000007fff0000LL) | 0x0000000080000000LL )) & 0xff) << 56) | ((__uint64_t)(((((__m->M_dat .MH.MH_dat.MH_ext.ext_size)<<16) & 0x000000007fff0000LL ) | 0x0000000080000000LL)) & 0xff00ULL) << 40 | ((__uint64_t )(((((__m->M_dat.MH.MH_dat.MH_ext.ext_size)<<16) & 0x000000007fff0000LL) | 0x0000000080000000LL)) & 0xff0000ULL ) << 24 | ((__uint64_t)(((((__m->M_dat.MH.MH_dat.MH_ext .ext_size)<<16) & 0x000000007fff0000LL) | 0x0000000080000000LL )) & 0xff000000ULL) << 8 | ((__uint64_t)(((((__m-> M_dat.MH.MH_dat.MH_ext.ext_size)<<16) & 0x000000007fff0000LL ) | 0x0000000080000000LL)) & 0xff00000000ULL) >> 8 | ((__uint64_t)(((((__m->M_dat.MH.MH_dat.MH_ext.ext_size)<< 16) & 0x000000007fff0000LL) | 0x0000000080000000LL)) & 0xff0000000000ULL) >> 24 | ((__uint64_t)(((((__m->M_dat .MH.MH_dat.MH_ext.ext_size)<<16) & 0x000000007fff0000LL ) | 0x0000000080000000LL)) & 0xff000000000000ULL) >> 40 | ((__uint64_t)(((((__m->M_dat.MH.MH_dat.MH_ext.ext_size )<<16) & 0x000000007fff0000LL) | 0x0000000080000000LL )) & 0xff00000000000000ULL) >> 56) : __swap64md(((( (__m->M_dat.MH.MH_dat.MH_ext.ext_size)<<16) & 0x000000007fff0000LL ) | 0x0000000080000000LL))))); (*(((sc))->sc_dmatag)->_dmamap_sync )((((sc))->sc_dmatag), (((sc))->sc_cddmamap), (__builtin_offsetof (struct gem_control_data, gcd_rxdescs[(((idx)))])), (sizeof(struct gem_desc)), ((0x01|0x04))); } while (0); | |||
1084 | ||||
1085 | sc->sc_rx_prod = GEM_NEXTRX(sc->sc_rx_prod)((sc->sc_rx_prod + 1) & (128 - 1)); | |||
1086 | ||||
1087 | return (0); | |||
1088 | } | |||
1089 | ||||
1090 | int | |||
1091 | gem_eint(struct gem_softc *sc, u_int status) | |||
1092 | { | |||
1093 | if ((status & GEM_INTR_MIF0x000020000) != 0) { | |||
1094 | #ifdef GEM_DEBUG | |||
1095 | printf("%s: link status changed\n", sc->sc_dev.dv_xname); | |||
1096 | #endif | |||
1097 | return (1); | |||
1098 | } | |||
1099 | ||||
1100 | printf("%s: status=%b\n", sc->sc_dev.dv_xname, status, GEM_INTR_BITS"\020" "\1INTME\2TXEMPTY\3TXDONE" "\5RXDONE\6RXNOBUF\7RX_TAG_ERR" "\16PCS\17TXMAC\20RXMAC" "\21MACCONTROL\22MIF\23BERR"); | |||
1101 | return (1); | |||
1102 | } | |||
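As a reading aid for the status printf above: the kernel "%b" format takes the value followed by a bit-name string whose first character is the output radix ("\020" = hex) and whose entries are 1-based bit numbers followed by names. A minimal sketch with a hypothetical status value, assuming the usual OpenBSD semantics:

	/* status = GEM_INTR_RX_DONE | GEM_INTR_RX_NOBUF = 0x30 */
	printf("%s: status=%b\n", sc->sc_dev.dv_xname, 0x30, GEM_INTR_BITS);
	/* would print something like: gem0: status=30<RXDONE,RXNOBUF> */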
1103 | ||||
1104 | int | |||
1105 | gem_pint(struct gem_softc *sc) | |||
1106 | { | |||
1107 | bus_space_tag_t t = sc->sc_bustag; | |||
1108 | bus_space_handle_t seb = sc->sc_h1; | |||
1109 | u_int32_t status; | |||
1110 | ||||
1111 | status = bus_space_read_4(t, seb, GEM_MII_INTERRUP_STATUS)((t)->read_4((seb), (0x9018))); | |||
1112 | status |= bus_space_read_4(t, seb, GEM_MII_INTERRUP_STATUS)((t)->read_4((seb), (0x9018))); | |||
1113 | #ifdef GEM_DEBUG | |||
1114 | if (status) | |||
1115 | printf("%s: link status changed\n", sc->sc_dev.dv_xname); | |||
1116 | #endif | |||
1117 | return (1); | |||
1118 | } | |||
1119 | ||||
1120 | int | |||
1121 | gem_intr(void *v) | |||
1122 | { | |||
1123 | struct gem_softc *sc = (struct gem_softc *)v; | |||
1124 | struct ifnet *ifp = &sc->sc_arpcom.ac_if; | |||
1125 | bus_space_tag_t t = sc->sc_bustag; | |||
1126 | bus_space_handle_t seb = sc->sc_h1; | |||
1127 | u_int32_t status; | |||
1128 | int r = 0; | |||
1129 | ||||
1130 | status = bus_space_read_4(t, seb, GEM_STATUS)((t)->read_4((seb), (0x000c))); | |||
1131 | DPRINTF(sc, ("%s: gem_intr: cplt %x status %b\n", | |||
1132 | sc->sc_dev.dv_xname, (status>>19), status, GEM_INTR_BITS)); | |||
1133 | ||||
1134 | if (status == 0xffffffff) | |||
1135 | return (0); | |||
1136 | ||||
1137 | if ((status & GEM_INTR_PCS0x000002000) != 0) | |||
1138 | r |= gem_pint(sc); | |||
1139 | ||||
1140 | if ((status & (GEM_INTR_RX_TAG_ERR0x000000040 | GEM_INTR_BERR0x000040000)) != 0) | |||
1141 | r |= gem_eint(sc, status); | |||
1142 | ||||
1143 | if ((status & (GEM_INTR_TX_EMPTY0x000000002 | GEM_INTR_TX_INTME0x000000001)) != 0) | |||
1144 | r |= gem_tint(sc, status); | |||
1145 | ||||
1146 | if ((status & (GEM_INTR_RX_DONE0x000000010 | GEM_INTR_RX_NOBUF0x000000020)) != 0) | |||
1147 | r |= gem_rint(sc); | |||
1148 | ||||
1149 | /* We should eventually do more than just print out error stats. */ | |||
1150 | if (status & GEM_INTR_TX_MAC0x000004000) { | |||
1151 | int txstat = bus_space_read_4(t, seb, GEM_MAC_TX_STATUS)((t)->read_4((seb), (0x6010))); | |||
1152 | #ifdef GEM_DEBUG | |||
1153 | if (txstat & ~GEM_MAC_TX_XMIT_DONE0x00000001) | |||
1154 | printf("%s: MAC tx fault, status %x\n", | |||
1155 | sc->sc_dev.dv_xname, txstat); | |||
1156 | #endif | |||
1157 | if (txstat & (GEM_MAC_TX_UNDERRUN0x00000002 | GEM_MAC_TX_PKT_TOO_LONG0x00000004)) { | |||
1158 | KERNEL_LOCK()_kernel_lock(); | |||
1159 | gem_init(ifp); | |||
1160 | KERNEL_UNLOCK()_kernel_unlock(); | |||
1161 | } | |||
1162 | } | |||
1163 | if (status & GEM_INTR_RX_MAC0x000008000) { | |||
1164 | int rxstat = bus_space_read_4(t, seb, GEM_MAC_RX_STATUS)((t)->read_4((seb), (0x6014))); | |||
1165 | #ifdef GEM_DEBUG | |||
1166 | if (rxstat & ~GEM_MAC_RX_DONE0x00000001) | |||
1167 | printf("%s: MAC rx fault, status %x\n", | |||
1168 | sc->sc_dev.dv_xname, rxstat); | |||
1169 | #endif | |||
1170 | if (rxstat & GEM_MAC_RX_OVERFLOW0x00000002) { | |||
1171 | ifp->if_ierrorsif_data.ifi_ierrors++; | |||
1172 | ||||
1173 | /* | |||
1174 | * Apparently a silicon bug causes ERI to hang | |||
1175 | * from time to time. So if we detect an RX | |||
1176 | * FIFO overflow, we fire off a timer, and | |||
1177 | * check whether we're still making progress | |||
1178 | * by looking at the RX FIFO write and read | |||
1179 | * pointers. | |||
1180 | */ | |||
1181 | sc->sc_rx_fifo_wr_ptr = | |||
1182 | bus_space_read_4(t, seb, GEM_RX_FIFO_WR_PTR)((t)->read_4((seb), (0x400c))); | |||
1183 | sc->sc_rx_fifo_rd_ptr = | |||
1184 | bus_space_read_4(t, seb, GEM_RX_FIFO_RD_PTR)((t)->read_4((seb), (0x4014))); | |||
1185 | timeout_add_msec(&sc->sc_rx_watchdog, 400); | |||
1186 | } | |||
1187 | #ifdef GEM_DEBUG | |||
1188 | else if (rxstat & ~(GEM_MAC_RX_DONE0x00000001 | GEM_MAC_RX_FRAME_CNT0x00000004)) | |||
1189 | printf("%s: MAC rx fault, status %x\n", | |||
1190 | sc->sc_dev.dv_xname, rxstat); | |||
1191 | #endif | |||
1192 | } | |||
1193 | return (r); | |||
1194 | } | |||
1195 | ||||
1196 | void | |||
1197 | gem_rx_watchdog(void *arg) | |||
1198 | { | |||
1199 | struct gem_softc *sc = arg; | |||
1200 | struct ifnet *ifp = &sc->sc_arpcom.ac_if; | |||
1201 | bus_space_tag_t t = sc->sc_bustag; | |||
1202 | bus_space_handle_t h = sc->sc_h1; | |||
1203 | u_int32_t rx_fifo_wr_ptr; | |||
1204 | u_int32_t rx_fifo_rd_ptr; | |||
1205 | u_int32_t state; | |||
1206 | ||||
1207 | if ((ifp->if_flags & IFF_RUNNING0x40) == 0) | |||
1208 | return; | |||
1209 | ||||
1210 | rx_fifo_wr_ptr = bus_space_read_4(t, h, GEM_RX_FIFO_WR_PTR)((t)->read_4((h), (0x400c))); | |||
1211 | rx_fifo_rd_ptr = bus_space_read_4(t, h, GEM_RX_FIFO_RD_PTR)((t)->read_4((h), (0x4014))); | |||
1212 | state = bus_space_read_4(t, h, GEM_MAC_MAC_STATE)((t)->read_4((h), (0x6134))); | |||
1213 | if ((state & GEM_MAC_STATE_OVERFLOW0x03800000) == GEM_MAC_STATE_OVERFLOW0x03800000) { | |||
1214 | if ((rx_fifo_wr_ptr == rx_fifo_rd_ptr) || | |||
1215 | ((sc->sc_rx_fifo_wr_ptr == rx_fifo_wr_ptr) && | |||
1216 | (sc->sc_rx_fifo_rd_ptr == rx_fifo_rd_ptr))) { | |||
1217 | /* | |||
1218 | * The RX state machine is still in overflow state and | |||
1219 | * the RX FIFO write and read pointers seem to be | |||
1220 | * stuck. Whack the chip over the head to get things | |||
1221 | * going again. | |||
1222 | */ | |||
1223 | gem_init(ifp); | |||
1224 | } else { | |||
1225 | /* | |||
1226 | * We made some progress, but it is not certain that the | |||
1227 | * overflow condition has been resolved. Check again. | |||
1228 | */ | |||
1229 | sc->sc_rx_fifo_wr_ptr = rx_fifo_wr_ptr; | |||
1230 | sc->sc_rx_fifo_rd_ptr = rx_fifo_rd_ptr; | |||
1231 | timeout_add_msec(&sc->sc_rx_watchdog, 400); | |||
1232 | } | |||
1233 | } | |||
1234 | } | |||
1235 | ||||
1236 | void | |||
1237 | gem_watchdog(struct ifnet *ifp) | |||
1238 | { | |||
1239 | struct gem_softc *sc = ifp->if_softc; | |||
1240 | ||||
1241 | DPRINTF(sc, ("gem_watchdog: GEM_RX_CONFIG %x GEM_MAC_RX_STATUS %x " | |||
1242 | "GEM_MAC_RX_CONFIG %x\n", | |||
1243 | bus_space_read_4(sc->sc_bustag, sc->sc_h1, GEM_RX_CONFIG), | |||
1244 | bus_space_read_4(sc->sc_bustag, sc->sc_h1, GEM_MAC_RX_STATUS), | |||
1245 | bus_space_read_4(sc->sc_bustag, sc->sc_h1, GEM_MAC_RX_CONFIG))); | |||
1246 | ||||
1247 | log(LOG_ERR3, "%s: device timeout\n", sc->sc_dev.dv_xname); | |||
1248 | ++ifp->if_oerrorsif_data.ifi_oerrors; | |||
1249 | ||||
1250 | /* Try to get more packets going. */ | |||
1251 | gem_init(ifp); | |||
1252 | } | |||
1253 | ||||
1254 | /* | |||
1255 | * Initialize the MII Management Interface | |||
1256 | */ | |||
1257 | void | |||
1258 | gem_mifinit(struct gem_softc *sc) | |||
1259 | { | |||
1260 | bus_space_tag_t t = sc->sc_bustag; | |||
1261 | bus_space_handle_t mif = sc->sc_h1; | |||
1262 | ||||
1263 | /* Configure the MIF in frame mode */ | |||
1264 | sc->sc_mif_config = bus_space_read_4(t, mif, GEM_MIF_CONFIG)((t)->read_4((mif), (0x6210))); | |||
1265 | sc->sc_mif_config &= ~GEM_MIF_CONFIG_BB_ENA0x00000004; | |||
1266 | bus_space_write_4(t, mif, GEM_MIF_CONFIG, sc->sc_mif_config)((t)->write_4((mif), (0x6210), (sc->sc_mif_config))); | |||
1267 | } | |||
1268 | ||||
1269 | /* | |||
1270 | * MII interface | |||
1271 | * | |||
1272 | * The GEM MII interface supports at least three different operating modes: | |||
1273 | * | |||
1274 | * Bitbang mode is implemented using data, clock and output enable registers. | |||
1275 | * | |||
1276 | * Frame mode is implemented by loading a complete frame into the frame | |||
1277 | * register and polling the valid bit for completion. | |||
1278 | * | |||
1279 | * Polling mode uses the frame register but completion is indicated by | |||
1280 | * an interrupt. | |||
1281 | * | |||
1282 | */ | |||
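A brief decoding of the frame-mode command word used below, inferred from the shift and constant values in gem_mii_readreg()/gem_mii_writereg(); it appears to follow standard clause-22 MDIO framing (an inference, not taken from chip documentation):

	bits 31:28  start/opcode nibble (0x6 = read, 0x5 = write)
	bits 27:23  PHY address       (GEM_MIF_PHY_SHIFT)
	bits 22:18  register address  (GEM_MIF_REG_SHIFT)
	bits 17:16  turnaround; TA0 (bit 16) reads back set when the frame completes
	bits 15:0   data              (GEM_MIF_FRAME_DATA)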
1283 | int | |||
1284 | gem_mii_readreg(struct device *self, int phy, int reg) | |||
1285 | { | |||
1286 | struct gem_softc *sc = (void *)self; | |||
1287 | bus_space_tag_t t = sc->sc_bustag; | |||
1288 | bus_space_handle_t mif = sc->sc_h1; | |||
1289 | int n; | |||
1290 | u_int32_t v; | |||
1291 | ||||
1292 | #ifdef GEM_DEBUG | |||
1293 | if (sc->sc_debug) | |||
1294 | printf("gem_mii_readreg: phy %d reg %d\n", phy, reg); | |||
1295 | #endif | |||
1296 | ||||
1297 | /* Construct the frame command */ | |||
1298 | v = (reg << GEM_MIF_REG_SHIFT18) | (phy << GEM_MIF_PHY_SHIFT23) | | |||
1299 | GEM_MIF_FRAME_READ0x60020000; | |||
1300 | ||||
1301 | bus_space_write_4(t, mif, GEM_MIF_FRAME, v)((t)->write_4((mif), (0x620c), (v))); | |||
1302 | for (n = 0; n < 100; n++) { | |||
1303 | DELAY(1)(*delay_func)(1); | |||
1304 | v = bus_space_read_4(t, mif, GEM_MIF_FRAME)((t)->read_4((mif), (0x620c))); | |||
1305 | if (v & GEM_MIF_FRAME_TA00x00010000) | |||
1306 | return (v & GEM_MIF_FRAME_DATA0x0000ffff); | |||
1307 | } | |||
1308 | ||||
1309 | printf("%s: mii_read timeout\n", sc->sc_dev.dv_xname); | |||
1310 | return (0); | |||
1311 | } | |||
1312 | ||||
1313 | void | |||
1314 | gem_mii_writereg(struct device *self, int phy, int reg, int val) | |||
1315 | { | |||
1316 | struct gem_softc *sc = (void *)self; | |||
1317 | bus_space_tag_t t = sc->sc_bustag; | |||
1318 | bus_space_handle_t mif = sc->sc_h1; | |||
1319 | int n; | |||
1320 | u_int32_t v; | |||
1321 | ||||
1322 | #ifdef GEM_DEBUG | |||
1323 | if (sc->sc_debug) | |||
1324 | printf("gem_mii_writereg: phy %d reg %d val %x\n", | |||
1325 | phy, reg, val); | |||
1326 | #endif | |||
1327 | ||||
1328 | /* Construct the frame command */ | |||
1329 | v = GEM_MIF_FRAME_WRITE0x50020000 | | |||
1330 | (phy << GEM_MIF_PHY_SHIFT23) | | |||
1331 | (reg << GEM_MIF_REG_SHIFT18) | | |||
1332 | (val & GEM_MIF_FRAME_DATA0x0000ffff); | |||
1333 | ||||
1334 | bus_space_write_4(t, mif, GEM_MIF_FRAME, v)((t)->write_4((mif), (0x620c), (v))); | |||
1335 | for (n = 0; n < 100; n++) { | |||
1336 | DELAY(1)(*delay_func)(1); | |||
1337 | v = bus_space_read_4(t, mif, GEM_MIF_FRAME)((t)->read_4((mif), (0x620c))); | |||
1338 | if (v & GEM_MIF_FRAME_TA00x00010000) | |||
1339 | return; | |||
1340 | } | |||
1341 | ||||
1342 | printf("%s: mii_write timeout\n", sc->sc_dev.dv_xname); | |||
1343 | } | |||
1344 | ||||
1345 | void | |||
1346 | gem_mii_statchg(struct device *dev) | |||
1347 | { | |||
1348 | struct gem_softc *sc = (void *)dev; | |||
1349 | #ifdef GEM_DEBUG | |||
1350 | uint64_t instance = IFM_INST(sc->sc_mii.mii_media.ifm_cur->ifm_media)(((sc->sc_mii.mii_media.ifm_cur->ifm_media) & 0xff00000000000000ULL ) >> 56); | |||
1351 | #endif | |||
1352 | bus_space_tag_t t = sc->sc_bustag; | |||
1353 | bus_space_handle_t mac = sc->sc_h1; | |||
1354 | u_int32_t v; | |||
1355 | ||||
1356 | #ifdef GEM_DEBUG | |||
1357 | if (sc->sc_debug) | |||
1358 | printf("gem_mii_statchg: status change: phy = %lld\n", instance); | |||
1359 | #endif | |||
1360 | ||||
1361 | /* Set tx full duplex options */ | |||
1362 | bus_space_write_4(t, mac, GEM_MAC_TX_CONFIG, 0)((t)->write_4((mac), (0x6030), (0))); | |||
1363 | delay(10000)(*delay_func)(10000); /* reg must be cleared and a delay allowed before changing it. */ | |||
1364 | v = GEM_MAC_TX_ENA_IPG00x00000008|GEM_MAC_TX_NGU0x00000010|GEM_MAC_TX_NGU_LIMIT0x00000020| | |||
1365 | GEM_MAC_TX_ENABLE0x00000001; | |||
1366 | if ((IFM_OPTIONS(sc->sc_mii.mii_media_active)((sc->sc_mii.mii_media_active) & (0x00000000ffff0000ULL |0x00ffff0000000000ULL)) & IFM_FDX0x0000010000000000ULL) != 0) { | |||
1367 | v |= GEM_MAC_TX_IGN_CARRIER0x00000002|GEM_MAC_TX_IGN_COLLIS0x00000004; | |||
1368 | } | |||
1369 | bus_space_write_4(t, mac, GEM_MAC_TX_CONFIG, v)((t)->write_4((mac), (0x6030), (v))); | |||
1370 | ||||
1371 | /* XIF Configuration */ | |||
1372 | v = GEM_MAC_XIF_TX_MII_ENA0x00000001; | |||
1373 | v |= GEM_MAC_XIF_LINK_LED0x00000020; | |||
1374 | ||||
1375 | /* External MII needs echo disable if half duplex. */ | |||
1376 | if ((IFM_OPTIONS(sc->sc_mii.mii_media_active)((sc->sc_mii.mii_media_active) & (0x00000000ffff0000ULL |0x00ffff0000000000ULL)) & IFM_FDX0x0000010000000000ULL) != 0) | |||
1377 | /* turn on full duplex LED */ | |||
1378 | v |= GEM_MAC_XIF_FDPLX_LED0x00000040; | |||
1379 | else | |||
1380 | /* half duplex -- disable echo */ | |||
1381 | v |= GEM_MAC_XIF_ECHO_DISABL0x00000004; | |||
1382 | ||||
1383 | switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)((sc->sc_mii.mii_media_active) & 0x00000000000000ffULL )) { | |||
1384 | case IFM_1000_T16: /* Gigabit using GMII interface */ | |||
1385 | case IFM_1000_SX11: | |||
1386 | v |= GEM_MAC_XIF_GMII_MODE0x00000008; | |||
1387 | break; | |||
1388 | default: | |||
1389 | v &= ~GEM_MAC_XIF_GMII_MODE0x00000008; | |||
1390 | } | |||
1391 | bus_space_write_4(t, mac, GEM_MAC_XIF_CONFIG, v)((t)->write_4((mac), (0x603c), (v))); | |||
1392 | ||||
1393 | /* | |||
1394 | * 802.3x flow control | |||
1395 | */ | |||
1396 | v = bus_space_read_4(t, mac, GEM_MAC_CONTROL_CONFIG)((t)->read_4((mac), (0x6038))); | |||
1397 | v &= ~(GEM_MAC_CC_RX_PAUSE0x00000002 | GEM_MAC_CC_TX_PAUSE0x00000001); | |||
1398 | if ((IFM_OPTIONS(sc->sc_mii.mii_media_active)((sc->sc_mii.mii_media_active) & (0x00000000ffff0000ULL |0x00ffff0000000000ULL)) & IFM_ETH_RXPAUSE0x0000000000020000ULL) != 0) | |||
1399 | v |= GEM_MAC_CC_RX_PAUSE0x00000002; | |||
1400 | if ((IFM_OPTIONS(sc->sc_mii.mii_media_active)((sc->sc_mii.mii_media_active) & (0x00000000ffff0000ULL |0x00ffff0000000000ULL)) & IFM_ETH_TXPAUSE0x0000000000040000ULL) != 0) | |||
1401 | v |= GEM_MAC_CC_TX_PAUSE0x00000001; | |||
1402 | bus_space_write_4(t, mac, GEM_MAC_CONTROL_CONFIG, v)((t)->write_4((mac), (0x6038), (v))); | |||
1403 | } | |||
1404 | ||||
1405 | int | |||
1406 | gem_pcs_readreg(struct device *self, int phy, int reg) | |||
1407 | { | |||
1408 | struct gem_softc *sc = (void *)self; | |||
1409 | bus_space_tag_t t = sc->sc_bustag; | |||
1410 | bus_space_handle_t pcs = sc->sc_h1; | |||
1411 | ||||
1412 | #ifdef GEM_DEBUG | |||
1413 | if (sc->sc_debug) | |||
1414 | printf("gem_pcs_readreg: phy %d reg %d\n", phy, reg); | |||
1415 | #endif | |||
1416 | ||||
1417 | if (phy != GEM_PHYAD_EXTERNAL0) | |||
1418 | return (0); | |||
1419 | ||||
1420 | switch (reg) { | |||
1421 | case MII_BMCR0x00: | |||
1422 | reg = GEM_MII_CONTROL0x9000; | |||
1423 | break; | |||
1424 | case MII_BMSR0x01: | |||
1425 | reg = GEM_MII_STATUS0x9004; | |||
1426 | break; | |||
1427 | case MII_ANAR0x04: | |||
1428 | reg = GEM_MII_ANAR0x9008; | |||
1429 | break; | |||
1430 | case MII_ANLPAR0x05: | |||
1431 | reg = GEM_MII_ANLPAR0x900c; | |||
1432 | break; | |||
1433 | case MII_EXTSR0x0f: | |||
1434 | return (EXTSR_1000XFDX0x8000|EXTSR_1000XHDX0x4000); | |||
1435 | default: | |||
1436 | return (0); | |||
1437 | } | |||
1438 | ||||
1439 | return bus_space_read_4(t, pcs, reg)((t)->read_4((pcs), (reg))); | |||
1440 | } | |||
1441 | ||||
1442 | void | |||
1443 | gem_pcs_writereg(struct device *self, int phy, int reg, int val) | |||
1444 | { | |||
1445 | struct gem_softc *sc = (void *)self; | |||
1446 | bus_space_tag_t t = sc->sc_bustag; | |||
1447 | bus_space_handle_t pcs = sc->sc_h1; | |||
1448 | int reset = 0; | |||
1449 | ||||
1450 | #ifdef GEM_DEBUG | |||
1451 | if (sc->sc_debug) | |||
1452 | printf("gem_pcs_writereg: phy %d reg %d val %x\n", | |||
1453 | phy, reg, val); | |||
1454 | #endif | |||
1455 | ||||
1456 | if (phy != GEM_PHYAD_EXTERNAL0) | |||
1457 | return; | |||
1458 | ||||
1459 | if (reg == MII_ANAR0x04) | |||
1460 | bus_space_write_4(t, pcs, GEM_MII_CONFIG, 0)((t)->write_4((pcs), (0x9010), (0))); | |||
1461 | ||||
1462 | switch (reg) { | |||
1463 | case MII_BMCR0x00: | |||
1464 | reset = (val & GEM_MII_CONTROL_RESET0x00008000); | |||
1465 | reg = GEM_MII_CONTROL0x9000; | |||
1466 | break; | |||
1467 | case MII_BMSR0x01: | |||
1468 | reg = GEM_MII_STATUS0x9004; | |||
1469 | break; | |||
1470 | case MII_ANAR0x04: | |||
1471 | reg = GEM_MII_ANAR0x9008; | |||
1472 | break; | |||
1473 | case MII_ANLPAR0x05: | |||
1474 | reg = GEM_MII_ANLPAR0x900c; | |||
1475 | break; | |||
1476 | default: | |||
1477 | return; | |||
1478 | } | |||
1479 | ||||
1480 | bus_space_write_4(t, pcs, reg, val)((t)->write_4((pcs), (reg), (val))); | |||
1481 | ||||
1482 | if (reset) | |||
1483 | gem_bitwait(sc, pcs, GEM_MII_CONTROL0x9000, GEM_MII_CONTROL_RESET0x00008000, 0); | |||
1484 | ||||
1485 | if (reg == GEM_MII_ANAR0x9008 || reset) { | |||
1486 | bus_space_write_4(t, pcs, GEM_MII_SLINK_CONTROL,((t)->write_4((pcs), (0x9054), (0x00000001|0x00000002))) | |||
1487 | GEM_MII_SLINK_LOOPBACK|GEM_MII_SLINK_EN_SYNC_D)((t)->write_4((pcs), (0x9054), (0x00000001|0x00000002))); | |||
1488 | bus_space_write_4(t, pcs, GEM_MII_CONFIG,((t)->write_4((pcs), (0x9010), (0x00000001))) | |||
1489 | GEM_MII_CONFIG_ENABLE)((t)->write_4((pcs), (0x9010), (0x00000001))); | |||
1490 | } | |||
1491 | } | |||
1492 | ||||
1493 | int | |||
1494 | gem_mediachange(struct ifnet *ifp) | |||
1495 | { | |||
1496 | struct gem_softc *sc = ifp->if_softc; | |||
1497 | struct mii_data *mii = &sc->sc_mii; | |||
1498 | ||||
1499 | if (mii->mii_instance) { | |||
1500 | struct mii_softc *miisc; | |||
1501 | LIST_FOREACH(miisc, &mii->mii_phys, mii_list)for((miisc) = ((&mii->mii_phys)->lh_first); (miisc) != ((void *)0); (miisc) = ((miisc)->mii_list.le_next)) | |||
1502 | mii_phy_reset(miisc); | |||
1503 | } | |||
1504 | ||||
1505 | return (mii_mediachg(&sc->sc_mii)); | |||
1506 | } | |||
1507 | ||||
1508 | void | |||
1509 | gem_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) | |||
1510 | { | |||
1511 | struct gem_softc *sc = ifp->if_softc; | |||
1512 | ||||
1513 | mii_pollstat(&sc->sc_mii); | |||
1514 | ifmr->ifm_active = sc->sc_mii.mii_media_active; | |||
1515 | ifmr->ifm_status = sc->sc_mii.mii_media_status; | |||
1516 | } | |||
1517 | ||||
1518 | /* | |||
1519 | * Process an ioctl request. | |||
1520 | */ | |||
1521 | int | |||
1522 | gem_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) | |||
1523 | { | |||
1524 | struct gem_softc *sc = ifp->if_softc; | |||
1525 | struct ifreq *ifr = (struct ifreq *)data; | |||
1526 | int s, error = 0; | |||
1527 | ||||
1528 | s = splnet()splraise(0x7); | |||
1529 | ||||
1530 | switch (cmd) { | |||
1531 | case SIOCSIFADDR((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff ) << 16) | ((('i')) << 8) | ((12))): | |||
1532 | ifp->if_flags |= IFF_UP0x1; | |||
1533 | if ((ifp->if_flags & IFF_RUNNING0x40) == 0) | |||
1534 | gem_init(ifp); | |||
1535 | break; | |||
1536 | ||||
1537 | case SIOCSIFFLAGS((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff ) << 16) | ((('i')) << 8) | ((16))): | |||
1538 | if (ifp->if_flags & IFF_UP0x1) { | |||
1539 | if (ifp->if_flags & IFF_RUNNING0x40) | |||
1540 | error = ENETRESET52; | |||
1541 | else | |||
1542 | gem_init(ifp); | |||
1543 | } else { | |||
1544 | if (ifp->if_flags & IFF_RUNNING0x40) | |||
1545 | gem_stop(ifp, 0); | |||
1546 | } | |||
1547 | #ifdef GEM_DEBUG | |||
1548 | sc->sc_debug = (ifp->if_flags & IFF_DEBUG0x4) != 0 ? 1 : 0; | |||
1549 | #endif | |||
1550 | break; | |||
1551 | ||||
1552 | case SIOCGIFMEDIA(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof (struct ifmediareq) & 0x1fff) << 16) | ((('i')) << 8) | ((56))): | |||
1553 | case SIOCSIFMEDIA(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof (struct ifreq) & 0x1fff) << 16) | ((('i')) << 8) | ((55))): | |||
1554 | error = ifmedia_ioctl(ifp, ifr, &sc->sc_mediasc_mii.mii_media, cmd); | |||
1555 | break; | |||
1556 | ||||
1557 | case SIOCGIFRXR((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff ) << 16) | ((('i')) << 8) | ((170))): | |||
1558 | error = if_rxr_ioctl((struct if_rxrinfo *)ifr->ifr_dataifr_ifru.ifru_data, | |||
1559 | NULL((void *)0), MCLBYTES(1 << 11), &sc->sc_rx_ring); | |||
1560 | break; | |||
1561 | ||||
1562 | default: | |||
1563 | error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data); | |||
1564 | } | |||
1565 | ||||
1566 | if (error == ENETRESET52) { | |||
1567 | if (ifp->if_flags & IFF_RUNNING0x40) | |||
1568 | gem_iff(sc); | |||
1569 | error = 0; | |||
1570 | } | |||
1571 | ||||
1572 | splx(s)spllower(s); | |||
1573 | return (error); | |||
1574 | } | |||
1575 | ||||
1576 | void | |||
1577 | gem_iff(struct gem_softc *sc) | |||
1578 | { | |||
1579 | struct ifnet *ifp = &sc->sc_arpcom.ac_if; | |||
1580 | struct arpcom *ac = &sc->sc_arpcom; | |||
1581 | struct ether_multi *enm; | |||
1582 | struct ether_multistep step; | |||
1583 | bus_space_tag_t t = sc->sc_bustag; | |||
1584 | bus_space_handle_t h = sc->sc_h1; | |||
1585 | u_int32_t crc, hash[16], rxcfg; | |||
1586 | int i; | |||
1587 | ||||
1588 | rxcfg = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG)((t)->read_4((h), (0x6034))); | |||
1589 | rxcfg &= ~(GEM_MAC_RX_HASH_FILTER0x00000020 | GEM_MAC_RX_PROMISCUOUS0x00000008 | | |||
1590 | GEM_MAC_RX_PROMISC_GRP0x00000010); | |||
1591 | ifp->if_flags &= ~IFF_ALLMULTI0x200; | |||
1592 | ||||
1593 | if (ifp->if_flags & IFF_PROMISC0x100 || ac->ac_multirangecnt > 0) { | |||
1594 | ifp->if_flags |= IFF_ALLMULTI0x200; | |||
1595 | if (ifp->if_flags & IFF_PROMISC0x100) | |||
1596 | rxcfg |= GEM_MAC_RX_PROMISCUOUS0x00000008; | |||
1597 | else | |||
1598 | rxcfg |= GEM_MAC_RX_PROMISC_GRP0x00000010; | |||
1599 | } else { | |||
1600 | /* | |||
1601 | * Set up multicast address filter by passing all multicast | |||
1602 | * addresses through a crc generator, and then using the | |||
1603 | * high order 8 bits as an index into the 256 bit logical | |||
1604 | * address filter. The high order 4 bits select the word, | |||
1605 | * while the other 4 bits select the bit within the word | |||
1606 | * (where bit 0 is the MSB). | |||
1607 | */ | |||
1608 | ||||
1609 | rxcfg |= GEM_MAC_RX_HASH_FILTER0x00000020; | |||
1610 | ||||
1611 | /* Clear hash table */ | |||
1612 | for (i = 0; i < 16; i++) | |||
1613 | hash[i] = 0; | |||
1614 | ||||
1615 | ETHER_FIRST_MULTI(step, ac, enm)do { (step).e_enm = ((&(ac)->ac_multiaddrs)->lh_first ); do { if ((((enm)) = ((step)).e_enm) != ((void *)0)) ((step )).e_enm = ((((enm)))->enm_list.le_next); } while ( 0); } while ( 0); | |||
1616 | while (enm != NULL((void *)0)) { | |||
1617 | crc = ether_crc32_le(enm->enm_addrlo, | |||
1618 | ETHER_ADDR_LEN6); | |||
1619 | ||||
1620 | /* Just want the 8 most significant bits. */ | |||
1621 | crc >>= 24; | |||
1622 | ||||
1623 | /* Set the corresponding bit in the filter. */ | |||
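			/* e.g. crc >> 24 == 0xd2: word 0xd (13), bit 15 - 2 == 13 */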
1624 | hash[crc >> 4] |= 1 << (15 - (crc & 15)); | |||
1625 | ||||
1626 | ETHER_NEXT_MULTI(step, enm)do { if (((enm) = (step).e_enm) != ((void *)0)) (step).e_enm = (((enm))->enm_list.le_next); } while ( 0); | |||
1627 | } | |||
1628 | ||||
1629 | /* Now load the hash table into the chip (if we are using it) */ | |||
1630 | for (i = 0; i < 16; i++) { | |||
1631 | bus_space_write_4(t, h,((t)->write_4((h), (0x60c0 + i * (0x60c4 - 0x60c0)), (hash [i]))) | |||
1632 | GEM_MAC_HASH0 + i * (GEM_MAC_HASH1 - GEM_MAC_HASH0),((t)->write_4((h), (0x60c0 + i * (0x60c4 - 0x60c0)), (hash [i]))) | |||
1633 | hash[i])((t)->write_4((h), (0x60c0 + i * (0x60c4 - 0x60c0)), (hash [i]))); | |||
1634 | } | |||
1635 | } | |||
1636 | ||||
1637 | bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, rxcfg)((t)->write_4((h), (0x6034), (rxcfg))); | |||
1638 | } | |||
1639 | ||||
1640 | /* | |||
1641 | * Transmit interrupt. | |||
1642 | */ | |||
1643 | int | |||
1644 | gem_tint(struct gem_softc *sc, u_int32_t status) | |||
1645 | { | |||
1646 | struct ifnet *ifp = &sc->sc_arpcom.ac_if; | |||
1647 | struct gem_sxd *sd; | |||
1648 | u_int32_t cons, prod; | |||
1649 | int free = 0; | |||
1650 | ||||
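	/* The TX completion index is reported in the upper bits of GEM_STATUS. */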
1651 | prod = status >> 19; | |||
1652 | cons = sc->sc_tx_cons; | |||
1653 | while (cons != prod) { | |||
1654 | sd = &sc->sc_txd[cons]; | |||
1655 | if (sd->sd_mbuf != NULL((void *)0)) { | |||
1656 | bus_dmamap_sync(sc->sc_dmatag, sd->sd_map, 0,(*(sc->sc_dmatag)->_dmamap_sync)((sc->sc_dmatag), (sd ->sd_map), (0), (sd->sd_map->dm_mapsize), (0x08)) | |||
1657 | sd->sd_map->dm_mapsize, BUS_DMASYNC_POSTWRITE)(*(sc->sc_dmatag)->_dmamap_sync)((sc->sc_dmatag), (sd ->sd_map), (0), (sd->sd_map->dm_mapsize), (0x08)); | |||
1658 | bus_dmamap_unload(sc->sc_dmatag, sd->sd_map)(*(sc->sc_dmatag)->_dmamap_unload)((sc->sc_dmatag), ( sd->sd_map)); | |||
1659 | m_freem(sd->sd_mbuf); | |||
1660 | sd->sd_mbuf = NULL((void *)0); | |||
1661 | } | |||
1662 | ||||
1663 | free = 1; | |||
1664 | ||||
1665 | cons++; | |||
1666 | cons &= GEM_NTXDESC(64 * 16) - 1; | |||
1667 | } | |||
1668 | ||||
1669 | if (free == 0) | |||
1670 | return (0); | |||
1671 | ||||
1672 | sc->sc_tx_cons = cons; | |||
1673 | ||||
1674 | if (sc->sc_tx_prod == cons) | |||
1675 | ifp->if_timer = 0; | |||
1676 | ||||
1677 | if (ifq_is_oactive(&ifp->if_snd)) | |||
1678 | ifq_restart(&ifp->if_snd); | |||
1679 | ||||
1680 | return (1); | |||
1681 | } | |||
1682 | ||||
1683 | int | |||
1684 | gem_load_mbuf(struct gem_softc *sc, struct gem_sxd *sd, struct mbuf *m) | |||
1685 | { | |||
1686 | int error; | |||
1687 | ||||
1688 | error = bus_dmamap_load_mbuf(sc->sc_dmatag, sd->sd_map, m,(*(sc->sc_dmatag)->_dmamap_load_mbuf)((sc->sc_dmatag ), (sd->sd_map), (m), (0x0001)) | |||
1689 | BUS_DMA_NOWAIT)(*(sc->sc_dmatag)->_dmamap_load_mbuf)((sc->sc_dmatag ), (sd->sd_map), (m), (0x0001)); | |||
1690 | switch (error) { | |||
1691 | case 0: | |||
1692 | break; | |||
1693 | ||||
1694 | case EFBIG27: /* mbuf chain is too fragmented */ | |||
1695 | if (m_defrag(m, M_DONTWAIT0x0002) == 0 && | |||
1696 | bus_dmamap_load_mbuf(sc->sc_dmatag, sd->sd_map, m,(*(sc->sc_dmatag)->_dmamap_load_mbuf)((sc->sc_dmatag ), (sd->sd_map), (m), (0x0001)) | |||
1697 | BUS_DMA_NOWAIT)(*(sc->sc_dmatag)->_dmamap_load_mbuf)((sc->sc_dmatag ), (sd->sd_map), (m), (0x0001)) == 0) | |||
1698 | break; | |||
1699 | /* FALLTHROUGH */ | |||
1700 | default: | |||
1701 | return (1); | |||
1702 | } | |||
1703 | ||||
1704 | return (0); | |||
1705 | } | |||
1706 | ||||
1707 | void | |||
1708 | gem_start(struct ifqueue *ifq) | |||
1709 | { | |||
1710 | struct ifnet *ifp = ifq->ifq_if; | |||
1711 | struct gem_softc *sc = ifp->if_softc; | |||
1712 | struct gem_sxd *sd; | |||
1713 | struct mbuf *m; | |||
1714 | uint64_t flags, nflags; | |||
1715 | bus_dmamap_t map; | |||
1716 | uint32_t prod; | |||
1717 | uint32_t free, used = 0; | |||
1718 | uint32_t first, last; | |||
1719 | int i; | |||
1720 | ||||
1721 | prod = sc->sc_tx_prod; | |||
1722 | ||||
1723 | /* figure out space */ | |||
1724 | free = sc->sc_tx_cons; | |||
1725 | if (free <= prod) | |||
1726 | free += GEM_NTXDESC(64 * 16); | |||
1727 | free -= prod; | |||
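	/* e.g. cons == 10, prod == 1000: free = (10 + 1024) - 1000 = 34 slots */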
1728 | ||||
1729 | bus_dmamap_sync(sc->sc_dmatag, sc->sc_cddmamap,(*(sc->sc_dmatag)->_dmamap_sync)((sc->sc_dmatag), (sc ->sc_cddmamap), (0), (sizeof(struct gem_desc) * (64 * 16)) , (0x04)) | |||
1730 | 0, sizeof(struct gem_desc) * GEM_NTXDESC,(*(sc->sc_dmatag)->_dmamap_sync)((sc->sc_dmatag), (sc ->sc_cddmamap), (0), (sizeof(struct gem_desc) * (64 * 16)) , (0x04)) | |||
1731 | BUS_DMASYNC_PREWRITE)(*(sc->sc_dmatag)->_dmamap_sync)((sc->sc_dmatag), (sc ->sc_cddmamap), (0), (sizeof(struct gem_desc) * (64 * 16)) , (0x04)); | |||
1732 | ||||
1733 | for (;;) { | |||
1734 | if (used + GEM_NTXSEGS16 + 1 > free) { | |||
1735 | ifq_set_oactive(&ifp->if_snd); | |||
1736 | break; | |||
1737 | } | |||
1738 | ||||
1739 | m = ifq_dequeue(ifq); | |||
1740 | if (m == NULL((void *)0)) | |||
1741 | break; | |||
1742 | ||||
1743 | first = prod; | |||
1744 | sd = &sc->sc_txd[first]; | |||
1745 | map = sd->sd_map; | |||
1746 | ||||
1747 | if (gem_load_mbuf(sc, sd, m)) { | |||
1748 | m_freem(m); | |||
1749 | ifp->if_oerrorsif_data.ifi_oerrors++; | |||
1750 | continue; | |||
1751 | } | |||
1752 | ||||
1753 | #if NBPFILTER1 > 0 | |||
1754 | if (ifp->if_bpf) | |||
1755 | bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT(1 << 1)); | |||
1756 | #endif | |||
1757 | ||||
1758 | bus_dmamap_sync(sc->sc_dmatag, map, 0, map->dm_mapsize,(*(sc->sc_dmatag)->_dmamap_sync)((sc->sc_dmatag), (map ), (0), (map->dm_mapsize), (0x04)) | |||
1759 | BUS_DMASYNC_PREWRITE)(*(sc->sc_dmatag)->_dmamap_sync)((sc->sc_dmatag), (map ), (0), (map->dm_mapsize), (0x04)); | |||
1760 | ||||
1761 | nflags = GEM_TD_START_OF_PACKET0x0000000080000000LL; | |||
1762 | for (i = 0; i < map->dm_nsegs; i++) { | |||
1763 | flags = nflags | | |||
1764 | (map->dm_segs[i].ds_len & GEM_TD_BUFSIZE0x0000000000007fffLL); | |||
1765 | ||||
1766 | GEM_DMA_WRITE(sc, &sc->sc_txdescs[prod].gd_addr,(((sc)->sc_pci) ? (*(__uint64_t *)((&sc->sc_control_data ->gcd_txdescs[prod].gd_addr)) = ((__uint64_t)((map->dm_segs [i].ds_addr)))) : (*(__uint64_t *)((&sc->sc_control_data ->gcd_txdescs[prod].gd_addr)) = (__uint64_t)(__builtin_constant_p ((map->dm_segs[i].ds_addr)) ? (__uint64_t)((((__uint64_t)( (map->dm_segs[i].ds_addr)) & 0xff) << 56) | ((__uint64_t )((map->dm_segs[i].ds_addr)) & 0xff00ULL) << 40 | ((__uint64_t)((map->dm_segs[i].ds_addr)) & 0xff0000ULL ) << 24 | ((__uint64_t)((map->dm_segs[i].ds_addr)) & 0xff000000ULL) << 8 | ((__uint64_t)((map->dm_segs[i ].ds_addr)) & 0xff00000000ULL) >> 8 | ((__uint64_t) ((map->dm_segs[i].ds_addr)) & 0xff0000000000ULL) >> 24 | ((__uint64_t)((map->dm_segs[i].ds_addr)) & 0xff000000000000ULL ) >> 40 | ((__uint64_t)((map->dm_segs[i].ds_addr)) & 0xff00000000000000ULL) >> 56) : __swap64md((map->dm_segs [i].ds_addr))))) | |||
1767 | map->dm_segs[i].ds_addr)(((sc)->sc_pci) ? (*(__uint64_t *)((&sc->sc_control_data ->gcd_txdescs[prod].gd_addr)) = ((__uint64_t)((map->dm_segs [i].ds_addr)))) : (*(__uint64_t *)((&sc->sc_control_data ->gcd_txdescs[prod].gd_addr)) = (__uint64_t)(__builtin_constant_p ((map->dm_segs[i].ds_addr)) ? (__uint64_t)((((__uint64_t)( (map->dm_segs[i].ds_addr)) & 0xff) << 56) | ((__uint64_t )((map->dm_segs[i].ds_addr)) & 0xff00ULL) << 40 | ((__uint64_t)((map->dm_segs[i].ds_addr)) & 0xff0000ULL ) << 24 | ((__uint64_t)((map->dm_segs[i].ds_addr)) & 0xff000000ULL) << 8 | ((__uint64_t)((map->dm_segs[i ].ds_addr)) & 0xff00000000ULL) >> 8 | ((__uint64_t) ((map->dm_segs[i].ds_addr)) & 0xff0000000000ULL) >> 24 | ((__uint64_t)((map->dm_segs[i].ds_addr)) & 0xff000000000000ULL ) >> 40 | ((__uint64_t)((map->dm_segs[i].ds_addr)) & 0xff00000000000000ULL) >> 56) : __swap64md((map->dm_segs [i].ds_addr))))); | |||
1768 | GEM_DMA_WRITE(sc, &sc->sc_txdescs[prod].gd_flags,(((sc)->sc_pci) ? (*(__uint64_t *)((&sc->sc_control_data ->gcd_txdescs[prod].gd_flags)) = ((__uint64_t)((flags)))) : (*(__uint64_t *)((&sc->sc_control_data->gcd_txdescs [prod].gd_flags)) = (__uint64_t)(__builtin_constant_p((flags) ) ? (__uint64_t)((((__uint64_t)((flags)) & 0xff) << 56) | ((__uint64_t)((flags)) & 0xff00ULL) << 40 | ( (__uint64_t)((flags)) & 0xff0000ULL) << 24 | ((__uint64_t )((flags)) & 0xff000000ULL) << 8 | ((__uint64_t)((flags )) & 0xff00000000ULL) >> 8 | ((__uint64_t)((flags)) & 0xff0000000000ULL) >> 24 | ((__uint64_t)((flags) ) & 0xff000000000000ULL) >> 40 | ((__uint64_t)((flags )) & 0xff00000000000000ULL) >> 56) : __swap64md((flags ))))) | |||
1769 | flags)(((sc)->sc_pci) ? (*(__uint64_t *)((&sc->sc_control_data ->gcd_txdescs[prod].gd_flags)) = ((__uint64_t)((flags)))) : (*(__uint64_t *)((&sc->sc_control_data->gcd_txdescs [prod].gd_flags)) = (__uint64_t)(__builtin_constant_p((flags) ) ? (__uint64_t)((((__uint64_t)((flags)) & 0xff) << 56) | ((__uint64_t)((flags)) & 0xff00ULL) << 40 | ( (__uint64_t)((flags)) & 0xff0000ULL) << 24 | ((__uint64_t )((flags)) & 0xff000000ULL) << 8 | ((__uint64_t)((flags )) & 0xff00000000ULL) >> 8 | ((__uint64_t)((flags)) & 0xff0000000000ULL) >> 24 | ((__uint64_t)((flags) ) & 0xff000000000000ULL) >> 40 | ((__uint64_t)((flags )) & 0xff00000000000000ULL) >> 56) : __swap64md((flags ))))); | |||
1770 | ||||
1771 | last = prod; | |||
1772 | prod++; | |||
1773 | prod &= GEM_NTXDESC(64 * 16) - 1; | |||
1774 | ||||
1775 | nflags = 0; | |||
1776 | } | |||
1777 | GEM_DMA_WRITE(sc, &sc->sc_txdescs[last].gd_flags,(((sc)->sc_pci) ? (*(__uint64_t *)((&sc->sc_control_data ->gcd_txdescs[last].gd_flags)) = ((__uint64_t)((0x0000000040000000LL | flags)))) : (*(__uint64_t *)((&sc->sc_control_data-> gcd_txdescs[last].gd_flags)) = (__uint64_t)(__builtin_constant_p ((0x0000000040000000LL | flags)) ? (__uint64_t)((((__uint64_t )((0x0000000040000000LL | flags)) & 0xff) << 56) | ( (__uint64_t)((0x0000000040000000LL | flags)) & 0xff00ULL) << 40 | ((__uint64_t)((0x0000000040000000LL | flags)) & 0xff0000ULL) << 24 | ((__uint64_t)((0x0000000040000000LL | flags)) & 0xff000000ULL) << 8 | ((__uint64_t)((0x0000000040000000LL | flags)) & 0xff00000000ULL) >> 8 | ((__uint64_t)( (0x0000000040000000LL | flags)) & 0xff0000000000ULL) >> 24 | ((__uint64_t)((0x0000000040000000LL | flags)) & 0xff000000000000ULL ) >> 40 | ((__uint64_t)((0x0000000040000000LL | flags)) & 0xff00000000000000ULL) >> 56) : __swap64md((0x0000000040000000LL | flags))))) | |||
1778 | GEM_TD_END_OF_PACKET | flags)(((sc)->sc_pci) ? (*(__uint64_t *)((&sc->sc_control_data ->gcd_txdescs[last].gd_flags)) = ((__uint64_t)((0x0000000040000000LL | flags)))) : (*(__uint64_t *)((&sc->sc_control_data-> gcd_txdescs[last].gd_flags)) = (__uint64_t)(__builtin_constant_p ((0x0000000040000000LL | flags)) ? (__uint64_t)((((__uint64_t )((0x0000000040000000LL | flags)) & 0xff) << 56) | ( (__uint64_t)((0x0000000040000000LL | flags)) & 0xff00ULL) << 40 | ((__uint64_t)((0x0000000040000000LL | flags)) & 0xff0000ULL) << 24 | ((__uint64_t)((0x0000000040000000LL | flags)) & 0xff000000ULL) << 8 | ((__uint64_t)((0x0000000040000000LL | flags)) & 0xff00000000ULL) >> 8 | ((__uint64_t)( (0x0000000040000000LL | flags)) & 0xff0000000000ULL) >> 24 | ((__uint64_t)((0x0000000040000000LL | flags)) & 0xff000000000000ULL ) >> 40 | ((__uint64_t)((0x0000000040000000LL | flags)) & 0xff00000000000000ULL) >> 56) : __swap64md((0x0000000040000000LL | flags))))); | |||
1779 | ||||
1780 | used += map->dm_nsegs; | |||
1781 | sc->sc_txd[last].sd_mbuf = m; | |||
1782 | sc->sc_txd[first].sd_map = sc->sc_txd[last].sd_map; | |||
1783 | sc->sc_txd[last].sd_map = map; | |||
1784 | } | |||
1785 | ||||
1786 | bus_dmamap_sync(sc->sc_dmatag, sc->sc_cddmamap,(*(sc->sc_dmatag)->_dmamap_sync)((sc->sc_dmatag), (sc ->sc_cddmamap), (0), (sizeof(struct gem_desc) * (64 * 16)) , (0x08)) | |||
1787 | 0, sizeof(struct gem_desc) * GEM_NTXDESC,(*(sc->sc_dmatag)->_dmamap_sync)((sc->sc_dmatag), (sc ->sc_cddmamap), (0), (sizeof(struct gem_desc) * (64 * 16)) , (0x08)) | |||
1788 | BUS_DMASYNC_POSTWRITE)(*(sc->sc_dmatag)->_dmamap_sync)((sc->sc_dmatag), (sc ->sc_cddmamap), (0), (sizeof(struct gem_desc) * (64 * 16)) , (0x08)); | |||
1789 | ||||
1790 | if (used == 0) | |||
1791 | return; | |||
1792 | ||||
1793 | /* Commit. */ | |||
1794 | sc->sc_tx_prod = prod; | |||
1795 | ||||
1796 | /* Transmit. */ | |||
1797 | bus_space_write_4(sc->sc_bustag, sc->sc_h1, GEM_TX_KICK, prod)((sc->sc_bustag)->write_4((sc->sc_h1), (0x2000), (prod ))); | |||
1798 | ||||
1799 | /* Set timeout in case hardware has problems transmitting. */ | |||
1800 | ifp->if_timer = 5; | |||
1801 | } |
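A note on gem_start(): flags (and last) are assigned only inside the per-segment loop, so the closing GEM_DMA_WRITE of GEM_TD_END_OF_PACKET | flags relies on every loaded map reporting at least one segment; if dm_nsegs could ever be 0 they would be used uninitialized. Below is a minimal, standalone sketch of the same fill pattern with defined starting values (the helper name and parameters are made up for illustration; this is not a patch to the driver):

	#include <stdint.h>

	/* Descriptor flag bits, using the values shown in the listing above. */
	#define TD_START_OF_PACKET	0x0000000080000000ULL
	#define TD_END_OF_PACKET	0x0000000040000000ULL
	#define TD_BUFSIZE		0x0000000000007fffULL

	/*
	 * Toy per-segment fill: flags and last start defined, so the final
	 * END_OF_PACKET write is well defined even for a zero-segment map.
	 */
	static void
	fill_tx_flags_example(uint64_t *gd_flags, const uint64_t *seg_len, int nsegs)
	{
		uint64_t flags = 0;
		uint64_t nflags = TD_START_OF_PACKET;
		int i, last = 0;

		for (i = 0; i < nsegs; i++) {
			flags = nflags | (seg_len[i] & TD_BUFSIZE);
			gd_flags[i] = flags;
			last = i;
			nflags = 0;
		}
		gd_flags[last] = TD_END_OF_PACKET | flags;
	}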