File: dev/pci/if_vge.c
Warning: line 1079, column 4: Value stored to 'm' is never read
1 | /* $OpenBSD: if_vge.c,v 1.75 2022/01/09 05:42:56 jsg Exp $ */ |
2 | /* $FreeBSD: if_vge.c,v 1.3 2004/09/11 22:13:25 wpaul Exp $ */ |
3 | /* |
4 | * Copyright (c) 2004 |
5 | * Bill Paul <wpaul@windriver.com>. All rights reserved. |
6 | * |
7 | * Redistribution and use in source and binary forms, with or without |
8 | * modification, are permitted provided that the following conditions |
9 | * are met: |
10 | * 1. Redistributions of source code must retain the above copyright |
11 | * notice, this list of conditions and the following disclaimer. |
12 | * 2. Redistributions in binary form must reproduce the above copyright |
13 | * notice, this list of conditions and the following disclaimer in the |
14 | * documentation and/or other materials provided with the distribution. |
15 | * 3. All advertising materials mentioning features or use of this software |
16 | * must display the following acknowledgement: |
17 | * This product includes software developed by Bill Paul. |
18 | * 4. Neither the name of the author nor the names of any co-contributors |
19 | * may be used to endorse or promote products derived from this software |
20 | * without specific prior written permission. |
21 | * |
22 | * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND |
23 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
24 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
25 | * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD |
26 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
27 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
28 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
29 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
30 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
31 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF |
32 | * THE POSSIBILITY OF SUCH DAMAGE. |
33 | */ |
34 | |
35 | /* |
36 | * VIA Networking Technologies VT612x PCI gigabit ethernet NIC driver. |
37 | * |
38 | * Written by Bill Paul <wpaul@windriver.com> |
39 | * Senior Networking Software Engineer |
40 | * Wind River Systems |
41 | * |
42 | * Ported to OpenBSD by Peter Valchev <pvalchev@openbsd.org> |
43 | */ |
44 | |
45 | /* |
46 | * The VIA Networking VT6122 is a 32bit, 33/66MHz PCI device that |
47 | * combines a tri-speed ethernet MAC and PHY, with the following |
48 | * features: |
49 | * |
50 | * o Jumbo frame support up to 16K |
51 | * o Transmit and receive flow control |
52 | * o IPv4 checksum offload |
53 | * o VLAN tag insertion and stripping |
54 | * o TCP large send |
55 | * o 64-bit multicast hash table filter |
56 | * o 64 entry CAM filter |
57 | * o 16K RX FIFO and 48K TX FIFO memory |
58 | * o Interrupt moderation |
59 | * |
60 | * The VT6122 supports up to four transmit DMA queues. The descriptors |
61 | * in the transmit ring can address up to 7 data fragments; frames which |
62 | * span more than 7 data buffers must be coalesced, but in general the |
63 | * BSD TCP/IP stack rarely generates frames more than 2 or 3 fragments |
64 | * long. The receive descriptors address only a single buffer. |
65 | * |
66 | * There are two peculiar design issues with the VT6122. One is that |
67 | * receive data buffers must be aligned on a 32-bit boundary. This is |
68 | * not a problem where the VT6122 is used as a LOM device in x86-based |
69 | * systems, but on architectures that generate unaligned access traps, we |
70 | * have to do some copying. |
71 | * |
72 | * The other issue has to do with the way 64-bit addresses are handled. |
73 | * The DMA descriptors only allow you to specify 48 bits of addressing |
74 | * information. The remaining 16 bits are specified using one of the |
75 | * I/O registers. If you only have a 32-bit system, then this isn't |
76 | * an issue, but if you have a 64-bit system and more than 4GB of |
77 | * memory, you have to make sure your network data buffers reside |
78 | * in the same 48-bit 'segment.' |
79 | * |
80 | * Special thanks to Ryan Fu at VIA Networking for providing documentation |
81 | * and sample NICs for testing. |
82 | */ |
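/*
 * Illustrative sketch (not part of this file): the 48-bit address split
 * described in the comment above. The descriptor carries the low 48 bits
 * of a buffer's bus address; the remaining high 16 bits would be
 * programmed once into an I/O register, which is why all buffers must
 * share one 48-bit segment. All names here are hypothetical stand-ins;
 * the driver's real helpers are the VGE_ADDR_LO/VGE_ADDR_HI macros used
 * in vge_newbuf() below.
 */
#include <stdint.h>

struct demo_desc {
	uint32_t addrlo;	/* bits 0-31 of the buffer address */
	uint16_t addrhi;	/* bits 32-47 of the buffer address */
};

static void
demo_split_addr(uint64_t busaddr, struct demo_desc *d, uint16_t *segreg)
{
	d->addrlo = (uint32_t)(busaddr & 0xFFFFFFFF);
	d->addrhi = (uint16_t)((busaddr >> 32) & 0xFFFF);
	/* The top 16 bits are shared by every buffer on the ring,
	 * hence the single-segment requirement described above. */
	*segreg = (uint16_t)(busaddr >> 48);
}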
83 | |
84 | #include "bpfilter.h" |
85 | #include "vlan.h" |
86 | |
87 | #include <sys/param.h> |
88 | #include <sys/endian.h> |
89 | #include <sys/systm.h> |
90 | #include <sys/sockio.h> |
91 | #include <sys/mbuf.h> |
92 | #include <sys/malloc.h> |
93 | #include <sys/kernel.h> |
94 | #include <sys/device.h> |
95 | #include <sys/timeout.h> |
96 | #include <sys/socket.h> |
97 | |
98 | #include <net/if.h> |
99 | #include <net/if_media.h> |
100 | |
101 | #include <netinet/in.h> |
102 | #include <netinet/if_ether.h> |
103 | |
104 | #if NBPFILTER1 > 0 |
105 | #include <net/bpf.h> |
106 | #endif |
107 | |
108 | #include <dev/mii/miivar.h> |
109 | |
110 | #include <dev/pci/pcireg.h> |
111 | #include <dev/pci/pcivar.h> |
112 | #include <dev/pci/pcidevs.h> |
113 | |
114 | #include <dev/pci/if_vgereg.h> |
115 | #include <dev/pci/if_vgevar.h> |
116 | |
117 | int vge_probe (struct device *, void *, void *); |
118 | void vge_attach (struct device *, struct device *, void *); |
119 | int vge_detach (struct device *, int); |
120 | |
121 | int vge_encap (struct vge_softc *, struct mbuf *, int); |
122 | |
123 | int vge_allocmem (struct vge_softc *); |
124 | void vge_freemem (struct vge_softc *); |
125 | int vge_newbuf (struct vge_softc *, int, struct mbuf *); |
126 | int vge_rx_list_init (struct vge_softc *); |
127 | int vge_tx_list_init (struct vge_softc *); |
128 | void vge_rxeof (struct vge_softc *); |
129 | void vge_txeof (struct vge_softc *); |
130 | int vge_intr (void *); |
131 | void vge_tick (void *); |
132 | void vge_start (struct ifnet *); |
133 | int vge_ioctl (struct ifnet *, u_long, caddr_t); |
134 | int vge_init (struct ifnet *); |
135 | void vge_stop (struct vge_softc *); |
136 | void vge_watchdog (struct ifnet *); |
137 | int vge_ifmedia_upd (struct ifnet *); |
138 | void vge_ifmedia_sts (struct ifnet *, struct ifmediareq *); |
139 | |
140 | #ifdef VGE_EEPROM |
141 | void vge_eeprom_getword (struct vge_softc *, int, u_int16_t *); |
142 | #endif |
143 | void vge_read_eeprom (struct vge_softc *, caddr_t, int, int, int); |
144 | |
145 | void vge_miipoll_start (struct vge_softc *); |
146 | void vge_miipoll_stop (struct vge_softc *); |
147 | int vge_miibus_readreg (struct device *, int, int); |
148 | void vge_miibus_writereg (struct device *, int, int, int); |
149 | void vge_miibus_statchg (struct device *); |
150 | |
151 | void vge_cam_clear (struct vge_softc *); |
152 | int vge_cam_set (struct vge_softc *, uint8_t *); |
153 | void vge_iff (struct vge_softc *); |
154 | void vge_reset (struct vge_softc *); |
155 | |
156 | struct cfattach vge_ca = { |
157 | sizeof(struct vge_softc), vge_probe, vge_attach, vge_detach |
158 | }; |
159 | |
160 | struct cfdriver vge_cd = { |
161 | NULL((void *)0), "vge", DV_IFNET |
162 | }; |
163 | |
164 | #define VGE_PCI_LOIO0x10 0x10 |
165 | #define VGE_PCI_LOMEM0x14 0x14 |
166 | |
167 | int vge_debug = 0; |
168 | #define DPRINTF(x)if (vge_debug) printf x if (vge_debug) printf x |
169 | #define DPRINTFN(n, x)if (vge_debug >= (n)) printf x if (vge_debug >= (n)) printf x |
170 | |
171 | const struct pci_matchid vge_devices[] = { |
172 | { PCI_VENDOR_VIATECH0x1106, PCI_PRODUCT_VIATECH_VT612X0x3119 }, |
173 | }; |
174 | |
175 | #ifdef VGE_EEPROM |
176 | /* |
177 | * Read a word of data stored in the EEPROM at address 'addr.' |
178 | */ |
179 | void |
180 | vge_eeprom_getword(struct vge_softc *sc, int addr, u_int16_t *dest) |
181 | { |
182 | int i; |
183 | u_int16_t word = 0; |
184 | |
185 | /* |
186 | * Enter EEPROM embedded programming mode. In order to |
187 | * access the EEPROM at all, we first have to set the |
188 | * EELOAD bit in the CHIPCFG2 register. |
189 | */ |
190 | CSR_SETBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD)((sc->vge_btag)->write_1((sc->vge_bhandle), (0x7A), ( ((sc->vge_btag)->read_1((sc->vge_bhandle), (0x7A))) | (0x80)))); |
191 | CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/)((sc->vge_btag)->write_1((sc->vge_bhandle), (0x93), ( ((sc->vge_btag)->read_1((sc->vge_bhandle), (0x93))) | (0x40)))); |
192 | |
193 | /* Select the address of the word we want to read */ |
194 | CSR_WRITE_1(sc, VGE_EEADDR, addr)((sc->vge_btag)->write_1((sc->vge_bhandle), (0x96), ( addr))); |
195 | |
196 | /* Issue read command */ |
197 | CSR_SETBIT_1(sc, VGE_EECMD, VGE_EECMD_ERD)((sc->vge_btag)->write_1((sc->vge_bhandle), (0x97), ( ((sc->vge_btag)->read_1((sc->vge_bhandle), (0x97))) | (0x01)))); |
198 | |
199 | /* Wait for the done bit to be set. */ |
200 | for (i = 0; i < VGE_TIMEOUT10000; i++) { |
201 | if (CSR_READ_1(sc, VGE_EECMD)((sc->vge_btag)->read_1((sc->vge_bhandle), (0x97))) & VGE_EECMD_EDONE0x80) |
202 | break; |
203 | } |
204 | |
205 | if (i == VGE_TIMEOUT10000) { |
206 | printf("%s: EEPROM read timed out\n", sc->vge_dev.dv_xname); |
207 | *dest = 0; |
208 | return; |
209 | } |
210 | |
211 | /* Read the result */ |
212 | word = CSR_READ_2(sc, VGE_EERDDAT)((sc->vge_btag)->read_2((sc->vge_bhandle), (0x94))); |
213 | |
214 | /* Turn off EEPROM access mode. */ |
215 | CSR_CLRBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/)((sc->vge_btag)->write_1((sc->vge_bhandle), (0x93), ( ((sc->vge_btag)->read_1((sc->vge_bhandle), (0x93))) & ~(0x40)))); |
216 | CSR_CLRBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD)((sc->vge_btag)->write_1((sc->vge_bhandle), (0x7A), ( ((sc->vge_btag)->read_1((sc->vge_bhandle), (0x7A))) & ~(0x80)))); |
217 | |
218 | *dest = word; |
219 | } |
220 | #endif |
221 | |
222 | /* |
223 | * Read a sequence of words from the EEPROM. |
224 | */ |
225 | void |
226 | vge_read_eeprom(struct vge_softc *sc, caddr_t dest, int off, int cnt, |
227 | int swap) |
228 | { |
229 | int i; |
230 | #ifdef VGE_EEPROM |
231 | u_int16_t word = 0, *ptr; |
232 | |
233 | for (i = 0; i < cnt; i++) { |
234 | vge_eeprom_getword(sc, off + i, &word); |
235 | ptr = (u_int16_t *)(dest + (i * 2)); |
236 | if (swap) |
237 | *ptr = ntohs(word)(__uint16_t)(__builtin_constant_p(word) ? (__uint16_t)(((__uint16_t )(word) & 0xffU) << 8 | ((__uint16_t)(word) & 0xff00U ) >> 8) : __swap16md(word)); |
238 | else |
239 | *ptr = word; |
240 | } |
241 | #else |
242 | for (i = 0; i < ETHER_ADDR_LEN6; i++) |
243 | dest[i] = CSR_READ_1(sc, VGE_PAR0 + i)((sc->vge_btag)->read_1((sc->vge_bhandle), (0x00 + i ))); |
244 | #endif |
245 | } |
246 | |
247 | void |
248 | vge_miipoll_stop(struct vge_softc *sc) |
249 | { |
250 | int i; |
251 | |
252 | CSR_WRITE_1(sc, VGE_MIICMD, 0)((sc->vge_btag)->write_1((sc->vge_bhandle), (0x70), ( 0))); |
253 | |
254 | for (i = 0; i < VGE_TIMEOUT10000; i++) { |
255 | DELAY(1)(*delay_func)(1); |
256 | if (CSR_READ_1(sc, VGE_MIISTS)((sc->vge_btag)->read_1((sc->vge_bhandle), (0x6D))) & VGE_MIISTS_IIDL0x80) |
257 | break; |
258 | } |
259 | |
260 | if (i == VGE_TIMEOUT10000) |
261 | printf("%s: failed to idle MII autopoll\n", sc->vge_dev.dv_xname); |
262 | } |
263 | |
264 | void |
265 | vge_miipoll_start(struct vge_softc *sc) |
266 | { |
267 | int i; |
268 | |
269 | /* First, make sure we're idle. */ |
270 | |
271 | CSR_WRITE_1(sc, VGE_MIICMD, 0)((sc->vge_btag)->write_1((sc->vge_bhandle), (0x70), ( 0))); |
272 | CSR_WRITE_1(sc, VGE_MIIADDR, VGE_MIIADDR_SWMPL)((sc->vge_btag)->write_1((sc->vge_bhandle), (0x71), ( 0x80))); |
273 | |
274 | for (i = 0; i < VGE_TIMEOUT10000; i++) { |
275 | DELAY(1)(*delay_func)(1); |
276 | if (CSR_READ_1(sc, VGE_MIISTS)((sc->vge_btag)->read_1((sc->vge_bhandle), (0x6D))) & VGE_MIISTS_IIDL0x80) |
277 | break; |
278 | } |
279 | |
280 | if (i == VGE_TIMEOUT10000) { |
281 | printf("%s: failed to idle MII autopoll\n", sc->vge_dev.dv_xname); |
282 | return; |
283 | } |
284 | |
285 | /* Now enable auto poll mode. */ |
286 | |
287 | CSR_WRITE_1(sc, VGE_MIICMD, VGE_MIICMD_MAUTO)((sc->vge_btag)->write_1((sc->vge_bhandle), (0x70), ( 0x80))); |
288 | |
289 | /* And make sure it started. */ |
290 | |
291 | for (i = 0; i < VGE_TIMEOUT10000; i++) { |
292 | DELAY(1)(*delay_func)(1); |
293 | if ((CSR_READ_1(sc, VGE_MIISTS)((sc->vge_btag)->read_1((sc->vge_bhandle), (0x6D))) & VGE_MIISTS_IIDL0x80) == 0) |
294 | break; |
295 | } |
296 | |
297 | if (i == VGE_TIMEOUT10000) |
298 | printf("%s: failed to start MII autopoll\n", sc->vge_dev.dv_xname); |
299 | } |
300 | |
301 | int |
302 | vge_miibus_readreg(struct device *dev, int phy, int reg) |
303 | { |
304 | struct vge_softc *sc = (struct vge_softc *)dev; |
305 | int i, s; |
306 | u_int16_t rval = 0; |
307 | |
308 | if (phy != (CSR_READ_1(sc, VGE_MIICFG)((sc->vge_btag)->read_1((sc->vge_bhandle), (0x6C))) & 0x1F)) |
309 | return(0); |
310 | |
311 | s = splnet()splraise(0x7); |
312 | |
313 | vge_miipoll_stop(sc); |
314 | |
315 | /* Specify the register we want to read. */ |
316 | CSR_WRITE_1(sc, VGE_MIIADDR, reg)((sc->vge_btag)->write_1((sc->vge_bhandle), (0x71), ( reg))); |
317 | |
318 | /* Issue read command. */ |
319 | CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_RCMD)((sc->vge_btag)->write_1((sc->vge_bhandle), (0x70), ( ((sc->vge_btag)->read_1((sc->vge_bhandle), (0x70))) | (0x40)))); |
320 | |
321 | /* Wait for the read command bit to self-clear. */ |
322 | for (i = 0; i < VGE_TIMEOUT10000; i++) { |
323 | DELAY(1)(*delay_func)(1); |
324 | if ((CSR_READ_1(sc, VGE_MIICMD)((sc->vge_btag)->read_1((sc->vge_bhandle), (0x70))) & VGE_MIICMD_RCMD0x40) == 0) |
325 | break; |
326 | } |
327 | |
328 | if (i == VGE_TIMEOUT10000) |
329 | printf("%s: MII read timed out\n", sc->vge_dev.dv_xname); |
330 | else |
331 | rval = CSR_READ_2(sc, VGE_MIIDATA)((sc->vge_btag)->read_2((sc->vge_bhandle), (0x72))); |
332 | |
333 | vge_miipoll_start(sc); |
334 | splx(s)spllower(s); |
335 | |
336 | return (rval); |
337 | } |
338 | |
339 | void |
340 | vge_miibus_writereg(struct device *dev, int phy, int reg, int data) |
341 | { |
342 | struct vge_softc *sc = (struct vge_softc *)dev; |
343 | int i, s; |
344 | |
345 | if (phy != (CSR_READ_1(sc, VGE_MIICFG)((sc->vge_btag)->read_1((sc->vge_bhandle), (0x6C))) & 0x1F)) |
346 | return; |
347 | |
348 | s = splnet()splraise(0x7); |
349 | vge_miipoll_stop(sc); |
350 | |
351 | /* Specify the register we want to write. */ |
352 | CSR_WRITE_1(sc, VGE_MIIADDR, reg)((sc->vge_btag)->write_1((sc->vge_bhandle), (0x71), ( reg))); |
353 | |
354 | /* Specify the data we want to write. */ |
355 | CSR_WRITE_2(sc, VGE_MIIDATA, data)((sc->vge_btag)->write_2((sc->vge_bhandle), (0x72), ( data))); |
356 | |
357 | /* Issue write command. */ |
358 | CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_WCMD)((sc->vge_btag)->write_1((sc->vge_bhandle), (0x70), ( ((sc->vge_btag)->read_1((sc->vge_bhandle), (0x70))) | (0x20)))); |
359 | |
360 | /* Wait for the write command bit to self-clear. */ |
361 | for (i = 0; i < VGE_TIMEOUT10000; i++) { |
362 | DELAY(1)(*delay_func)(1); |
363 | if ((CSR_READ_1(sc, VGE_MIICMD)((sc->vge_btag)->read_1((sc->vge_bhandle), (0x70))) & VGE_MIICMD_WCMD0x20) == 0) |
364 | break; |
365 | } |
366 | |
367 | if (i == VGE_TIMEOUT10000) { |
368 | printf("%s: MII write timed out\n", sc->vge_dev.dv_xname); |
369 | } |
370 | |
371 | vge_miipoll_start(sc); |
372 | splx(s)spllower(s); |
373 | } |
374 | |
375 | void |
376 | vge_cam_clear(struct vge_softc *sc) |
377 | { |
378 | int i; |
379 | |
380 | /* |
381 | * Turn off all the mask bits. This tells the chip |
382 | * that none of the entries in the CAM filter are valid. |
383 | * Desired entries will be enabled as we fill the filter in. |
384 | */ |
385 | |
386 | CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL)((sc->vge_btag)->write_1((sc->vge_bhandle), (0x69), ( ((sc->vge_btag)->read_1((sc->vge_bhandle), (0x69))) & ~(0xC0)))); |
387 | CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK)((sc->vge_btag)->write_1((sc->vge_bhandle), (0x69), ( ((sc->vge_btag)->read_1((sc->vge_bhandle), (0x69))) | (0x40)))); |
388 | CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE)((sc->vge_btag)->write_1((sc->vge_bhandle), (0x68), ( 0x80))); |
389 | for (i = 0; i < 8; i++) |
390 | CSR_WRITE_1(sc, VGE_CAM0 + i, 0)((sc->vge_btag)->write_1((sc->vge_bhandle), (0x10 + i ), (0))); |
391 | |
392 | /* Clear the VLAN filter too. */ |
393 | |
394 | CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|VGE_CAMADDR_AVSEL|0)((sc->vge_btag)->write_1((sc->vge_bhandle), (0x68), ( 0x80|0x40|0))); |
395 | for (i = 0; i < 8; i++) |
396 | CSR_WRITE_1(sc, VGE_CAM0 + i, 0)((sc->vge_btag)->write_1((sc->vge_bhandle), (0x10 + i ), (0))); |
397 | |
398 | CSR_WRITE_1(sc, VGE_CAMADDR, 0)((sc->vge_btag)->write_1((sc->vge_bhandle), (0x68), ( 0))); |
399 | CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL)((sc->vge_btag)->write_1((sc->vge_bhandle), (0x69), ( ((sc->vge_btag)->read_1((sc->vge_bhandle), (0x69))) & ~(0xC0)))); |
400 | CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR)((sc->vge_btag)->write_1((sc->vge_bhandle), (0x69), ( ((sc->vge_btag)->read_1((sc->vge_bhandle), (0x69))) | (0x00)))); |
401 | |
402 | sc->vge_camidx = 0; |
403 | } |
404 | |
405 | int |
406 | vge_cam_set(struct vge_softc *sc, uint8_t *addr) |
407 | { |
408 | int i, error = 0; |
409 | |
410 | if (sc->vge_camidx == VGE_CAM_MAXADDRS64) |
411 | return(ENOSPC28); |
412 | |
413 | /* Select the CAM data page. */ |
414 | CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL)((sc->vge_btag)->write_1((sc->vge_bhandle), (0x69), ( ((sc->vge_btag)->read_1((sc->vge_bhandle), (0x69))) & ~(0xC0)))); |
415 | CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMDATA)((sc->vge_btag)->write_1((sc->vge_bhandle), (0x69), ( ((sc->vge_btag)->read_1((sc->vge_bhandle), (0x69))) | (0x80)))); |
416 | |
417 | /* Set the filter entry we want to update and enable writing. */ |
418 | CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|sc->vge_camidx)((sc->vge_btag)->write_1((sc->vge_bhandle), (0x68), ( 0x80|sc->vge_camidx))); |
419 | |
420 | /* Write the address to the CAM registers */ |
421 | for (i = 0; i < ETHER_ADDR_LEN6; i++) |
422 | CSR_WRITE_1(sc, VGE_CAM0 + i, addr[i])((sc->vge_btag)->write_1((sc->vge_bhandle), (0x10 + i ), (addr[i]))); |
423 | |
424 | /* Issue a write command. */ |
425 | CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_WRITE)((sc->vge_btag)->write_1((sc->vge_bhandle), (0x69), ( ((sc->vge_btag)->read_1((sc->vge_bhandle), (0x69))) | (0x04)))); |
426 | |
427 | /* Wait for it to clear. */ |
428 | for (i = 0; i < VGE_TIMEOUT10000; i++) { |
429 | DELAY(1)(*delay_func)(1); |
430 | if ((CSR_READ_1(sc, VGE_CAMCTL)((sc->vge_btag)->read_1((sc->vge_bhandle), (0x69))) & VGE_CAMCTL_WRITE0x04) == 0) |
431 | break; |
432 | } |
433 | |
434 | if (i == VGE_TIMEOUT10000) { |
435 | printf("%s: setting CAM filter failed\n", sc->vge_dev.dv_xname); |
436 | error = EIO5; |
437 | goto fail; |
438 | } |
439 | |
440 | /* Select the CAM mask page. */ |
441 | CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL)((sc->vge_btag)->write_1((sc->vge_bhandle), (0x69), ( ((sc->vge_btag)->read_1((sc->vge_bhandle), (0x69))) & ~(0xC0)))); |
442 | CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK)((sc->vge_btag)->write_1((sc->vge_bhandle), (0x69), ( ((sc->vge_btag)->read_1((sc->vge_bhandle), (0x69))) | (0x40)))); |
443 | |
444 | /* Set the mask bit that enables this filter. */ |
445 | CSR_SETBIT_1(sc, VGE_CAM0 + (sc->vge_camidx/8),((sc->vge_btag)->write_1((sc->vge_bhandle), (0x10 + ( sc->vge_camidx/8)), (((sc->vge_btag)->read_1((sc-> vge_bhandle), (0x10 + (sc->vge_camidx/8)))) | (1<<(sc ->vge_camidx & 7))))) |
446 | 1<<(sc->vge_camidx & 7))((sc->vge_btag)->write_1((sc->vge_bhandle), (0x10 + ( sc->vge_camidx/8)), (((sc->vge_btag)->read_1((sc-> vge_bhandle), (0x10 + (sc->vge_camidx/8)))) | (1<<(sc ->vge_camidx & 7))))); |
447 | |
448 | sc->vge_camidx++; |
449 | |
450 | fail: |
451 | /* Turn off access to CAM. */ |
452 | CSR_WRITE_1(sc, VGE_CAMADDR, 0)((sc->vge_btag)->write_1((sc->vge_bhandle), (0x68), ( 0))); |
453 | CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL)((sc->vge_btag)->write_1((sc->vge_bhandle), (0x69), ( ((sc->vge_btag)->read_1((sc->vge_bhandle), (0x69))) & ~(0xC0)))); |
454 | CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR)((sc->vge_btag)->write_1((sc->vge_bhandle), (0x69), ( ((sc->vge_btag)->read_1((sc->vge_bhandle), (0x69))) | (0x00)))); |
455 | |
456 | return (error); |
457 | } |
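/*
 * Illustrative sketch (not part of the driver): the CAM mask addressing
 * used at the end of vge_cam_set() above. Entry n of the 64-entry CAM is
 * enabled by setting bit (n & 7) in mask byte (n / 8). A minimal,
 * self-contained version of that indexing:
 */
#include <stdint.h>

static void
demo_cam_mask(int entry, uint8_t mask[8])
{
	/* entry is 0..63: byte entry/8, bit entry%8 within it */
	mask[entry / 8] |= (uint8_t)(1 << (entry & 7));
}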
458 | |
459 | /* |
460 | * We use the 64-entry CAM filter for perfect filtering. |
461 | * If there's more than 64 multicast addresses, we use the |
462 | * hash filter instead. |
463 | */ |
464 | void |
465 | vge_iff(struct vge_softc *sc) |
466 | { |
467 | struct arpcom *ac = &sc->arpcom; |
468 | struct ifnet *ifp = &ac->ac_if; |
469 | struct ether_multi *enm; |
470 | struct ether_multistep step; |
471 | u_int32_t h = 0, hashes[2]; |
472 | u_int8_t rxctl; |
473 | int error; |
474 | |
475 | vge_cam_clear(sc); |
476 | rxctl = CSR_READ_1(sc, VGE_RXCTL)((sc->vge_btag)->read_1((sc->vge_bhandle), (0x06))); |
477 | rxctl &= ~(VGE_RXCTL_RX_BCAST0x08 | VGE_RXCTL_RX_MCAST0x04 | |
478 | VGE_RXCTL_RX_PROMISC0x10 | VGE_RXCTL_RX_UCAST0x40); |
479 | bzero(hashes, sizeof(hashes))__builtin_bzero((hashes), (sizeof(hashes))); |
480 | ifp->if_flags &= ~IFF_ALLMULTI0x200; |
481 | |
482 | /* |
483 | * Always accept broadcast frames. |
484 | * Always accept frames destined to our station address. |
485 | */ |
486 | rxctl |= VGE_RXCTL_RX_BCAST0x08 | VGE_RXCTL_RX_UCAST0x40; |
487 | |
488 | if ((ifp->if_flags & IFF_PROMISC0x100) == 0) |
489 | rxctl |= VGE_RXCTL_RX_MCAST0x04; |
490 | |
491 | if (ifp->if_flags & IFF_PROMISC0x100 || ac->ac_multirangecnt > 0) { |
492 | ifp->if_flags |= IFF_ALLMULTI0x200; |
493 | if (ifp->if_flags & IFF_PROMISC0x100) |
494 | rxctl |= VGE_RXCTL_RX_PROMISC0x10; |
495 | hashes[0] = hashes[1] = 0xFFFFFFFF; |
496 | } else if (ac->ac_multicnt > VGE_CAM_MAXADDRS64) { |
497 | ETHER_FIRST_MULTI(step, ac, enm)do { (step).e_enm = ((&(ac)->ac_multiaddrs)->lh_first ); do { if ((((enm)) = ((step)).e_enm) != ((void *)0)) ((step )).e_enm = ((((enm)))->enm_list.le_next); } while ( 0); } while ( 0); |
498 | while (enm != NULL((void *)0)) { |
499 | h = ether_crc32_be(enm->enm_addrlo, |
500 | ETHER_ADDR_LEN6) >> 26; |
501 | |
502 | hashes[h >> 5] |= 1 << (h & 0x1f); |
503 | |
504 | ETHER_NEXT_MULTI(step, enm)do { if (((enm) = (step).e_enm) != ((void *)0)) (step).e_enm = (((enm))->enm_list.le_next); } while ( 0); |
505 | } |
506 | } else { |
507 | ETHER_FIRST_MULTI(step, ac, enm)do { (step).e_enm = ((&(ac)->ac_multiaddrs)->lh_first ); do { if ((((enm)) = ((step)).e_enm) != ((void *)0)) ((step )).e_enm = ((((enm)))->enm_list.le_next); } while ( 0); } while ( 0); |
508 | while (enm != NULL((void *)0)) { |
509 | error = vge_cam_set(sc, enm->enm_addrlo); |
510 | if (error) |
511 | break; |
512 | |
513 | ETHER_NEXT_MULTI(step, enm)do { if (((enm) = (step).e_enm) != ((void *)0)) (step).e_enm = (((enm))->enm_list.le_next); } while ( 0); |
514 | } |
515 | } |
516 | |
517 | CSR_WRITE_4(sc, VGE_MAR0, hashes[0])((sc->vge_btag)->write_4((sc->vge_bhandle), (0x10), ( hashes[0]))); |
518 | CSR_WRITE_4(sc, VGE_MAR1, hashes[1])((sc->vge_btag)->write_4((sc->vge_bhandle), (0x14), ( hashes[1]))); |
519 | CSR_WRITE_1(sc, VGE_RXCTL, rxctl)((sc->vge_btag)->write_1((sc->vge_bhandle), (0x06), ( rxctl))); |
520 | } |
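/*
 * Illustrative sketch (not part of the driver): how the hash-filter
 * fallback in vge_iff() above maps a multicast address onto the two
 * 32-bit MAR registers. The top 6 bits of the big-endian CRC select one
 * of 64 bits; bit 5 picks MAR0 vs MAR1 and the low 5 bits pick the bit
 * within that register. ether_crc32_be() is the kernel helper used
 * above; here the CRC is taken as a given input to keep the sketch
 * self-contained.
 */
#include <stdint.h>

static void
demo_hash_to_mar(uint32_t crc_be, uint32_t hashes[2])
{
	uint32_t h = crc_be >> 26;		/* 6-bit hash: 0..63 */

	/* hashes[0] corresponds to MAR0, hashes[1] to MAR1 */
	hashes[h >> 5] |= 1U << (h & 0x1f);
}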
521 | |
522 | void |
523 | vge_reset(struct vge_softc *sc) |
524 | { |
525 | int i; |
526 | |
527 | CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_SOFTRESET)((sc->vge_btag)->write_1((sc->vge_bhandle), (0x09), ( 0x80))); |
528 | |
529 | for (i = 0; i < VGE_TIMEOUT10000; i++) { |
530 | DELAY(5)(*delay_func)(5); |
531 | if ((CSR_READ_1(sc, VGE_CRS1)((sc->vge_btag)->read_1((sc->vge_bhandle), (0x09))) & VGE_CR1_SOFTRESET0x80) == 0) |
532 | break; |
533 | } |
534 | |
535 | if (i == VGE_TIMEOUT10000) { |
536 | printf("%s: soft reset timed out\n", sc->vge_dev.dv_xname); |
537 | CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_STOP_FORCE)((sc->vge_btag)->write_1((sc->vge_bhandle), (0x0B), ( 0x40))); |
538 | DELAY(2000)(*delay_func)(2000); |
539 | } |
540 | |
541 | DELAY(5000)(*delay_func)(5000); |
542 | |
543 | CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_RELOAD)((sc->vge_btag)->write_1((sc->vge_bhandle), (0x93), ( ((sc->vge_btag)->read_1((sc->vge_bhandle), (0x93))) | (0x20)))); |
544 | |
545 | for (i = 0; i < VGE_TIMEOUT10000; i++) { |
546 | DELAY(5)(*delay_func)(5); |
547 | if ((CSR_READ_1(sc, VGE_EECSR)((sc->vge_btag)->read_1((sc->vge_bhandle), (0x93))) & VGE_EECSR_RELOAD0x20) == 0) |
548 | break; |
549 | } |
550 | |
551 | CSR_CLRBIT_1(sc, VGE_CHIPCFG0, VGE_CHIPCFG0_PACPI)((sc->vge_btag)->write_1((sc->vge_bhandle), (0x78), ( ((sc->vge_btag)->read_1((sc->vge_bhandle), (0x78))) & ~(0x01)))); |
552 | } |
553 | |
554 | /* |
555 | * Probe for a VIA gigabit chip. Check the PCI vendor and device |
556 | * IDs against our list and return a device name if we find a match. |
557 | */ |
558 | int |
559 | vge_probe(struct device *dev, void *match, void *aux) |
560 | { |
561 | return (pci_matchbyid((struct pci_attach_args *)aux, vge_devices, |
562 | nitems(vge_devices)(sizeof((vge_devices)) / sizeof((vge_devices)[0])))); |
563 | } |
564 | |
565 | /* |
566 | * Allocate memory for RX/TX rings |
567 | */ |
568 | int |
569 | vge_allocmem(struct vge_softc *sc) |
570 | { |
571 | int nseg, rseg; |
572 | int i, error; |
573 | |
574 | nseg = 32; |
575 | |
576 | /* Allocate DMA'able memory for the TX ring */ |
577 | |
578 | error = bus_dmamap_create(sc->sc_dmat, VGE_TX_LIST_SZ, 1,(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), ((256 * sizeof(struct vge_tx_desc))), (1), ((256 * sizeof(struct vge_tx_desc ))), (0), (0x0002), (&sc->vge_ldata.vge_tx_list_map)) |
579 | VGE_TX_LIST_SZ, 0, BUS_DMA_ALLOCNOW,(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), ((256 * sizeof(struct vge_tx_desc))), (1), ((256 * sizeof(struct vge_tx_desc ))), (0), (0x0002), (&sc->vge_ldata.vge_tx_list_map)) |
580 | &sc->vge_ldata.vge_tx_list_map)(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), ((256 * sizeof(struct vge_tx_desc))), (1), ((256 * sizeof(struct vge_tx_desc ))), (0), (0x0002), (&sc->vge_ldata.vge_tx_list_map)); |
581 | if (error) |
582 | return (ENOMEM12); |
583 | error = bus_dmamem_alloc(sc->sc_dmat, VGE_TX_LIST_SZ,(*(sc->sc_dmat)->_dmamem_alloc)((sc->sc_dmat), ((256 * sizeof(struct vge_tx_desc))), (2), (0), (&sc->vge_ldata .vge_tx_listseg), (1), (&rseg), (0x0001)) |
584 | ETHER_ALIGN, 0,(*(sc->sc_dmat)->_dmamem_alloc)((sc->sc_dmat), ((256 * sizeof(struct vge_tx_desc))), (2), (0), (&sc->vge_ldata .vge_tx_listseg), (1), (&rseg), (0x0001)) |
585 | &sc->vge_ldata.vge_tx_listseg, 1, &rseg, BUS_DMA_NOWAIT)(*(sc->sc_dmat)->_dmamem_alloc)((sc->sc_dmat), ((256 * sizeof(struct vge_tx_desc))), (2), (0), (&sc->vge_ldata .vge_tx_listseg), (1), (&rseg), (0x0001)); |
586 | if (error) { |
587 | printf("%s: can't alloc TX list\n", sc->vge_dev.dv_xname); |
588 | return (ENOMEM12); |
589 | } |
590 | |
591 | /* Load the map for the TX ring. */ |
592 | error = bus_dmamem_map(sc->sc_dmat, &sc->vge_ldata.vge_tx_listseg,(*(sc->sc_dmat)->_dmamem_map)((sc->sc_dmat), (&sc ->vge_ldata.vge_tx_listseg), (1), ((256 * sizeof(struct vge_tx_desc ))), ((caddr_t *)&sc->vge_ldata.vge_tx_list), (0x0001) ) |
593 | 1, VGE_TX_LIST_SZ,(*(sc->sc_dmat)->_dmamem_map)((sc->sc_dmat), (&sc ->vge_ldata.vge_tx_listseg), (1), ((256 * sizeof(struct vge_tx_desc ))), ((caddr_t *)&sc->vge_ldata.vge_tx_list), (0x0001) ) |
594 | (caddr_t *)&sc->vge_ldata.vge_tx_list, BUS_DMA_NOWAIT)(*(sc->sc_dmat)->_dmamem_map)((sc->sc_dmat), (&sc ->vge_ldata.vge_tx_listseg), (1), ((256 * sizeof(struct vge_tx_desc ))), ((caddr_t *)&sc->vge_ldata.vge_tx_list), (0x0001) ); |
595 | memset(sc->vge_ldata.vge_tx_list, 0, VGE_TX_LIST_SZ)__builtin_memset((sc->vge_ldata.vge_tx_list), (0), ((256 * sizeof(struct vge_tx_desc)))); |
596 | if (error) { |
597 | printf("%s: can't map TX dma buffers\n", |
598 | sc->vge_dev.dv_xname); |
599 | bus_dmamem_free(sc->sc_dmat, &sc->vge_ldata.vge_tx_listseg, rseg)(*(sc->sc_dmat)->_dmamem_free)((sc->sc_dmat), (& sc->vge_ldata.vge_tx_listseg), (rseg)); |
600 | return (ENOMEM12); |
601 | } |
602 | |
603 | error = bus_dmamap_load(sc->sc_dmat, sc->vge_ldata.vge_tx_list_map,(*(sc->sc_dmat)->_dmamap_load)((sc->sc_dmat), (sc-> vge_ldata.vge_tx_list_map), (sc->vge_ldata.vge_tx_list), ( (256 * sizeof(struct vge_tx_desc))), (((void *)0)), (0x0001)) |
604 | sc->vge_ldata.vge_tx_list, VGE_TX_LIST_SZ, NULL, BUS_DMA_NOWAIT)(*(sc->sc_dmat)->_dmamap_load)((sc->sc_dmat), (sc-> vge_ldata.vge_tx_list_map), (sc->vge_ldata.vge_tx_list), ( (256 * sizeof(struct vge_tx_desc))), (((void *)0)), (0x0001)); |
605 | if (error) { |
606 | printf("%s: can't load TX dma map\n", sc->vge_dev.dv_xname); |
607 | bus_dmamap_destroy(sc->sc_dmat, sc->vge_ldata.vge_tx_list_map)(*(sc->sc_dmat)->_dmamap_destroy)((sc->sc_dmat), (sc ->vge_ldata.vge_tx_list_map)); |
608 | bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->vge_ldata.vge_tx_list,(*(sc->sc_dmat)->_dmamem_unmap)((sc->sc_dmat), ((caddr_t )sc->vge_ldata.vge_tx_list), ((256 * sizeof(struct vge_tx_desc )))) |
609 | VGE_TX_LIST_SZ)(*(sc->sc_dmat)->_dmamem_unmap)((sc->sc_dmat), ((caddr_t )sc->vge_ldata.vge_tx_list), ((256 * sizeof(struct vge_tx_desc )))); |
610 | bus_dmamem_free(sc->sc_dmat, &sc->vge_ldata.vge_tx_listseg, rseg)(*(sc->sc_dmat)->_dmamem_free)((sc->sc_dmat), (& sc->vge_ldata.vge_tx_listseg), (rseg)); |
611 | return (ENOMEM12); |
612 | } |
613 | |
614 | /* Create DMA maps for TX buffers */ |
615 | |
616 | for (i = 0; i < VGE_TX_DESC_CNT256; i++) { |
617 | error = bus_dmamap_create(sc->sc_dmat, MCLBYTES * nseg,(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), ((1 << 11) * nseg), (7), ((1 << 11)), (0), (0x0002), (&sc ->vge_ldata.vge_tx_dmamap[i])) |
618 | VGE_TX_FRAGS, MCLBYTES, 0, BUS_DMA_ALLOCNOW,(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), ((1 << 11) * nseg), (7), ((1 << 11)), (0), (0x0002), (&sc ->vge_ldata.vge_tx_dmamap[i])) |
619 | &sc->vge_ldata.vge_tx_dmamap[i])(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), ((1 << 11) * nseg), (7), ((1 << 11)), (0), (0x0002), (&sc ->vge_ldata.vge_tx_dmamap[i])); |
620 | if (error) { |
621 | printf("%s: can't create DMA map for TX\n", |
622 | sc->vge_dev.dv_xname); |
623 | return (ENOMEM12); |
624 | } |
625 | } |
626 | |
627 | /* Allocate DMA'able memory for the RX ring */ |
628 | |
629 | error = bus_dmamap_create(sc->sc_dmat, VGE_RX_LIST_SZ, 1,(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), ((256 * sizeof(struct vge_rx_desc))), (1), ((256 * sizeof(struct vge_rx_desc ))), (0), (0x0002), (&sc->vge_ldata.vge_rx_list_map)) |
630 | VGE_RX_LIST_SZ, 0, BUS_DMA_ALLOCNOW,(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), ((256 * sizeof(struct vge_rx_desc))), (1), ((256 * sizeof(struct vge_rx_desc ))), (0), (0x0002), (&sc->vge_ldata.vge_rx_list_map)) |
631 | &sc->vge_ldata.vge_rx_list_map)(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), ((256 * sizeof(struct vge_rx_desc))), (1), ((256 * sizeof(struct vge_rx_desc ))), (0), (0x0002), (&sc->vge_ldata.vge_rx_list_map)); |
632 | if (error) |
633 | return (ENOMEM12); |
634 | error = bus_dmamem_alloc(sc->sc_dmat, VGE_RX_LIST_SZ, VGE_RING_ALIGN,(*(sc->sc_dmat)->_dmamem_alloc)((sc->sc_dmat), ((256 * sizeof(struct vge_rx_desc))), (256), (0), (&sc->vge_ldata .vge_rx_listseg), (1), (&rseg), (0x0001)) |
635 | 0, &sc->vge_ldata.vge_rx_listseg, 1, &rseg, BUS_DMA_NOWAIT)(*(sc->sc_dmat)->_dmamem_alloc)((sc->sc_dmat), ((256 * sizeof(struct vge_rx_desc))), (256), (0), (&sc->vge_ldata .vge_rx_listseg), (1), (&rseg), (0x0001)); |
636 | if (error) { |
637 | printf("%s: can't alloc RX list\n", sc->vge_dev.dv_xname); |
638 | return (ENOMEM12); |
639 | } |
640 | |
641 | /* Load the map for the RX ring. */ |
642 | |
643 | error = bus_dmamem_map(sc->sc_dmat, &sc->vge_ldata.vge_rx_listseg,(*(sc->sc_dmat)->_dmamem_map)((sc->sc_dmat), (&sc ->vge_ldata.vge_rx_listseg), (1), ((256 * sizeof(struct vge_rx_desc ))), ((caddr_t *)&sc->vge_ldata.vge_rx_list), (0x0001) ) |
644 | 1, VGE_RX_LIST_SZ,(*(sc->sc_dmat)->_dmamem_map)((sc->sc_dmat), (&sc ->vge_ldata.vge_rx_listseg), (1), ((256 * sizeof(struct vge_rx_desc ))), ((caddr_t *)&sc->vge_ldata.vge_rx_list), (0x0001) ) |
645 | (caddr_t *)&sc->vge_ldata.vge_rx_list, BUS_DMA_NOWAIT)(*(sc->sc_dmat)->_dmamem_map)((sc->sc_dmat), (&sc ->vge_ldata.vge_rx_listseg), (1), ((256 * sizeof(struct vge_rx_desc ))), ((caddr_t *)&sc->vge_ldata.vge_rx_list), (0x0001) ); |
646 | memset(sc->vge_ldata.vge_rx_list, 0, VGE_RX_LIST_SZ)__builtin_memset((sc->vge_ldata.vge_rx_list), (0), ((256 * sizeof(struct vge_rx_desc)))); |
647 | if (error) { |
648 | printf("%s: can't map RX dma buffers\n", |
649 | sc->vge_dev.dv_xname); |
650 | bus_dmamem_free(sc->sc_dmat, &sc->vge_ldata.vge_rx_listseg, rseg)(*(sc->sc_dmat)->_dmamem_free)((sc->sc_dmat), (& sc->vge_ldata.vge_rx_listseg), (rseg)); |
651 | return (ENOMEM12); |
652 | } |
653 | error = bus_dmamap_load(sc->sc_dmat, sc->vge_ldata.vge_rx_list_map,(*(sc->sc_dmat)->_dmamap_load)((sc->sc_dmat), (sc-> vge_ldata.vge_rx_list_map), (sc->vge_ldata.vge_rx_list), ( (256 * sizeof(struct vge_rx_desc))), (((void *)0)), (0x0001)) |
654 | sc->vge_ldata.vge_rx_list, VGE_RX_LIST_SZ, NULL, BUS_DMA_NOWAIT)(*(sc->sc_dmat)->_dmamap_load)((sc->sc_dmat), (sc-> vge_ldata.vge_rx_list_map), (sc->vge_ldata.vge_rx_list), ( (256 * sizeof(struct vge_rx_desc))), (((void *)0)), (0x0001)); |
655 | if (error) { |
656 | printf("%s: can't load RX dma map\n", sc->vge_dev.dv_xname); |
657 | bus_dmamap_destroy(sc->sc_dmat, sc->vge_ldata.vge_rx_list_map)(*(sc->sc_dmat)->_dmamap_destroy)((sc->sc_dmat), (sc ->vge_ldata.vge_rx_list_map)); |
658 | bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->vge_ldata.vge_rx_list,(*(sc->sc_dmat)->_dmamem_unmap)((sc->sc_dmat), ((caddr_t )sc->vge_ldata.vge_rx_list), ((256 * sizeof(struct vge_rx_desc )))) |
659 | VGE_RX_LIST_SZ)(*(sc->sc_dmat)->_dmamem_unmap)((sc->sc_dmat), ((caddr_t )sc->vge_ldata.vge_rx_list), ((256 * sizeof(struct vge_rx_desc )))); |
660 | bus_dmamem_free(sc->sc_dmat, &sc->vge_ldata.vge_rx_listseg, rseg)(*(sc->sc_dmat)->_dmamem_free)((sc->sc_dmat), (& sc->vge_ldata.vge_rx_listseg), (rseg)); |
661 | return (ENOMEM12); |
662 | } |
663 | |
664 | /* Create DMA maps for RX buffers */ |
665 | |
666 | for (i = 0; i < VGE_RX_DESC_CNT256; i++) { |
667 | error = bus_dmamap_create(sc->sc_dmat, MCLBYTES * nseg, nseg,(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), ((1 << 11) * nseg), (nseg), ((1 << 11)), (0), (0x0002), (& sc->vge_ldata.vge_rx_dmamap[i])) |
668 | MCLBYTES, 0, BUS_DMA_ALLOCNOW,(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), ((1 << 11) * nseg), (nseg), ((1 << 11)), (0), (0x0002), (& sc->vge_ldata.vge_rx_dmamap[i])) |
669 | &sc->vge_ldata.vge_rx_dmamap[i])(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), ((1 << 11) * nseg), (nseg), ((1 << 11)), (0), (0x0002), (& sc->vge_ldata.vge_rx_dmamap[i])); |
670 | if (error) { |
671 | printf("%s: can't create DMA map for RX\n", |
672 | sc->vge_dev.dv_xname); |
673 | return (ENOMEM12); |
674 | } |
675 | } |
676 | |
677 | return (0); |
678 | } |
679 | |
680 | void |
681 | vge_freemem(struct vge_softc *sc) |
682 | { |
683 | int i; |
684 | |
685 | for (i = 0; i < VGE_RX_DESC_CNT256; i++) |
686 | bus_dmamap_destroy(sc->sc_dmat,(*(sc->sc_dmat)->_dmamap_destroy)((sc->sc_dmat), (sc ->vge_ldata.vge_rx_dmamap[i])) |
687 | sc->vge_ldata.vge_rx_dmamap[i])(*(sc->sc_dmat)->_dmamap_destroy)((sc->sc_dmat), (sc ->vge_ldata.vge_rx_dmamap[i])); |
688 | |
689 | bus_dmamap_unload(sc->sc_dmat, sc->vge_ldata.vge_rx_list_map)(*(sc->sc_dmat)->_dmamap_unload)((sc->sc_dmat), (sc-> vge_ldata.vge_rx_list_map)); |
690 | bus_dmamap_destroy(sc->sc_dmat, sc->vge_ldata.vge_rx_list_map)(*(sc->sc_dmat)->_dmamap_destroy)((sc->sc_dmat), (sc ->vge_ldata.vge_rx_list_map)); |
691 | bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->vge_ldata.vge_rx_list,(*(sc->sc_dmat)->_dmamem_unmap)((sc->sc_dmat), ((caddr_t )sc->vge_ldata.vge_rx_list), ((256 * sizeof(struct vge_rx_desc )))) |
692 | VGE_RX_LIST_SZ)(*(sc->sc_dmat)->_dmamem_unmap)((sc->sc_dmat), ((caddr_t )sc->vge_ldata.vge_rx_list), ((256 * sizeof(struct vge_rx_desc )))); |
693 | bus_dmamem_free(sc->sc_dmat, &sc->vge_ldata.vge_rx_listseg, 1)(*(sc->sc_dmat)->_dmamem_free)((sc->sc_dmat), (& sc->vge_ldata.vge_rx_listseg), (1)); |
694 | |
695 | for (i = 0; i < VGE_TX_DESC_CNT256; i++) |
696 | bus_dmamap_destroy(sc->sc_dmat,(*(sc->sc_dmat)->_dmamap_destroy)((sc->sc_dmat), (sc ->vge_ldata.vge_tx_dmamap[i])) |
697 | sc->vge_ldata.vge_tx_dmamap[i])(*(sc->sc_dmat)->_dmamap_destroy)((sc->sc_dmat), (sc ->vge_ldata.vge_tx_dmamap[i])); |
698 | |
699 | bus_dmamap_unload(sc->sc_dmat, sc->vge_ldata.vge_tx_list_map)(*(sc->sc_dmat)->_dmamap_unload)((sc->sc_dmat), (sc-> vge_ldata.vge_tx_list_map)); |
700 | bus_dmamap_destroy(sc->sc_dmat, sc->vge_ldata.vge_tx_list_map)(*(sc->sc_dmat)->_dmamap_destroy)((sc->sc_dmat), (sc ->vge_ldata.vge_tx_list_map)); |
701 | bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->vge_ldata.vge_tx_list,(*(sc->sc_dmat)->_dmamem_unmap)((sc->sc_dmat), ((caddr_t )sc->vge_ldata.vge_tx_list), ((256 * sizeof(struct vge_tx_desc )))) |
702 | VGE_TX_LIST_SZ)(*(sc->sc_dmat)->_dmamem_unmap)((sc->sc_dmat), ((caddr_t )sc->vge_ldata.vge_tx_list), ((256 * sizeof(struct vge_tx_desc )))); |
703 | bus_dmamem_free(sc->sc_dmat, &sc->vge_ldata.vge_tx_listseg, 1)(*(sc->sc_dmat)->_dmamem_free)((sc->sc_dmat), (& sc->vge_ldata.vge_tx_listseg), (1)); |
704 | } |
705 | |
706 | /* |
707 | * Attach the interface. Allocate softc structures, do ifmedia |
708 | * setup and ethernet/BPF attach. |
709 | */ |
710 | void |
711 | vge_attach(struct device *parent, struct device *self, void *aux) |
712 | { |
713 | u_char eaddr[ETHER_ADDR_LEN6]; |
714 | struct vge_softc *sc = (struct vge_softc *)self; |
715 | struct pci_attach_args *pa = aux; |
716 | pci_chipset_tag_t pc = pa->pa_pc; |
717 | pci_intr_handle_t ih; |
718 | const char *intrstr = NULL((void *)0); |
719 | struct ifnet *ifp; |
720 | int error = 0; |
721 | |
722 | /* |
723 | * Map control/status registers. |
724 | */ |
725 | if (pci_mapreg_map(pa, VGE_PCI_LOMEM0x14, PCI_MAPREG_TYPE_MEM0x00000000, 0, |
726 | &sc->vge_btag, &sc->vge_bhandle, NULL((void *)0), &sc->vge_bsize, 0)) { |
727 | if (pci_mapreg_map(pa, VGE_PCI_LOIO0x10, PCI_MAPREG_TYPE_IO0x00000001, 0, |
728 | &sc->vge_btag, &sc->vge_bhandle, NULL((void *)0), &sc->vge_bsize, 0)) { |
729 | printf(": can't map mem or i/o space\n"); |
730 | return; |
731 | } |
732 | } |
733 | |
734 | /* Allocate interrupt */ |
735 | if (pci_intr_map(pa, &ih)) { |
736 | printf(": couldn't map interrupt\n"); |
737 | return; |
738 | } |
739 | intrstr = pci_intr_string(pc, ih); |
740 | sc->vge_intrhand = pci_intr_establish(pc, ih, IPL_NET0x7, vge_intr, sc, |
741 | sc->vge_dev.dv_xname); |
742 | if (sc->vge_intrhand == NULL((void *)0)) { |
743 | printf(": couldn't establish interrupt"); |
744 | if (intrstr != NULL((void *)0)) |
745 | printf(" at %s", intrstr); |
746 | return; |
747 | } |
748 | printf(": %s", intrstr); |
749 | |
750 | sc->sc_dmat = pa->pa_dmat; |
751 | sc->sc_pc = pa->pa_pc; |
752 | |
753 | /* Reset the adapter. */ |
754 | vge_reset(sc); |
755 | |
756 | /* |
757 | * Get station address from the EEPROM. |
758 | */ |
759 | vge_read_eeprom(sc, eaddr, VGE_EE_EADDR0, 3, 1); |
760 | |
761 | bcopy(eaddr, &sc->arpcom.ac_enaddr, ETHER_ADDR_LEN6); |
762 | |
763 | printf(", address %s\n", |
764 | ether_sprintf(sc->arpcom.ac_enaddr)); |
765 | |
766 | error = vge_allocmem(sc); |
767 | |
768 | if (error) |
769 | return; |
770 | |
771 | ifp = &sc->arpcom.ac_if; |
772 | ifp->if_softc = sc; |
773 | ifp->if_flags = IFF_BROADCAST0x2 | IFF_SIMPLEX0x800 | IFF_MULTICAST0x8000; |
774 | ifp->if_ioctl = vge_ioctl; |
775 | ifp->if_start = vge_start; |
776 | ifp->if_watchdog = vge_watchdog; |
777 | #ifdef VGE_JUMBO |
778 | ifp->if_hardmtu = VGE_JUMBO_MTU9000; |
779 | #endif |
780 | ifq_set_maxlen(&ifp->if_snd, VGE_IFQ_MAXLEN)((&ifp->if_snd)->ifq_maxlen = (64)); |
781 | |
782 | ifp->if_capabilitiesif_data.ifi_capabilities = IFCAP_VLAN_MTU0x00000010 | IFCAP_CSUM_IPv40x00000001 | |
783 | IFCAP_CSUM_TCPv40x00000002 | IFCAP_CSUM_UDPv40x00000004; |
784 | |
785 | #if NVLAN1 > 0 |
786 | ifp->if_capabilitiesif_data.ifi_capabilities |= IFCAP_VLAN_HWTAGGING0x00000020; |
787 | #endif |
788 | |
789 | /* Set interface name */ |
790 | strlcpy(ifp->if_xname, sc->vge_dev.dv_xname, IFNAMSIZ16); |
791 | |
792 | /* Do MII setup */ |
793 | sc->sc_mii.mii_ifp = ifp; |
794 | sc->sc_mii.mii_readreg = vge_miibus_readreg; |
795 | sc->sc_mii.mii_writereg = vge_miibus_writereg; |
796 | sc->sc_mii.mii_statchg = vge_miibus_statchg; |
797 | ifmedia_init(&sc->sc_mii.mii_media, 0, |
798 | vge_ifmedia_upd, vge_ifmedia_sts); |
799 | mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY-1, |
800 | MII_OFFSET_ANY-1, MIIF_DOPAUSE0x0100); |
801 | if (LIST_FIRST(&sc->sc_mii.mii_phys)((&sc->sc_mii.mii_phys)->lh_first) == NULL((void *)0)) { |
802 | printf("%s: no PHY found!\n", sc->vge_dev.dv_xname); |
803 | ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER0x0000000000000100ULL|IFM_MANUAL1ULL, |
804 | 0, NULL((void *)0)); |
805 | ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER0x0000000000000100ULL|IFM_MANUAL1ULL); |
806 | } else |
807 | ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER0x0000000000000100ULL|IFM_AUTO0ULL); |
808 | |
809 | timeout_set(&sc->timer_handle, vge_tick, sc); |
810 | |
811 | /* |
812 | * Call MI attach routine. |
813 | */ |
814 | if_attach(ifp); |
815 | ether_ifattach(ifp); |
816 | } |
817 | |
818 | int |
819 | vge_detach(struct device *self, int flags) |
820 | { |
821 | struct vge_softc *sc = (void *)self; |
822 | struct ifnet *ifp = &sc->arpcom.ac_if; |
823 | |
824 | pci_intr_disestablish(sc->sc_pc, sc->vge_intrhand); |
825 | |
826 | vge_stop(sc); |
827 | |
828 | /* Detach all PHYs */ |
829 | mii_detach(&sc->sc_mii, MII_PHY_ANY-1, MII_OFFSET_ANY-1); |
830 | |
831 | /* Delete any remaining media. */ |
832 | ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY((uint64_t) -1)); |
833 | |
834 | ether_ifdetach(ifp); |
835 | if_detach(ifp); |
836 | |
837 | vge_freemem(sc); |
838 | |
839 | bus_space_unmap(sc->vge_btag, sc->vge_bhandle, sc->vge_bsize); |
840 | return (0); |
841 | } |
842 | |
843 | int |
844 | vge_newbuf(struct vge_softc *sc, int idx, struct mbuf *m) |
845 | { |
846 | struct mbuf *m_new = NULL((void *)0); |
847 | struct vge_rx_desc *r; |
848 | bus_dmamap_t rxmap = sc->vge_ldata.vge_rx_dmamap[idx]; |
849 | int i; |
850 | |
851 | if (m == NULL((void *)0)) { |
852 | /* Allocate a new mbuf */ |
853 | MGETHDR(m_new, M_DONTWAIT, MT_DATA)m_new = m_gethdr((0x0002), (1)); |
854 | if (m_new == NULL((void *)0)) |
855 | return (ENOBUFS55); |
856 | |
857 | /* Allocate a cluster */ |
858 | MCLGET(m_new, M_DONTWAIT)(void) m_clget((m_new), (0x0002), (1 << 11)); |
859 | if (!(m_new->m_flagsm_hdr.mh_flags & M_EXT0x0001)) { |
860 | m_freem(m_new); |
861 | return (ENOBUFS55); |
862 | } |
863 | |
864 | m = m_new; |
865 | } else |
866 | m->m_datam_hdr.mh_data = m->m_extM_dat.MH.MH_dat.MH_ext.ext_buf; |
867 | |
868 | m->m_lenm_hdr.mh_len = m->m_pkthdrM_dat.MH.MH_pkthdr.len = MCLBYTES(1 << 11); |
869 | /* Fix-up alignment so payload is doubleword-aligned */ |
870 | /* XXX m_adj(m, ETHER_ALIGN); */ |
871 | |
872 | if (bus_dmamap_load_mbuf(sc->sc_dmat, rxmap, m, BUS_DMA_NOWAIT)(*(sc->sc_dmat)->_dmamap_load_mbuf)((sc->sc_dmat), ( rxmap), (m), (0x0001))) |
873 | return (ENOBUFS55); |
874 | |
875 | if (rxmap->dm_nsegs > 1) |
876 | goto out; |
877 | |
878 | /* Map the segments into RX descriptors */ |
879 | r = &sc->vge_ldata.vge_rx_list[idx]; |
880 | |
881 | if (letoh32(r->vge_sts)((__uint32_t)(r->vge_sts)) & VGE_RDSTS_OWN0x80000000) { |
882 | printf("%s: tried to map a busy RX descriptor\n", |
883 | sc->vge_dev.dv_xname); |
884 | goto out; |
885 | } |
886 | r->vge_buflen = htole16(VGE_BUFLEN(rxmap->dm_segs[0].ds_len) | VGE_RXDESC_I)((__uint16_t)(((rxmap->dm_segs[0].ds_len) & 0x7FFF) | 0x8000 )); |
887 | r->vge_addrlo = htole32(VGE_ADDR_LO(rxmap->dm_segs[0].ds_addr))((__uint32_t)(((u_int64_t) (rxmap->dm_segs[0].ds_addr) & 0xFFFFFFFF))); |
888 | r->vge_addrhi = htole16(VGE_ADDR_HI(rxmap->dm_segs[0].ds_addr) & 0xFFFF)((__uint16_t)(((u_int64_t) (rxmap->dm_segs[0].ds_addr) >> 32) & 0xFFFF)); |
889 | r->vge_sts = htole32(0)((__uint32_t)(0)); |
890 | r->vge_ctl = htole32(0)((__uint32_t)(0)); |
891 | |
892 | /* |
893 | * Note: the manual fails to document the fact that for |
894 | * proper operation, the driver needs to replenish the RX |
895 | * DMA ring 4 descriptors at a time (rather than one at a |
896 | * time, like most chips). We can allocate the new buffers |
897 | * but we should not set the OWN bits until we're ready |
898 | * to hand back 4 of them in one shot. |
899 | */ |
900 | #define VGE_RXCHUNK4 4 |
901 | sc->vge_rx_consumed++; |
902 | if (sc->vge_rx_consumed == VGE_RXCHUNK4) { |
903 | for (i = idx; i != idx - sc->vge_rx_consumed; i--) |
904 | sc->vge_ldata.vge_rx_list[i].vge_sts |= |
905 | htole32(VGE_RDSTS_OWN)((__uint32_t)(0x80000000)); |
906 | sc->vge_rx_consumed = 0; |
907 | } |
908 | |
909 | sc->vge_ldata.vge_rx_mbuf[idx] = m; |
910 | |
911 | bus_dmamap_sync(sc->sc_dmat, rxmap, 0,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (rxmap ), (0), (rxmap->dm_mapsize), (0x01)) |
912 | rxmap->dm_mapsize, BUS_DMASYNC_PREREAD)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (rxmap ), (0), (rxmap->dm_mapsize), (0x01)); |
913 | |
914 | return (0); |
915 | out: |
916 | DPRINTF(("vge_newbuf: out of memory\n"))if (vge_debug) printf ("vge_newbuf: out of memory\n"); |
917 | if (m_new != NULL((void *)0)) |
918 | m_freem(m_new); |
919 | return (ENOMEM12); |
920 | } |
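/*
 * Illustrative sketch (not part of the driver): the deferred OWN-bit
 * handoff performed in vge_newbuf() above. Descriptors are refilled as
 * buffers become available, but ownership is only returned to the chip
 * four at a time, matching the VGE_RXCHUNK requirement noted in the
 * comment. This is a minimal stand-in for the real ring structures; the
 * index arithmetic here wraps explicitly for clarity.
 */
#include <stdint.h>

#define DEMO_RX_DESC_CNT	256
#define DEMO_RXCHUNK		4
#define DEMO_RDSTS_OWN		0x80000000U

static uint32_t demo_sts[DEMO_RX_DESC_CNT];
static int demo_consumed;

static void
demo_post_rx_buffer(int idx)
{
	int i;

	/* Descriptor 'idx' is refilled, but OWN stays clear... */
	if (++demo_consumed == DEMO_RXCHUNK) {
		/* ...until four are ready; then hand them back at once. */
		for (i = 0; i < DEMO_RXCHUNK; i++)
			demo_sts[(idx - i + DEMO_RX_DESC_CNT) %
			    DEMO_RX_DESC_CNT] |= DEMO_RDSTS_OWN;
		demo_consumed = 0;
	}
}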
921 | |
922 | int |
923 | vge_tx_list_init(struct vge_softc *sc) |
924 | { |
925 | bzero(sc->vge_ldata.vge_tx_list, VGE_TX_LIST_SZ)__builtin_bzero((sc->vge_ldata.vge_tx_list), ((256 * sizeof (struct vge_tx_desc)))); |
926 | bzero(&sc->vge_ldata.vge_tx_mbuf,__builtin_bzero((&sc->vge_ldata.vge_tx_mbuf), ((256 * sizeof (struct mbuf *)))) |
927 | (VGE_TX_DESC_CNT * sizeof(struct mbuf *)))__builtin_bzero((&sc->vge_ldata.vge_tx_mbuf), ((256 * sizeof (struct mbuf *)))); |
928 | |
929 | bus_dmamap_sync(sc->sc_dmat,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (sc-> vge_ldata.vge_tx_list_map), (0), (sc->vge_ldata.vge_tx_list_map ->dm_mapsize), (0x04)) |
930 | sc->vge_ldata.vge_tx_list_map, 0,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (sc-> vge_ldata.vge_tx_list_map), (0), (sc->vge_ldata.vge_tx_list_map ->dm_mapsize), (0x04)) |
931 | sc->vge_ldata.vge_tx_list_map->dm_mapsize,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (sc-> vge_ldata.vge_tx_list_map), (0), (sc->vge_ldata.vge_tx_list_map ->dm_mapsize), (0x04)) |
932 | BUS_DMASYNC_PREWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (sc-> vge_ldata.vge_tx_list_map), (0), (sc->vge_ldata.vge_tx_list_map ->dm_mapsize), (0x04)); |
933 | sc->vge_ldata.vge_tx_prodidx = 0; |
934 | sc->vge_ldata.vge_tx_considx = 0; |
935 | sc->vge_ldata.vge_tx_free = VGE_TX_DESC_CNT256; |
936 | |
937 | return (0); |
938 | } |
939 | |
940 | /* Init RX descriptors and allocate mbufs with vge_newbuf(). |
941 | * A ring is used, and the last descriptor points to the first. */ |
942 | int |
943 | vge_rx_list_init(struct vge_softc *sc) |
944 | { |
945 | int i; |
946 | |
947 | bzero(sc->vge_ldata.vge_rx_list, VGE_RX_LIST_SZ)__builtin_bzero((sc->vge_ldata.vge_rx_list), ((256 * sizeof (struct vge_rx_desc)))); |
948 | bzero(&sc->vge_ldata.vge_rx_mbuf,__builtin_bzero((&sc->vge_ldata.vge_rx_mbuf), ((256 * sizeof (struct mbuf *)))) |
949 | (VGE_RX_DESC_CNT * sizeof(struct mbuf *)))__builtin_bzero((&sc->vge_ldata.vge_rx_mbuf), ((256 * sizeof (struct mbuf *)))); |
950 | |
951 | sc->vge_rx_consumed = 0; |
952 | |
953 | for (i = 0; i < VGE_RX_DESC_CNT256; i++) { |
954 | if (vge_newbuf(sc, i, NULL((void *)0)) == ENOBUFS55) |
955 | return (ENOBUFS55); |
956 | } |
957 | |
958 | /* Flush the RX descriptors */ |
959 | |
960 | bus_dmamap_sync(sc->sc_dmat,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (sc-> vge_ldata.vge_rx_list_map), (0), (sc->vge_ldata.vge_rx_list_map ->dm_mapsize), (0x04|0x01)) |
961 | sc->vge_ldata.vge_rx_list_map,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (sc-> vge_ldata.vge_rx_list_map), (0), (sc->vge_ldata.vge_rx_list_map ->dm_mapsize), (0x04|0x01)) |
962 | 0, sc->vge_ldata.vge_rx_list_map->dm_mapsize,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (sc-> vge_ldata.vge_rx_list_map), (0), (sc->vge_ldata.vge_rx_list_map ->dm_mapsize), (0x04|0x01)) |
963 | BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (sc-> vge_ldata.vge_rx_list_map), (0), (sc->vge_ldata.vge_rx_list_map ->dm_mapsize), (0x04|0x01)); |
964 | |
965 | sc->vge_ldata.vge_rx_prodidx = 0; |
966 | sc->vge_rx_consumed = 0; |
967 | sc->vge_head = sc->vge_tail = NULL((void *)0); |
968 | |
969 | return (0); |
970 | } |
971 | |
972 | /* |
973 | * RX handler. We support the reception of jumbo frames that have |
974 | * been fragmented across multiple 2K mbuf cluster buffers. |
975 | */ |
976 | void |
977 | vge_rxeof(struct vge_softc *sc) |
978 | { |
979 | struct mbuf_list ml = MBUF_LIST_INITIALIZER(){ ((void *)0), ((void *)0), 0 }; |
980 | struct mbuf *m; |
981 | struct ifnet *ifp; |
982 | int i, total_len; |
983 | int lim = 0; |
984 | struct vge_rx_desc *cur_rx; |
985 | u_int32_t rxstat, rxctl; |
986 | |
987 | ifp = &sc->arpcom.ac_if; |
988 | i = sc->vge_ldata.vge_rx_prodidx; |
989 | |
990 | /* Invalidate the descriptor memory */ |
991 | |
992 | bus_dmamap_sync(sc->sc_dmat,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (sc-> vge_ldata.vge_rx_list_map), (0), (sc->vge_ldata.vge_rx_list_map ->dm_mapsize), (0x02)) |
993 | sc->vge_ldata.vge_rx_list_map,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (sc-> vge_ldata.vge_rx_list_map), (0), (sc->vge_ldata.vge_rx_list_map ->dm_mapsize), (0x02)) |
994 | 0, sc->vge_ldata.vge_rx_list_map->dm_mapsize,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (sc-> vge_ldata.vge_rx_list_map), (0), (sc->vge_ldata.vge_rx_list_map ->dm_mapsize), (0x02)) |
995 | BUS_DMASYNC_POSTREAD)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (sc-> vge_ldata.vge_rx_list_map), (0), (sc->vge_ldata.vge_rx_list_map ->dm_mapsize), (0x02)); |
996 | |
997 | while (!VGE_OWN(&sc->vge_ldata.vge_rx_list[i])(((__uint32_t)((&sc->vge_ldata.vge_rx_list[i])->vge_sts )) & 0x80000000)) { |
998 | struct mbuf *m0 = NULL((void *)0); |
999 | |
1000 | cur_rx = &sc->vge_ldata.vge_rx_list[i]; |
1001 | m = sc->vge_ldata.vge_rx_mbuf[i]; |
1002 | total_len = VGE_RXBYTES(cur_rx)((((__uint32_t)((cur_rx)->vge_sts)) & 0x3FFF0000) >> 16); |
1003 | rxstat = letoh32(cur_rx->vge_sts)((__uint32_t)(cur_rx->vge_sts)); |
1004 | rxctl = letoh32(cur_rx->vge_ctl)((__uint32_t)(cur_rx->vge_ctl)); |
1005 | |
1006 | /* Invalidate the RX mbuf and unload its map */ |
1007 | |
1008 | bus_dmamap_sync(sc->sc_dmat,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (sc-> vge_ldata.vge_rx_dmamap[i]), (0), (sc->vge_ldata.vge_rx_dmamap [i]->dm_mapsize), (0x08)) |
1009 | sc->vge_ldata.vge_rx_dmamap[i],(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (sc-> vge_ldata.vge_rx_dmamap[i]), (0), (sc->vge_ldata.vge_rx_dmamap [i]->dm_mapsize), (0x08)) |
1010 | 0, sc->vge_ldata.vge_rx_dmamap[i]->dm_mapsize,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (sc-> vge_ldata.vge_rx_dmamap[i]), (0), (sc->vge_ldata.vge_rx_dmamap [i]->dm_mapsize), (0x08)) |
1011 | BUS_DMASYNC_POSTWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (sc-> vge_ldata.vge_rx_dmamap[i]), (0), (sc->vge_ldata.vge_rx_dmamap [i]->dm_mapsize), (0x08)); |
1012 | bus_dmamap_unload(sc->sc_dmat,(*(sc->sc_dmat)->_dmamap_unload)((sc->sc_dmat), (sc-> vge_ldata.vge_rx_dmamap[i])) |
1013 | sc->vge_ldata.vge_rx_dmamap[i])(*(sc->sc_dmat)->_dmamap_unload)((sc->sc_dmat), (sc-> vge_ldata.vge_rx_dmamap[i])); |
1014 | |
1015 | /* |
1016 | * If the 'start of frame' bit is set, this indicates |
1017 | * either the first fragment in a multi-fragment receive, |
1018 | * or an intermediate fragment. Either way, we want to |
1019 | * accumulate the buffers. |
1020 | */ |
1021 | if (rxstat & VGE_RXPKT_SOF0x00000200) { |
1022 | DPRINTF(("vge_rxeof: SOF\n"))if (vge_debug) printf ("vge_rxeof: SOF\n"); |
1023 | m->m_lenm_hdr.mh_len = MCLBYTES(1 << 11); |
1024 | if (sc->vge_head == NULL((void *)0)) |
1025 | sc->vge_head = sc->vge_tail = m; |
1026 | else { |
1027 | m->m_flagsm_hdr.mh_flags &= ~M_PKTHDR0x0002; |
1028 | sc->vge_tail->m_nextm_hdr.mh_next = m; |
1029 | sc->vge_tail = m; |
1030 | } |
1031 | vge_newbuf(sc, i, NULL((void *)0)); |
1032 | VGE_RX_DESC_INC(i)(i = (i + 1) % 256); |
1033 | continue; |
1034 | } |
1035 | |
1036 | /* |
1037 | * Bad/error frames will have the RXOK bit cleared. |
1038 | * However, there's one error case we want to allow: |
1039 | * if a VLAN tagged frame arrives and the chip can't |
1040 | * match it against the CAM filter, it considers this |
1041 | * a 'VLAN CAM filter miss' and clears the 'RXOK' bit. |
1042 | * We don't want to drop the frame though: our VLAN |
1043 | * filtering is done in software. |
1044 | */ |
1045 | if (!(rxstat & VGE_RDSTS_RXOK0x00008000) && !(rxstat & VGE_RDSTS_VIDM0x00000001) |
1046 | && !(rxstat & VGE_RDSTS_CSUMERR0x00000008)) { |
1047 | ifp->if_ierrorsif_data.ifi_ierrors++; |
1048 | /* |
1049 | * If this is part of a multi-fragment packet, |
1050 | * discard all the pieces. |
1051 | */ |
1052 | if (sc->vge_head != NULL((void *)0)) { |
1053 | m_freem(sc->vge_head); |
1054 | sc->vge_head = sc->vge_tail = NULL((void *)0); |
1055 | } |
1056 | vge_newbuf(sc, i, m); |
1057 | VGE_RX_DESC_INC(i)(i = (i + 1) % 256); |
1058 | continue; |
1059 | } |
1060 | |
1061 | /* |
1062 | * If allocating a replacement mbuf fails, |
1063 | * reload the current one. |
1064 | */ |
1065 | |
1066 | if (vge_newbuf(sc, i, NULL((void *)0)) == ENOBUFS55) { |
1067 | if (sc->vge_head != NULL((void *)0)) { |
1068 | m_freem(sc->vge_head); |
1069 | sc->vge_head = sc->vge_tail = NULL((void *)0); |
1070 | } |
1071 | |
1072 | m0 = m_devget(mtod(m, char *)((char *)((m)->m_hdr.mh_data)), |
1073 | total_len - ETHER_CRC_LEN4, ETHER_ALIGN2); |
1074 | vge_newbuf(sc, i, m); |
1075 | if (m0 == NULL((void *)0)) { |
1076 | ifp->if_ierrorsif_data.ifi_ierrors++; |
1077 | continue; |
1078 | } |
1079 | m = m0; |
Value stored to 'm' is never read | |
1080 | |
1081 | VGE_RX_DESC_INC(i)(i = (i + 1) % 256); |
1082 | continue; |
1083 | } |
1084 | |
1085 | VGE_RX_DESC_INC(i)(i = (i + 1) % 256); |
1086 | |
1087 | if (sc->vge_head != NULL((void *)0)) { |
1088 | m->m_lenm_hdr.mh_len = total_len % MCLBYTES(1 << 11); |
1089 | /* |
1090 | * Special case: if there are 4 bytes or less |
1091 | * in this buffer, the mbuf can be discarded: |
1092 | * the last 4 bytes are the CRC, which we don't |
1093 | * care about anyway. |
1094 | */ |
			if (m->m_len <= ETHER_CRC_LEN) {
				sc->vge_tail->m_len -=
				    (ETHER_CRC_LEN - m->m_len);
				m_freem(m);
			} else {
				m->m_len -= ETHER_CRC_LEN;
				m->m_flags &= ~M_PKTHDR;
				sc->vge_tail->m_next = m;
			}
			m = sc->vge_head;
			sc->vge_head = sc->vge_tail = NULL;
			m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
		} else
			m->m_pkthdr.len = m->m_len =
			    (total_len - ETHER_CRC_LEN);

#ifdef __STRICT_ALIGNMENT
		bcopy(m->m_data, m->m_data + ETHER_ALIGN, total_len);
		m->m_data += ETHER_ALIGN;
#endif
		/* Do RX checksumming */

		/* Check IP header checksum */
		if ((rxctl & VGE_RDCTL_IPPKT) &&
		    (rxctl & VGE_RDCTL_IPCSUMOK))
			m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;

		/* Check TCP/UDP checksum */
		if ((rxctl & (VGE_RDCTL_TCPPKT|VGE_RDCTL_UDPPKT)) &&
		    (rxctl & VGE_RDCTL_PROTOCSUMOK))
			m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK;

#if NVLAN > 0
		if (rxstat & VGE_RDSTS_VTAG) {
			m->m_pkthdr.ether_vtag = swap16(rxctl & VGE_RDCTL_VLANID);
			m->m_flags |= M_VLANTAG;
		}
#endif

		ml_enqueue(&ml, m);

		lim++;
		if (lim == VGE_RX_DESC_CNT)
			break;
	}

	if_input(ifp, &ml);

	/* Flush the RX DMA ring */
	bus_dmamap_sync(sc->sc_dmat,
	    sc->vge_ldata.vge_rx_list_map,
	    0, sc->vge_ldata.vge_rx_list_map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

	sc->vge_ldata.vge_rx_prodidx = i;
	CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, lim);
}

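/*
 * Reclaim transmit descriptors that the chip has finished with:
 * free the transmitted mbufs, record collisions and output errors,
 * and return the descriptors to the free pool.
 */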
void
vge_txeof(struct vge_softc *sc)
{
	struct ifnet *ifp;
	u_int32_t txstat;
	int idx;

	ifp = &sc->arpcom.ac_if;
	idx = sc->vge_ldata.vge_tx_considx;

	/* Invalidate the TX descriptor list */

	bus_dmamap_sync(sc->sc_dmat,
	    sc->vge_ldata.vge_tx_list_map,
	    0, sc->vge_ldata.vge_tx_list_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD);

	/* Transmitted frames can now be freed from the TX list */
	while (idx != sc->vge_ldata.vge_tx_prodidx) {
		txstat = letoh32(sc->vge_ldata.vge_tx_list[idx].vge_sts);
		if (txstat & VGE_TDSTS_OWN)
			break;

		m_freem(sc->vge_ldata.vge_tx_mbuf[idx]);
		sc->vge_ldata.vge_tx_mbuf[idx] = NULL;
		bus_dmamap_unload(sc->sc_dmat,
		    sc->vge_ldata.vge_tx_dmamap[idx]);
		if (txstat & (VGE_TDSTS_EXCESSCOLL|VGE_TDSTS_COLL))
			ifp->if_collisions++;
		if (txstat & VGE_TDSTS_TXERR)
			ifp->if_oerrors++;

		sc->vge_ldata.vge_tx_free++;
		VGE_TX_DESC_INC(idx);
	}

	/* No changes made to the TX ring, so no flush needed */

	if (idx != sc->vge_ldata.vge_tx_considx) {
		sc->vge_ldata.vge_tx_considx = idx;
		ifq_clr_oactive(&ifp->if_snd);
		ifp->if_timer = 0;
	}

	/*
	 * If not all descriptors have been reaped yet,
	 * reload the timer so that we will eventually get another
	 * interrupt that will cause us to re-enter this routine.
	 * This is done in case the transmitter has gone idle.
	 */
	if (sc->vge_ldata.vge_tx_free != VGE_TX_DESC_CNT)
		CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_TIMER0_ENABLE);
}

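/*
 * Once-a-second timer: drive the MII state machine and watch for
 * link state transitions, kicking the transmit queue when the link
 * comes back up.
 */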
void
vge_tick(void *xsc)
{
	struct vge_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii = &sc->sc_mii;
	int s;

	s = splnet();

	mii_tick(mii);

	if (sc->vge_link) {
		if (!(mii->mii_media_status & IFM_ACTIVE)) {
			sc->vge_link = 0;
			ifp->if_link_state = LINK_STATE_DOWN;
			if_link_state_change(ifp);
		}
	} else {
		if (mii->mii_media_status & IFM_ACTIVE &&
		    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
			sc->vge_link = 1;
			if (mii->mii_media_status & IFM_FDX)
				ifp->if_link_state = LINK_STATE_FULL_DUPLEX;
			else
				ifp->if_link_state = LINK_STATE_HALF_DUPLEX;
			if_link_state_change(ifp);
			if (!ifq_empty(&ifp->if_snd))
				vge_start(ifp);
		}
	}
	timeout_add_sec(&sc->timer_handle, 1);
	splx(s);
}

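/*
 * PCI interrupt handler: acknowledge and dispatch RX, TX, error and
 * link state events until the chip reports no more pending causes.
 */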
int
vge_intr(void *arg)
{
	struct vge_softc *sc = arg;
	struct ifnet *ifp;
	u_int32_t status;
	int claimed = 0;

	ifp = &sc->arpcom.ac_if;

	if (!(ifp->if_flags & IFF_UP))
		return 0;

	/* Disable interrupts */
	CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);

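	/*
	 * Service events until the ISR reads back with none of the
	 * interrupt causes we care about still asserted.
	 */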
	for (;;) {
		status = CSR_READ_4(sc, VGE_ISR);
		DPRINTFN(3, ("vge_intr: status=%#x\n", status));

		/* If the card has gone away, the read returns 0xffffffff. */
		if (status == 0xFFFFFFFF)
			break;

		if (status) {
			CSR_WRITE_4(sc, VGE_ISR, status);
		}

		if ((status & VGE_INTRS) == 0)
			break;

		claimed = 1;

		if (status & (VGE_ISR_RXOK|VGE_ISR_RXOK_HIPRIO))
			vge_rxeof(sc);

		if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) {
			DPRINTFN(2, ("vge_intr: RX error, recovering\n"));
			vge_rxeof(sc);
			CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
			CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
		}

		if (status & (VGE_ISR_TXOK0|VGE_ISR_TIMER0))
			vge_txeof(sc);

		if (status & (VGE_ISR_TXDMA_STALL|VGE_ISR_RXDMA_STALL)) {
			DPRINTFN(2, ("DMA_STALL\n"));
			vge_init(ifp);
		}

		if (status & VGE_ISR_LINKSTS) {
			timeout_del(&sc->timer_handle);
			vge_tick(sc);
		}
	}

	/* Re-enable interrupts */
	CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);

	if (!ifq_empty(&ifp->if_snd))
		vge_start(ifp);

	return (claimed);
}

/*
 * Encapsulate an mbuf chain into the TX ring by combining it w/
 * the descriptors.
 */
int
vge_encap(struct vge_softc *sc, struct mbuf *m_head, int idx)
{
	bus_dmamap_t txmap;
	struct vge_tx_desc *d = NULL;
	struct vge_tx_frag *f;
	int error, frag;
	u_int32_t vge_flags;
	unsigned int len;

	vge_flags = 0;

	if (m_head->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
		vge_flags |= VGE_TDCTL_IPCSUM;
	if (m_head->m_pkthdr.csum_flags & M_TCP_CSUM_OUT)
		vge_flags |= VGE_TDCTL_TCPCSUM;
	if (m_head->m_pkthdr.csum_flags & M_UDP_CSUM_OUT)
		vge_flags |= VGE_TDCTL_UDPCSUM;

	txmap = sc->vge_ldata.vge_tx_dmamap[idx];
	error = bus_dmamap_load_mbuf(sc->sc_dmat, txmap,
	    m_head, BUS_DMA_NOWAIT);
	switch (error) {
	case 0:
		break;
	case EFBIG: /* mbuf chain is too fragmented */
		if ((error = m_defrag(m_head, M_DONTWAIT)) == 0 &&
		    (error = bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m_head,
		    BUS_DMA_NOWAIT)) == 0)
			break;
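		/* FALLTHROUGH: defragmenting or reloading the map failed */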
	default:
		return (error);
	}

	d = &sc->vge_ldata.vge_tx_list[idx];
	/* If owned by chip, fail */
	if (letoh32(d->vge_sts) & VGE_TDSTS_OWN)
		return (ENOBUFS);

	for (frag = 0; frag < txmap->dm_nsegs; frag++) {
		f = &d->vge_frag[frag];
		f->vge_buflen = htole16(VGE_BUFLEN(txmap->dm_segs[frag].ds_len));
		f->vge_addrlo = htole32(VGE_ADDR_LO(txmap->dm_segs[frag].ds_addr));
		f->vge_addrhi = htole16(VGE_ADDR_HI(txmap->dm_segs[frag].ds_addr) & 0xFFFF);
	}

	/* This chip does not do auto-padding */
	if (m_head->m_pkthdr.len < VGE_MIN_FRAMELEN) {
		f = &d->vge_frag[frag];

		f->vge_buflen = htole16(VGE_BUFLEN(VGE_MIN_FRAMELEN -
		    m_head->m_pkthdr.len));
		f->vge_addrlo = htole32(VGE_ADDR_LO(txmap->dm_segs[0].ds_addr));
		f->vge_addrhi = htole16(VGE_ADDR_HI(txmap->dm_segs[0].ds_addr) & 0xFFFF);
		len = VGE_MIN_FRAMELEN;
		frag++;
	} else
		len = m_head->m_pkthdr.len;

	/* For some reason, we need to tell the card fragment + 1 */
	frag++;

	bus_dmamap_sync(sc->sc_dmat, txmap, 0, txmap->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	d->vge_sts = htole32(len << 16);
	d->vge_ctl = htole32(vge_flags|(frag << 28) | VGE_TD_LS_NORM);

	if (len > ETHERMTU + ETHER_HDR_LEN)
		d->vge_ctl |= htole32(VGE_TDCTL_JUMBO);

#if NVLAN > 0
	/* Set up hardware VLAN tagging. */
	if (m_head->m_flags & M_VLANTAG) {
		d->vge_ctl |= htole32(m_head->m_pkthdr.ether_vtag |
		    VGE_TDCTL_VTAG);
	}
#endif

	sc->vge_ldata.vge_tx_dmamap[idx] = txmap;
	sc->vge_ldata.vge_tx_mbuf[idx] = m_head;
	sc->vge_ldata.vge_tx_free--;
	sc->vge_ldata.vge_tx_list[idx].vge_sts |= htole32(VGE_TDSTS_OWN);

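	/*
	 * Note: idx is passed by value, so this increment is purely
	 * local; the caller advances its own copy with VGE_TX_DESC_INC().
	 */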
	idx++;
	return (0);
}

/*
 * Main transmit routine.
 */
void
vge_start(struct ifnet *ifp)
{
	struct vge_softc *sc;
	struct mbuf *m_head = NULL;
	int idx, pidx = 0;

	sc = ifp->if_softc;

	if (!sc->vge_link || ifq_is_oactive(&ifp->if_snd))
		return;

	if (ifq_empty(&ifp->if_snd))
		return;

	idx = sc->vge_ldata.vge_tx_prodidx;

	pidx = idx - 1;
	if (pidx < 0)
		pidx = VGE_TX_DESC_CNT - 1;

	for (;;) {
		if (sc->vge_ldata.vge_tx_mbuf[idx] != NULL) {
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		m_head = ifq_dequeue(&ifp->if_snd);
		if (m_head == NULL)
			break;

		if (vge_encap(sc, m_head, idx)) {
			m_freem(m_head);
			ifp->if_oerrors++;
			continue;
		}

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap_ether(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
#endif

		sc->vge_ldata.vge_tx_list[pidx].vge_frag[0].vge_buflen |=
		    htole16(VGE_TXDESC_Q);

		pidx = idx;
		VGE_TX_DESC_INC(idx);
	}

	if (idx == sc->vge_ldata.vge_tx_prodidx) {
		return;
	}

	/* Flush the TX descriptors */

	bus_dmamap_sync(sc->sc_dmat,
	    sc->vge_ldata.vge_tx_list_map,
	    0, sc->vge_ldata.vge_tx_list_map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

	/* Issue a transmit command. */
	CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_WAK0);

	sc->vge_ldata.vge_tx_prodidx = idx;

	/*
	 * Use the countdown timer for interrupt moderation.
	 * 'TX done' interrupts are disabled. Instead, we reset the
	 * countdown timer, which will begin counting until it hits
	 * the value in the SSTIMER register, and then trigger an
	 * interrupt. Each time we set the TIMER0_ENABLE bit, the
	 * timer count is reloaded. Only when the transmitter
	 * is idle will the timer hit 0 and an interrupt fire.
	 */
	CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_TIMER0_ENABLE);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}

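/*
 * Stop, reset and reinitialize the chip: reload the RX/TX rings,
 * reprogram the station address and filters, and restart the MAC.
 */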
int
vge_init(struct ifnet *ifp)
{
	struct vge_softc *sc = ifp->if_softc;
	int i;

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	vge_stop(sc);
	vge_reset(sc);

	/* Initialize the RX descriptor list */
	if (vge_rx_list_init(sc) == ENOBUFS) {
		printf("%s: init failed: no memory for RX buffers\n",
		    sc->vge_dev.dv_xname);
		vge_stop(sc);
		return (ENOBUFS);
	}
	/* Initialize the TX descriptor list */
	if (vge_tx_list_init(sc) == ENOBUFS) {
		printf("%s: init failed: no memory for TX buffers\n",
		    sc->vge_dev.dv_xname);
		vge_stop(sc);
		return (ENOBUFS);
	}

	/* Set our station address */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		CSR_WRITE_1(sc, VGE_PAR0 + i, sc->arpcom.ac_enaddr[i]);

	/* Set receive FIFO threshold */
	CSR_CLRBIT_1(sc, VGE_RXCFG, VGE_RXCFG_FIFO_THR);
	CSR_SETBIT_1(sc, VGE_RXCFG, VGE_RXFIFOTHR_128BYTES);

	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) {
		/*
		 * Allow transmission and reception of VLAN tagged
		 * frames.
		 */
		CSR_CLRBIT_1(sc, VGE_RXCFG, VGE_RXCFG_VTAGOPT);
		CSR_SETBIT_1(sc, VGE_RXCFG, VGE_VTAG_OPT2);
	}

	/* Set DMA burst length */
	CSR_CLRBIT_1(sc, VGE_DMACFG0, VGE_DMACFG0_BURSTLEN);
	CSR_SETBIT_1(sc, VGE_DMACFG0, VGE_DMABURST_128);

	CSR_SETBIT_1(sc, VGE_TXCFG, VGE_TXCFG_ARB_PRIO|VGE_TXCFG_NONBLK);

	/* Set collision backoff algorithm */
	CSR_CLRBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_CRANDOM|
	    VGE_CHIPCFG1_CAP|VGE_CHIPCFG1_MBA|VGE_CHIPCFG1_BAKOPT);
	CSR_SETBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_OFSET);

	/* Disable LPSEL field in priority resolution */
	CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_LPSEL_DIS);

	/*
	 * Load the addresses of the DMA queues into the chip.
	 * Note that we only use one transmit queue.
	 */
	CSR_WRITE_4(sc, VGE_TXDESC_ADDR_LO0,
	    VGE_ADDR_LO(sc->vge_ldata.vge_tx_listseg.ds_addr));
	CSR_WRITE_2(sc, VGE_TXDESCNUM, VGE_TX_DESC_CNT - 1);

	CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO,
	    VGE_ADDR_LO(sc->vge_ldata.vge_rx_listseg.ds_addr));
	CSR_WRITE_2(sc, VGE_RXDESCNUM, VGE_RX_DESC_CNT - 1);
	CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, VGE_RX_DESC_CNT);

	/* Enable and wake up the RX descriptor queue */
	CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
	CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);

	/* Enable the TX descriptor queue */
	CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_RUN0);

	/* Set up the receive filter -- allow large frames for VLANs. */
	CSR_WRITE_1(sc, VGE_RXCTL, VGE_RXCTL_RX_GIANT);

	/* Program promiscuous mode and multicast filters. */
	vge_iff(sc);

	/* Initialize pause timer. */
	CSR_WRITE_2(sc, VGE_TX_PAUSE_TIMER, 0xFFFF);
	/*
	 * Initialize flow control parameters.
	 * TX XON high threshold : 48
	 * TX pause low threshold : 24
	 * Disable half-duplex flow control
	 */
	CSR_WRITE_1(sc, VGE_CRC2, 0xFF);
	CSR_WRITE_1(sc, VGE_CRS2, VGE_CR2_XON_ENABLE | 0x0B);

	/* Enable jumbo frame reception (if desired) */

	/* Start the MAC. */
	CSR_WRITE_1(sc, VGE_CRC0, VGE_CR0_STOP);
	CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_NOPOLL);
	CSR_WRITE_1(sc, VGE_CRS0,
	    VGE_CR0_TX_ENABLE|VGE_CR0_RX_ENABLE|VGE_CR0_START);

	/*
	 * Configure one-shot timer for microsecond
	 * resolution and load it for 500 usecs.
	 */
	CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_TIMER0_RES);
	CSR_WRITE_2(sc, VGE_SSTIMER, 400);

	/*
	 * Configure interrupt moderation for receive. Enable
	 * the holdoff counter and load it, and set the RX
	 * suppression count to the number of descriptors we
	 * want to allow before triggering an interrupt.
	 * The holdoff timer is in units of 20 usecs.
	 */

#ifdef notyet
	CSR_WRITE_1(sc, VGE_INTCTL1, VGE_INTCTL_TXINTSUP_DISABLE);
	/* Select the interrupt holdoff timer page. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_INTHLDOFF);
	CSR_WRITE_1(sc, VGE_INTHOLDOFF, 10); /* ~200 usecs */

	/* Enable use of the holdoff timer. */
	CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_HOLDOFF);
	CSR_WRITE_1(sc, VGE_INTCTL1, VGE_INTCTL_SC_RELOAD);

	/* Select the RX suppression threshold page. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_RXSUPPTHR);
	CSR_WRITE_1(sc, VGE_RXSUPPTHR, 64); /* interrupt after 64 packets */

	/* Restore the page select bits. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);
#endif

	/*
	 * Enable interrupts.
	 */
	CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS);
	CSR_WRITE_4(sc, VGE_ISR, 0);
	CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);

	/* Restore BMCR state */
	mii_mediachg(&sc->sc_mii);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	sc->vge_link = 0;

	if (!timeout_pending(&sc->timer_handle))
		timeout_add_sec(&sc->timer_handle, 1);

	return (0);
}

/*
 * Set media options.
 */
int
vge_ifmedia_upd(struct ifnet *ifp)
{
	struct vge_softc *sc = ifp->if_softc;

	return (mii_mediachg(&sc->sc_mii));
}

/*
 * Report current media status.
 */
void
vge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct vge_softc *sc = ifp->if_softc;

	mii_pollstat(&sc->sc_mii);
	ifmr->ifm_active = sc->sc_mii.mii_media_active;
	ifmr->ifm_status = sc->sc_mii.mii_media_status;
}

void
vge_miibus_statchg(struct device *dev)
{
	struct vge_softc *sc = (struct vge_softc *)dev;
	struct mii_data *mii;
	struct ifmedia_entry *ife;

	mii = &sc->sc_mii;
	ife = mii->mii_media.ifm_cur;

	/*
	 * If the user manually selects a media mode, we need to turn
	 * on the forced MAC mode bit in the DIAGCTL register. If the
	 * user happens to choose a full duplex mode, we also need to
	 * set the 'force full duplex' bit. This applies only to
	 * 10Mbps and 100Mbps speeds. In autoselect mode, forced MAC
	 * mode is disabled, and in 1000baseT mode, full duplex is
	 * always implied, so we turn on the forced mode bit but leave
	 * the FDX bit cleared.
	 */

	switch (IFM_SUBTYPE(ife->ifm_media)) {
	case IFM_AUTO:
		CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
		CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
		break;
	case IFM_1000_T:
		CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
		CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
		break;
	case IFM_100_TX:
	case IFM_10_T:
		CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
		if ((ife->ifm_media & IFM_GMASK) == IFM_FDX) {
			CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
		} else {
			CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
		}
		break;
	default:
		printf("%s: unknown media type: %llx\n",
		    sc->vge_dev.dv_xname, IFM_SUBTYPE(ife->ifm_media));
		break;
	}

	/*
	 * 802.3x flow control
	 */
	CSR_WRITE_1(sc, VGE_CRC2, VGE_CR2_FDX_TXFLOWCTL_ENABLE |
	    VGE_CR2_FDX_RXFLOWCTL_ENABLE);
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
		CSR_WRITE_1(sc, VGE_CRS2, VGE_CR2_FDX_TXFLOWCTL_ENABLE);
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
		CSR_WRITE_1(sc, VGE_CRS2, VGE_CR2_FDX_RXFLOWCTL_ENABLE);
}

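/*
 * Handle ioctl(2) requests: interface address and flag changes are
 * dealt with here, media changes are passed on to the MII layer,
 * and everything else goes to ether_ioctl().
 */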
int
vge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct vge_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	int s, error = 0;

	s = splnet();

	switch (command) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			vge_init(ifp);
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				vge_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				vge_stop(sc);
		}
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, command);
		break;

	default:
		error = ether_ioctl(ifp, &sc->arpcom, command, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			vge_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}

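/*
 * Watchdog: the transmit side has been quiet for too long.  Reap
 * anything that has completed, drain the RX ring and reinitialize
 * the chip.
 */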
void
vge_watchdog(struct ifnet *ifp)
{
	struct vge_softc *sc = ifp->if_softc;
	int s;

	s = splnet();
	printf("%s: watchdog timeout\n", sc->vge_dev.dv_xname);
	ifp->if_oerrors++;

	vge_txeof(sc);
	vge_rxeof(sc);

	vge_init(ifp);

	splx(s);
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
void
vge_stop(struct vge_softc *sc)
{
	int i;
	struct ifnet *ifp;

	ifp = &sc->arpcom.ac_if;
	ifp->if_timer = 0;

	timeout_del(&sc->timer_handle);

	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
	CSR_WRITE_1(sc, VGE_CRS0, VGE_CR0_STOP);
	CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF);
	CSR_WRITE_2(sc, VGE_TXQCSRC, 0xFFFF);
	CSR_WRITE_1(sc, VGE_RXQCSRC, 0xFF);
	CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO, 0);

	if (sc->vge_head != NULL) {
		m_freem(sc->vge_head);
		sc->vge_head = sc->vge_tail = NULL;
	}

	/* Free the TX list buffers. */
	for (i = 0; i < VGE_TX_DESC_CNT; i++) {
		if (sc->vge_ldata.vge_tx_mbuf[i] != NULL) {
			bus_dmamap_unload(sc->sc_dmat,
			    sc->vge_ldata.vge_tx_dmamap[i]);
			m_freem(sc->vge_ldata.vge_tx_mbuf[i]);
			sc->vge_ldata.vge_tx_mbuf[i] = NULL;
		}
	}

	/* Free the RX list buffers. */
	for (i = 0; i < VGE_RX_DESC_CNT; i++) {
		if (sc->vge_ldata.vge_rx_mbuf[i] != NULL) {
			bus_dmamap_unload(sc->sc_dmat,
			    sc->vge_ldata.vge_rx_dmamap[i]);
			m_freem(sc->vge_ldata.vge_rx_mbuf[i]);
			sc->vge_ldata.vge_rx_mbuf[i] = NULL;
		}
	}
}