File: dev/pci/if_bge.c
Warning: line 3669, column 3: Value stored to 'cur_tx' is never read
/*	$OpenBSD: if_bge.c,v 1.397 2022/01/09 05:42:46 jsg Exp $	*/

/*
 * Copyright (c) 2001 Wind River Systems
 * Copyright (c) 1997, 1998, 1999, 2001
 *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: if_bge.c,v 1.25 2002/11/14 23:54:49 sam Exp $
 */

/*
 * Broadcom BCM57xx/BCM590x family ethernet driver for OpenBSD.
 *
 * Written by Bill Paul <wpaul@windriver.com>
 * Senior Engineer, Wind River Systems
 */

/*
 * The Broadcom BCM5700 is based on technology originally developed by
 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
 * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
 * frames, highly configurable RX filtering, and 16 RX and TX queues
 * (which, along with RX filter rules, can be used for QOS applications).
 * Other features, such as TCP segmentation, may be available as part
 * of value-added firmware updates. Unlike the Tigon I and Tigon II,
 * firmware images can be stored in hardware and need not be compiled
 * into the driver.
 *
 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
 *
 * The BCM5701 is a single-chip solution incorporating both the BCM5700
 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
 * does not support external SSRAM.
 *
 * Broadcom also produces a variation of the BCM5700 under the "Altima"
 * brand name, which is functionally similar but lacks PCI-X support.
 *
 * Without external SSRAM, you can only have at most 4 TX rings,
 * and the use of the mini RX ring is disabled. This seems to imply
 * that these features are simply not available on the BCM5701. As a
 * result, this driver does not implement any support for the mini RX
 * ring.
 */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/timeout.h>
#include <sys/socket.h>
#include <sys/atomic.h>

#include <net/if.h>
#include <net/if_media.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#if defined(__sparc64__) || defined(__HAVE_FDT)
#include <dev/ofw/openfirm.h>
#endif

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/brgphyreg.h>

#include <dev/pci/if_bgereg.h>

#define ETHER_MIN_NOPAD	(ETHER_MIN_LEN - ETHER_CRC_LEN)	/* i.e., 60 */
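/*
 * ETHER_MIN_LEN (64) counts the 4-byte CRC that the MAC appends in
 * hardware, so the smallest frame the driver itself must supply is
 * 64 - 4 = 60 bytes; shorter TX packets are zero-padded up to this
 * size (see bge_cksum_pad() below).
 */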

const struct bge_revision * bge_lookup_rev(u_int32_t);
int bge_can_use_msi(struct bge_softc *);
int bge_probe(struct device *, void *, void *);
void bge_attach(struct device *, struct device *, void *);
int bge_detach(struct device *, int);
int bge_activate(struct device *, int);

struct cfattach bge_ca = {
	sizeof(struct bge_softc), bge_probe, bge_attach, bge_detach,
	bge_activate
};

struct cfdriver bge_cd = {
	NULL, "bge", DV_IFNET
};

void bge_txeof(struct bge_softc *);
void bge_rxcsum(struct bge_softc *, struct bge_rx_bd *, struct mbuf *);
void bge_rxeof(struct bge_softc *);

void bge_tick(void *);
void bge_stats_update(struct bge_softc *);
void bge_stats_update_regs(struct bge_softc *);
int bge_cksum_pad(struct mbuf *);
int bge_encap(struct bge_softc *, struct mbuf *, int *);
int bge_compact_dma_runt(struct mbuf *);

int bge_intr(void *);
void bge_start(struct ifqueue *);
int bge_ioctl(struct ifnet *, u_long, caddr_t);
int bge_rxrinfo(struct bge_softc *, struct if_rxrinfo *);
void bge_init(void *);
void bge_stop_block(struct bge_softc *, bus_size_t, u_int32_t);
void bge_stop(struct bge_softc *, int);
void bge_watchdog(struct ifnet *);
int bge_ifmedia_upd(struct ifnet *);
void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);

u_int8_t bge_nvram_getbyte(struct bge_softc *, int, u_int8_t *);
int bge_read_nvram(struct bge_softc *, caddr_t, int, int);
u_int8_t bge_eeprom_getbyte(struct bge_softc *, int, u_int8_t *);
int bge_read_eeprom(struct bge_softc *, caddr_t, int, int);

void bge_iff(struct bge_softc *);

int bge_newbuf_jumbo(struct bge_softc *, int);
int bge_init_rx_ring_jumbo(struct bge_softc *);
void bge_fill_rx_ring_jumbo(struct bge_softc *);
void bge_free_rx_ring_jumbo(struct bge_softc *);

int bge_newbuf(struct bge_softc *, int);
int bge_init_rx_ring_std(struct bge_softc *);
void bge_rxtick(void *);
void bge_fill_rx_ring_std(struct bge_softc *);
void bge_free_rx_ring_std(struct bge_softc *);

void bge_free_tx_ring(struct bge_softc *);
int bge_init_tx_ring(struct bge_softc *);

void bge_chipinit(struct bge_softc *);
int bge_blockinit(struct bge_softc *);
u_int32_t bge_dma_swap_options(struct bge_softc *);
int bge_phy_addr(struct bge_softc *);

u_int32_t bge_readmem_ind(struct bge_softc *, int);
void bge_writemem_ind(struct bge_softc *, int, int);
void bge_writereg_ind(struct bge_softc *, int, int);
void bge_writembx(struct bge_softc *, int, int);

int bge_miibus_readreg(struct device *, int, int);
void bge_miibus_writereg(struct device *, int, int, int);
void bge_miibus_statchg(struct device *);

#define BGE_RESET_SHUTDOWN	0
#define BGE_RESET_START		1
#define BGE_RESET_SUSPEND	2
void bge_sig_post_reset(struct bge_softc *, int);
void bge_sig_legacy(struct bge_softc *, int);
void bge_sig_pre_reset(struct bge_softc *, int);
void bge_stop_fw(struct bge_softc *, int);
void bge_reset(struct bge_softc *);
void bge_link_upd(struct bge_softc *);

void bge_ape_lock_init(struct bge_softc *);
void bge_ape_read_fw_ver(struct bge_softc *);
int bge_ape_lock(struct bge_softc *, int);
void bge_ape_unlock(struct bge_softc *, int);
void bge_ape_send_event(struct bge_softc *, uint32_t);
void bge_ape_driver_state_change(struct bge_softc *, int);

#ifdef BGE_DEBUG
#define DPRINTF(x)	do { if (bgedebug) printf x; } while (0)
#define DPRINTFN(n,x)	do { if (bgedebug >= (n)) printf x; } while (0)
int	bgedebug = 0;
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

/*
 * Various supported device vendors/types and their names. Note: the
 * spec seems to indicate that the hardware still has Alteon's vendor
 * ID burned into it, though it will always be overridden by the vendor
 * ID in the EEPROM. Just to be safe, we cover all possibilities.
 */
const struct pci_matchid bge_devices[] = {
	{ PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_BCM5700 },
	{ PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_BCM5701 },

	{ PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1000 },
	{ PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1001 },
	{ PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1003 },
	{ PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC9100 },

	{ PCI_VENDOR_APPLE, PCI_PRODUCT_APPLE_BCM5701 },

	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5700 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5701 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702_ALT },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702X },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703_ALT },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703X },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704C },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704S },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704S_ALT },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705F },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705K },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705M },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705M_ALT },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5714 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5714S },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5715 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5715S },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5717 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5717C },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5718 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5719 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5720 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5721 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5722 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5723 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5725 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5727 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751F },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751M },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5752 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5752M },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5753 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5753F },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5753M },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5754 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5754M },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5755 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5755M },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5756 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5761 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5761E },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5761S },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5761SE },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5762 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5764 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5780 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5780S },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5781 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5782 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5784 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5785F },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5785G },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5786 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5787 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5787F },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5787M },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5788 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5789 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5901 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5901A2 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5903M },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5906 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5906M },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57760 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57761 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57762 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57764 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57765 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57766 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57767 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57780 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57781 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57782 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57785 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57786 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57787 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57788 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57790 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57791 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57795 },

	{ PCI_VENDOR_FUJITSU, PCI_PRODUCT_FUJITSU_PW008GE4 },
	{ PCI_VENDOR_FUJITSU, PCI_PRODUCT_FUJITSU_PW008GE5 },
	{ PCI_VENDOR_FUJITSU, PCI_PRODUCT_FUJITSU_PP250_450_LAN },

	{ PCI_VENDOR_SCHNEIDERKOCH, PCI_PRODUCT_SCHNEIDERKOCH_SK9D21 },

	{ PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3C996 }
};

#define BGE_IS_JUMBO_CAPABLE(sc)	((sc)->bge_flags & BGE_JUMBO_CAPABLE)
#define BGE_IS_5700_FAMILY(sc)		((sc)->bge_flags & BGE_5700_FAMILY)
#define BGE_IS_5705_PLUS(sc)		((sc)->bge_flags & BGE_5705_PLUS)
#define BGE_IS_5714_FAMILY(sc)		((sc)->bge_flags & BGE_5714_FAMILY)
#define BGE_IS_575X_PLUS(sc)		((sc)->bge_flags & BGE_575X_PLUS)
#define BGE_IS_5755_PLUS(sc)		((sc)->bge_flags & BGE_5755_PLUS)
#define BGE_IS_5717_PLUS(sc)		((sc)->bge_flags & BGE_5717_PLUS)
#define BGE_IS_57765_PLUS(sc)		((sc)->bge_flags & BGE_57765_PLUS)

static const struct bge_revision {
	u_int32_t		br_chipid;
	const char		*br_name;
} bge_revisions[] = {
	{ BGE_CHIPID_BCM5700_A0, "BCM5700 A0" },
	{ BGE_CHIPID_BCM5700_A1, "BCM5700 A1" },
	{ BGE_CHIPID_BCM5700_B0, "BCM5700 B0" },
	{ BGE_CHIPID_BCM5700_B1, "BCM5700 B1" },
	{ BGE_CHIPID_BCM5700_B2, "BCM5700 B2" },
	{ BGE_CHIPID_BCM5700_B3, "BCM5700 B3" },
	{ BGE_CHIPID_BCM5700_ALTIMA, "BCM5700 Altima" },
	{ BGE_CHIPID_BCM5700_C0, "BCM5700 C0" },
	{ BGE_CHIPID_BCM5701_A0, "BCM5701 A0" },
	{ BGE_CHIPID_BCM5701_B0, "BCM5701 B0" },
	{ BGE_CHIPID_BCM5701_B2, "BCM5701 B2" },
	{ BGE_CHIPID_BCM5701_B5, "BCM5701 B5" },
	/* the 5702 and 5703 share the same ASIC ID */
	{ BGE_CHIPID_BCM5703_A0, "BCM5702/5703 A0" },
	{ BGE_CHIPID_BCM5703_A1, "BCM5702/5703 A1" },
	{ BGE_CHIPID_BCM5703_A2, "BCM5702/5703 A2" },
	{ BGE_CHIPID_BCM5703_A3, "BCM5702/5703 A3" },
	{ BGE_CHIPID_BCM5703_B0, "BCM5702/5703 B0" },
	{ BGE_CHIPID_BCM5704_A0, "BCM5704 A0" },
	{ BGE_CHIPID_BCM5704_A1, "BCM5704 A1" },
	{ BGE_CHIPID_BCM5704_A2, "BCM5704 A2" },
	{ BGE_CHIPID_BCM5704_A3, "BCM5704 A3" },
	{ BGE_CHIPID_BCM5704_B0, "BCM5704 B0" },
	{ BGE_CHIPID_BCM5705_A0, "BCM5705 A0" },
	{ BGE_CHIPID_BCM5705_A1, "BCM5705 A1" },
	{ BGE_CHIPID_BCM5705_A2, "BCM5705 A2" },
	{ BGE_CHIPID_BCM5705_A3, "BCM5705 A3" },
	{ BGE_CHIPID_BCM5750_A0, "BCM5750 A0" },
	{ BGE_CHIPID_BCM5750_A1, "BCM5750 A1" },
	{ BGE_CHIPID_BCM5750_A3, "BCM5750 A3" },
	{ BGE_CHIPID_BCM5750_B0, "BCM5750 B0" },
	{ BGE_CHIPID_BCM5750_B1, "BCM5750 B1" },
	{ BGE_CHIPID_BCM5750_C0, "BCM5750 C0" },
	{ BGE_CHIPID_BCM5750_C1, "BCM5750 C1" },
	{ BGE_CHIPID_BCM5750_C2, "BCM5750 C2" },
	{ BGE_CHIPID_BCM5714_A0, "BCM5714 A0" },
	{ BGE_CHIPID_BCM5752_A0, "BCM5752 A0" },
	{ BGE_CHIPID_BCM5752_A1, "BCM5752 A1" },
	{ BGE_CHIPID_BCM5752_A2, "BCM5752 A2" },
	{ BGE_CHIPID_BCM5714_B0, "BCM5714 B0" },
	{ BGE_CHIPID_BCM5714_B3, "BCM5714 B3" },
	{ BGE_CHIPID_BCM5715_A0, "BCM5715 A0" },
	{ BGE_CHIPID_BCM5715_A1, "BCM5715 A1" },
	{ BGE_CHIPID_BCM5715_A3, "BCM5715 A3" },
	{ BGE_CHIPID_BCM5717_A0, "BCM5717 A0" },
	{ BGE_CHIPID_BCM5717_B0, "BCM5717 B0" },
	{ BGE_CHIPID_BCM5719_A0, "BCM5719 A0" },
	{ BGE_CHIPID_BCM5719_A1, "BCM5719 A1" },
	{ BGE_CHIPID_BCM5720_A0, "BCM5720 A0" },
	{ BGE_CHIPID_BCM5755_A0, "BCM5755 A0" },
	{ BGE_CHIPID_BCM5755_A1, "BCM5755 A1" },
	{ BGE_CHIPID_BCM5755_A2, "BCM5755 A2" },
	{ BGE_CHIPID_BCM5755_C0, "BCM5755 C0" },
	{ BGE_CHIPID_BCM5761_A0, "BCM5761 A0" },
	{ BGE_CHIPID_BCM5761_A1, "BCM5761 A1" },
	{ BGE_CHIPID_BCM5762_A0, "BCM5762 A0" },
	{ BGE_CHIPID_BCM5762_B0, "BCM5762 B0" },
	{ BGE_CHIPID_BCM5784_A0, "BCM5784 A0" },
	{ BGE_CHIPID_BCM5784_A1, "BCM5784 A1" },
	/* the 5754 and 5787 share the same ASIC ID */
	{ BGE_CHIPID_BCM5787_A0, "BCM5754/5787 A0" },
	{ BGE_CHIPID_BCM5787_A1, "BCM5754/5787 A1" },
	{ BGE_CHIPID_BCM5787_A2, "BCM5754/5787 A2" },
	{ BGE_CHIPID_BCM5906_A1, "BCM5906 A1" },
	{ BGE_CHIPID_BCM5906_A2, "BCM5906 A2" },
	{ BGE_CHIPID_BCM57765_A0, "BCM57765 A0" },
	{ BGE_CHIPID_BCM57765_B0, "BCM57765 B0" },
	{ BGE_CHIPID_BCM57766_A0, "BCM57766 A0" },
	{ BGE_CHIPID_BCM57766_A1, "BCM57766 A1" },
	{ BGE_CHIPID_BCM57780_A0, "BCM57780 A0" },
	{ BGE_CHIPID_BCM57780_A1, "BCM57780 A1" },

	{ 0, NULL }
};

/*
 * Some defaults for major revisions, so that newer steppings
 * that we don't know about have a shot at working.
 */
static const struct bge_revision bge_majorrevs[] = {
	{ BGE_ASICREV_BCM5700, "unknown BCM5700" },
	{ BGE_ASICREV_BCM5701, "unknown BCM5701" },
	/* 5702 and 5703 share the same ASIC ID */
	{ BGE_ASICREV_BCM5703, "unknown BCM5703" },
	{ BGE_ASICREV_BCM5704, "unknown BCM5704" },
	{ BGE_ASICREV_BCM5705, "unknown BCM5705" },
	{ BGE_ASICREV_BCM5750, "unknown BCM5750" },
	{ BGE_ASICREV_BCM5714, "unknown BCM5714" },
	{ BGE_ASICREV_BCM5714_A0, "unknown BCM5714" },
	{ BGE_ASICREV_BCM5752, "unknown BCM5752" },
	{ BGE_ASICREV_BCM5780, "unknown BCM5780" },
	{ BGE_ASICREV_BCM5755, "unknown BCM5755" },
	{ BGE_ASICREV_BCM5761, "unknown BCM5761" },
	{ BGE_ASICREV_BCM5784, "unknown BCM5784" },
	{ BGE_ASICREV_BCM5785, "unknown BCM5785" },
	/* 5754 and 5787 share the same ASIC ID */
	{ BGE_ASICREV_BCM5787, "unknown BCM5754/5787" },
	{ BGE_ASICREV_BCM5906, "unknown BCM5906" },
	{ BGE_ASICREV_BCM57765, "unknown BCM57765" },
	{ BGE_ASICREV_BCM57766, "unknown BCM57766" },
	{ BGE_ASICREV_BCM57780, "unknown BCM57780" },
	{ BGE_ASICREV_BCM5717, "unknown BCM5717" },
	{ BGE_ASICREV_BCM5719, "unknown BCM5719" },
	{ BGE_ASICREV_BCM5720, "unknown BCM5720" },
	{ BGE_ASICREV_BCM5762, "unknown BCM5762" },

	{ 0, NULL }
};

u_int32_t
bge_readmem_ind(struct bge_softc *sc, int off)
{
	struct pci_attach_args	*pa = &(sc->bge_pa);
	u_int32_t val;

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906 &&
	    off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4)
		return (0);

	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_BASEADDR, off);
	val = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_DATA);
	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_BASEADDR, 0);
	return (val);
}

void
bge_writemem_ind(struct bge_softc *sc, int off, int val)
{
	struct pci_attach_args	*pa = &(sc->bge_pa);

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906 &&
	    off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4)
		return;

	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_BASEADDR, off);
	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_DATA, val);
	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_BASEADDR, 0);
}
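
/*
 * The MEMWIN pair above is classic indirect access: park a window
 * base in PCI config space (BGE_PCI_MEMWIN_BASEADDR), move data
 * through BGE_PCI_MEMWIN_DATA, then clear the base so a stale window
 * cannot alias later accesses.  A hypothetical caller sketch (the
 * offset name is illustrative only, not taken from this file):
 *
 *	u_int32_t v = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
 *	bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, v | 0x1);
 */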

void
bge_writereg_ind(struct bge_softc *sc, int off, int val)
{
	struct pci_attach_args	*pa = &(sc->bge_pa);

	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_BASEADDR, off);
	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_DATA, val);
}

void
bge_writembx(struct bge_softc *sc, int off, int val)
{
	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
		off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI;

	CSR_WRITE_4(sc, off, val);
}

/*
 * Clear all stale locks and select the lock for this driver instance.
 */
void
bge_ape_lock_init(struct bge_softc *sc)
{
	struct pci_attach_args *pa = &(sc->bge_pa);
	uint32_t bit, regbase;
	int i;

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761)
		regbase = BGE_APE_LOCK_GRANT;
	else
		regbase = BGE_APE_PER_LOCK_GRANT;

	/* Clear any stale locks. */
	for (i = BGE_APE_LOCK_PHY0; i <= BGE_APE_LOCK_GPIO; i++) {
		switch (i) {
		case BGE_APE_LOCK_PHY0:
		case BGE_APE_LOCK_PHY1:
		case BGE_APE_LOCK_PHY2:
		case BGE_APE_LOCK_PHY3:
			bit = BGE_APE_LOCK_GRANT_DRIVER0;
			break;
		default:
			if (pa->pa_function == 0)
				bit = BGE_APE_LOCK_GRANT_DRIVER0;
			else
				bit = (1 << pa->pa_function);
		}
		APE_WRITE_4(sc, regbase + 4 * i, bit);
	}

	/* Select the PHY lock based on the device's function number. */
	switch (pa->pa_function) {
	case 0:
		sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY0;
		break;
	case 1:
		sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY1;
		break;
	case 2:
		sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY2;
		break;
	case 3:
		sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY3;
		break;
	default:
		printf("%s: PHY lock not supported on function %d\n",
		    sc->bge_dev.dv_xname, pa->pa_function);
		break;
	}
}

/*
 * Check for APE firmware, set flags, and print version info.
 */
void
bge_ape_read_fw_ver(struct bge_softc *sc)
{
	const char *fwtype;
	uint32_t apedata, features;

	/* Check for a valid APE signature in shared memory. */
	apedata = APE_READ_4(sc, BGE_APE_SEG_SIG);
	if (apedata != BGE_APE_SEG_SIG_MAGIC) {
		sc->bge_mfw_flags &= ~BGE_MFW_ON_APE;
		return;
	}

	/* Check if APE firmware is running. */
	apedata = APE_READ_4(sc, BGE_APE_FW_STATUS);
	if ((apedata & BGE_APE_FW_STATUS_READY) == 0) {
		printf("%s: APE signature found but FW status not ready! "
		    "0x%08x\n", sc->bge_dev.dv_xname, apedata);
		return;
	}

	sc->bge_mfw_flags |= BGE_MFW_ON_APE;

	/* Fetch the APE firmware type and version. */
	apedata = APE_READ_4(sc, BGE_APE_FW_VERSION);
	features = APE_READ_4(sc, BGE_APE_FW_FEATURES);
	if ((features & BGE_APE_FW_FEATURE_NCSI) != 0) {
		sc->bge_mfw_flags |= BGE_MFW_TYPE_NCSI;
		fwtype = "NCSI";
	} else if ((features & BGE_APE_FW_FEATURE_DASH) != 0) {
		sc->bge_mfw_flags |= BGE_MFW_TYPE_DASH;
		fwtype = "DASH";
	} else
		fwtype = "UNKN";

	/* Print the APE firmware version. */
	printf(", APE firmware %s %d.%d.%d.%d", fwtype,
	    (apedata & BGE_APE_FW_VERSION_MAJMSK) >> BGE_APE_FW_VERSION_MAJSFT,
	    (apedata & BGE_APE_FW_VERSION_MINMSK) >> BGE_APE_FW_VERSION_MINSFT,
	    (apedata & BGE_APE_FW_VERSION_REVMSK) >> BGE_APE_FW_VERSION_REVSFT,
	    (apedata & BGE_APE_FW_VERSION_BLDMSK));
}

int
bge_ape_lock(struct bge_softc *sc, int locknum)
{
	struct pci_attach_args *pa = &(sc->bge_pa);
	uint32_t bit, gnt, req, status;
	int i, off;

	if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0)
		return (0);

	/* Lock request/grant registers have different bases. */
	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761) {
		req = BGE_APE_LOCK_REQ;
		gnt = BGE_APE_LOCK_GRANT;
	} else {
		req = BGE_APE_PER_LOCK_REQ;
		gnt = BGE_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	switch (locknum) {
	case BGE_APE_LOCK_GPIO:
		/* Lock required when using GPIO. */
		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761)
			return (0);
		if (pa->pa_function == 0)
			bit = BGE_APE_LOCK_REQ_DRIVER0;
		else
			bit = (1 << pa->pa_function);
		break;
	case BGE_APE_LOCK_GRC:
		/* Lock required to reset the device. */
		if (pa->pa_function == 0)
			bit = BGE_APE_LOCK_REQ_DRIVER0;
		else
			bit = (1 << pa->pa_function);
		break;
	case BGE_APE_LOCK_MEM:
		/* Lock required when accessing certain APE memory. */
		if (pa->pa_function == 0)
			bit = BGE_APE_LOCK_REQ_DRIVER0;
		else
			bit = (1 << pa->pa_function);
		break;
	case BGE_APE_LOCK_PHY0:
	case BGE_APE_LOCK_PHY1:
	case BGE_APE_LOCK_PHY2:
	case BGE_APE_LOCK_PHY3:
		/* Lock required when accessing PHYs. */
		bit = BGE_APE_LOCK_REQ_DRIVER0;
		break;
	default:
		return (EINVAL);
	}

	/* Request a lock. */
	APE_WRITE_4(sc, req + off, bit);

	/* Wait up to 1 second to acquire lock. */
	for (i = 0; i < 20000; i++) {
		status = APE_READ_4(sc, gnt + off);
		if (status == bit)
			break;
		DELAY(50);
	}

	/* Handle any errors. */
	if (status != bit) {
		printf("%s: APE lock %d request failed! "
		    "request = 0x%04x[0x%04x], status = 0x%04x[0x%04x]\n",
		    sc->bge_dev.dv_xname,
		    locknum, req + off, bit & 0xFFFF, gnt + off,
		    status & 0xFFFF);
		/* Revoke the lock request. */
		APE_WRITE_4(sc, gnt + off, bit);
		return (EBUSY);
	}

	return (0);
}

void
bge_ape_unlock(struct bge_softc *sc, int locknum)
{
	struct pci_attach_args *pa = &(sc->bge_pa);
	uint32_t bit, gnt;
	int off;

	if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0)
		return;

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761)
		gnt = BGE_APE_LOCK_GRANT;
	else
		gnt = BGE_APE_PER_LOCK_GRANT;

	off = 4 * locknum;

	switch (locknum) {
	case BGE_APE_LOCK_GPIO:
		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761)
			return;
		if (pa->pa_function == 0)
			bit = BGE_APE_LOCK_GRANT_DRIVER0;
		else
			bit = (1 << pa->pa_function);
		break;
	case BGE_APE_LOCK_GRC:
		if (pa->pa_function == 0)
			bit = BGE_APE_LOCK_GRANT_DRIVER0;
		else
			bit = (1 << pa->pa_function);
		break;
	case BGE_APE_LOCK_MEM:
		if (pa->pa_function == 0)
			bit = BGE_APE_LOCK_GRANT_DRIVER0;
		else
			bit = (1 << pa->pa_function);
		break;
	case BGE_APE_LOCK_PHY0:
	case BGE_APE_LOCK_PHY1:
	case BGE_APE_LOCK_PHY2:
	case BGE_APE_LOCK_PHY3:
		bit = BGE_APE_LOCK_GRANT_DRIVER0;
		break;
	default:
		return;
	}

	APE_WRITE_4(sc, gnt + off, bit);
}
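
/*
 * Callers treat bge_ape_lock()/bge_ape_unlock() as a simple mutex
 * around hardware shared with the APE firmware.  This is the pattern
 * bge_miibus_readreg() below actually uses:
 *
 *	if (bge_ape_lock(sc, sc->bge_phy_ape_lock) != 0)
 *		return (0);
 *	... touch the PHY ...
 *	bge_ape_unlock(sc, sc->bge_phy_ape_lock);
 */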

/*
 * Send an event to the APE firmware.
 */
void
bge_ape_send_event(struct bge_softc *sc, uint32_t event)
{
	uint32_t apedata;
	int i;

	/* NCSI does not support APE events. */
	if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0)
		return;

	/* Wait up to 1ms for APE to service previous event. */
	for (i = 10; i > 0; i--) {
		if (bge_ape_lock(sc, BGE_APE_LOCK_MEM) != 0)
			break;
		apedata = APE_READ_4(sc, BGE_APE_EVENT_STATUS);
		if ((apedata & BGE_APE_EVENT_STATUS_EVENT_PENDING) == 0) {
			APE_WRITE_4(sc, BGE_APE_EVENT_STATUS, event |
			    BGE_APE_EVENT_STATUS_EVENT_PENDING);
			bge_ape_unlock(sc, BGE_APE_LOCK_MEM);
			APE_WRITE_4(sc, BGE_APE_EVENT, BGE_APE_EVENT_1);
			break;
		}
		bge_ape_unlock(sc, BGE_APE_LOCK_MEM);
		DELAY(100);
	}
	if (i == 0) {
		printf("%s: APE event 0x%08x send timed out\n",
		    sc->bge_dev.dv_xname, event);
	}
}

void
bge_ape_driver_state_change(struct bge_softc *sc, int kind)
{
	uint32_t apedata, event;

	if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0)
		return;

	switch (kind) {
	case BGE_RESET_START:
		/* If this is the first load, clear the load counter. */
		apedata = APE_READ_4(sc, BGE_APE_HOST_SEG_SIG);
		if (apedata != BGE_APE_HOST_SEG_SIG_MAGIC)
			APE_WRITE_4(sc, BGE_APE_HOST_INIT_COUNT, 0);
		else {
			apedata = APE_READ_4(sc, BGE_APE_HOST_INIT_COUNT);
			APE_WRITE_4(sc, BGE_APE_HOST_INIT_COUNT, ++apedata);
		}
		APE_WRITE_4(sc, BGE_APE_HOST_SEG_SIG,
		    BGE_APE_HOST_SEG_SIG_MAGIC);
		APE_WRITE_4(sc, BGE_APE_HOST_SEG_LEN,
		    BGE_APE_HOST_SEG_LEN_MAGIC);

		/* Add some version info if bge(4) supports it. */
		APE_WRITE_4(sc, BGE_APE_HOST_DRIVER_ID,
		    BGE_APE_HOST_DRIVER_ID_MAGIC(1, 0));
		APE_WRITE_4(sc, BGE_APE_HOST_BEHAVIOR,
		    BGE_APE_HOST_BEHAV_NO_PHYLOCK);
		APE_WRITE_4(sc, BGE_APE_HOST_HEARTBEAT_INT_MS,
		    BGE_APE_HOST_HEARTBEAT_INT_DISABLE);
		APE_WRITE_4(sc, BGE_APE_HOST_DRVR_STATE,
		    BGE_APE_HOST_DRVR_STATE_START);
		event = BGE_APE_EVENT_STATUS_STATE_START;
		break;
	case BGE_RESET_SHUTDOWN:
		APE_WRITE_4(sc, BGE_APE_HOST_DRVR_STATE,
		    BGE_APE_HOST_DRVR_STATE_UNLOAD);
		event = BGE_APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	case BGE_RESET_SUSPEND:
		event = BGE_APE_EVENT_STATUS_STATE_SUSPEND;
		break;
	default:
		return;
	}

	bge_ape_send_event(sc, event | BGE_APE_EVENT_STATUS_DRIVER_EVNT |
	    BGE_APE_EVENT_STATUS_STATE_CHNGE);
}

u_int8_t
bge_nvram_getbyte(struct bge_softc *sc, int addr, u_int8_t *dest)
{
	u_int32_t access, byte = 0;
	int i;

	/* Lock. */
	CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
	for (i = 0; i < 8000; i++) {
		if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1)
			break;
		DELAY(20);
	}
	if (i == 8000)
		return (1);

	/* Enable access. */
	access = CSR_READ_4(sc, BGE_NVRAM_ACCESS);
	CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE);

	CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc);
	CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD);
	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
		DELAY(10);
		if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) {
			DELAY(10);
			break;
		}
	}

	if (i == BGE_TIMEOUT * 10) {
		printf("%s: nvram read timed out\n", sc->bge_dev.dv_xname);
		return (1);
	}

	/* Get result. */
	byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA);

	*dest = (swap32(byte) >> ((addr % 4) * 8)) & 0xFF;

	/* Disable access. */
	CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access);

	/* Unlock. */
	CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1);
	CSR_READ_4(sc, BGE_NVRAM_SWARB);

	return (0);
}

/*
 * Read a sequence of bytes from NVRAM.
 */
int
bge_read_nvram(struct bge_softc *sc, caddr_t dest, int off, int cnt)
{
	int err = 0, i;
	u_int8_t byte = 0;

	if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906)
		return (1);

	for (i = 0; i < cnt; i++) {
		err = bge_nvram_getbyte(sc, off + i, &byte);
		if (err)
			break;
		*(dest + i) = byte;
	}

	return (err ? 1 : 0);
}

/*
 * Read a byte of data stored in the EEPROM at address 'addr.' The
 * BCM570x supports both the traditional bitbang interface and an
 * auto access interface for reading the EEPROM. We use the auto
 * access method.
 */
u_int8_t
bge_eeprom_getbyte(struct bge_softc *sc, int addr, u_int8_t *dest)
{
	int i;
	u_int32_t byte = 0;

	/*
	 * Enable use of auto EEPROM access so we can avoid
	 * having to use the bitbang method.
	 */
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);

	/* Reset the EEPROM, load the clock period. */
	CSR_WRITE_4(sc, BGE_EE_ADDR,
	    BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
	DELAY(20);

	/* Issue the read EEPROM command. */
	CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);

	/* Wait for completion */
	for(i = 0; i < BGE_TIMEOUT * 10; i++) {
		DELAY(10);
		if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
			break;
	}

	if (i == BGE_TIMEOUT * 10) {
		printf("%s: eeprom read timed out\n", sc->bge_dev.dv_xname);
		return (1);
	}

	/* Get result. */
	byte = CSR_READ_4(sc, BGE_EE_DATA);

	*dest = (byte >> ((addr % 4) * 8)) & 0xFF;

	return (0);
}

/*
 * Read a sequence of bytes from the EEPROM.
 */
int
bge_read_eeprom(struct bge_softc *sc, caddr_t dest, int off, int cnt)
{
	int i, error = 0;
	u_int8_t byte = 0;

	for (i = 0; i < cnt; i++) {
		error = bge_eeprom_getbyte(sc, off + i, &byte);
		if (error)
			break;
		*(dest + i) = byte;
	}

	return (error ? 1 : 0);
}
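
/*
 * Illustrative caller sketch: the attach path uses this routine to
 * pull the station address out of the EEPROM.  Offset macro shown
 * here only as an example of the calling convention:
 *
 *	u_char eaddr[ETHER_ADDR_LEN];
 *
 *	if (bge_read_eeprom(sc, (caddr_t)eaddr,
 *	    BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN))
 *		printf("%s: failed to read station address\n",
 *		    sc->bge_dev.dv_xname);
 */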

int
bge_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct bge_softc *sc = (struct bge_softc *)dev;
	u_int32_t val, autopoll;
	int i;

	if (bge_ape_lock(sc, sc->bge_phy_ape_lock) != 0)
		return (0);

	/* Reading with autopolling on may trigger PCI errors */
	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_STS_CLRBIT(sc, BGE_STS_AUTOPOLL);
		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(80);
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY|
	    BGE_MIPHY(phy)|BGE_MIREG(reg));
	CSR_READ_4(sc, BGE_MI_COMM); /* force write */

	for (i = 0; i < 200; i++) {
		delay(1);
		val = CSR_READ_4(sc, BGE_MI_COMM);
		if (!(val & BGE_MICOMM_BUSY))
			break;
		delay(10);
	}

	if (i == 200) {
		printf("%s: PHY read timed out\n", sc->bge_dev.dv_xname);
		val = 0;
		goto done;
	}

	val = CSR_READ_4(sc, BGE_MI_COMM);

done:
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_STS_SETBIT(sc, BGE_STS_AUTOPOLL);
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(80);
	}

	bge_ape_unlock(sc, sc->bge_phy_ape_lock);

	if (val & BGE_MICOMM_READFAIL)
		return (0);

	return (val & 0xFFFF);
}

void
bge_miibus_writereg(struct device *dev, int phy, int reg, int val)
{
	struct bge_softc *sc = (struct bge_softc *)dev;
	u_int32_t autopoll;
	int i;

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906 &&
	    (reg == MII_100T2CR || reg == BRGPHY_MII_AUXCTL))
		return;

	if (bge_ape_lock(sc, sc->bge_phy_ape_lock) != 0)
		return;

	/* Touching the PHY with autopolling on may trigger PCI errors */
	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		DELAY(40);
		BGE_STS_CLRBIT(sc, BGE_STS_AUTOPOLL);
		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40); /* 40 usec is supposed to be adequate */
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY|
	    BGE_MIPHY(phy)|BGE_MIREG(reg)|val);
	CSR_READ_4(sc, BGE_MI_COMM); /* force write */

	for (i = 0; i < 200; i++) {
		delay(1);
		if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY))
			break;
		delay(10);
	}

	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_STS_SETBIT(sc, BGE_STS_AUTOPOLL);
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	bge_ape_unlock(sc, sc->bge_phy_ape_lock);

	if (i == 200) {
		printf("%s: PHY write timed out\n", sc->bge_dev.dv_xname);
	}
}

void
bge_miibus_statchg(struct device *dev)
{
	struct bge_softc *sc = (struct bge_softc *)dev;
	struct mii_data *mii = &sc->bge_mii;
	u_int32_t mac_mode, rx_mode, tx_mode;

	/*
	 * Get flow control negotiation result.
	 */
	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->bge_flowflags)
		sc->bge_flowflags = mii->mii_media_active & IFM_ETH_FMASK;

	if (!BGE_STS_BIT(sc, BGE_STS_LINK) &&
	    mii->mii_media_status & IFM_ACTIVE &&
	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
		BGE_STS_SETBIT(sc, BGE_STS_LINK);
	else if (BGE_STS_BIT(sc, BGE_STS_LINK) &&
	    (!(mii->mii_media_status & IFM_ACTIVE) ||
	    IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE))
		BGE_STS_CLRBIT(sc, BGE_STS_LINK);

	if (!BGE_STS_BIT(sc, BGE_STS_LINK))
		return;

	/* Set the port mode (MII/GMII) to match the link speed. */
	mac_mode = CSR_READ_4(sc, BGE_MAC_MODE) &
	    ~(BGE_MACMODE_PORTMODE | BGE_MACMODE_HALF_DUPLEX);
	tx_mode = CSR_READ_4(sc, BGE_TX_MODE);
	rx_mode = CSR_READ_4(sc, BGE_RX_MODE);

	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
	    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
		mac_mode |= BGE_PORTMODE_GMII;
	else
		mac_mode |= BGE_PORTMODE_MII;

	/* Set MAC flow control behavior to match link flow control settings. */
	tx_mode &= ~BGE_TXMODE_FLOWCTL_ENABLE;
	rx_mode &= ~BGE_RXMODE_FLOWCTL_ENABLE;
	if (mii->mii_media_active & IFM_FDX) {
		if (sc->bge_flowflags & IFM_ETH_TXPAUSE)
			tx_mode |= BGE_TXMODE_FLOWCTL_ENABLE;
		if (sc->bge_flowflags & IFM_ETH_RXPAUSE)
			rx_mode |= BGE_RXMODE_FLOWCTL_ENABLE;
	} else
		mac_mode |= BGE_MACMODE_HALF_DUPLEX;

	CSR_WRITE_4(sc, BGE_MAC_MODE, mac_mode);
	DELAY(40);
	CSR_WRITE_4(sc, BGE_TX_MODE, tx_mode);
	CSR_WRITE_4(sc, BGE_RX_MODE, rx_mode);
}

/*
 * Initialize a standard receive ring descriptor.
 */
int
bge_newbuf(struct bge_softc *sc, int i)
{
	bus_dmamap_t		dmap = sc->bge_cdata.bge_rx_std_map[i];
	struct bge_rx_bd	*r = &sc->bge_rdata->bge_rx_std_ring[i];
	struct mbuf		*m;
	int			error;

	m = MCLGETL(NULL, M_DONTWAIT, sc->bge_rx_std_len);
	if (!m)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = sc->bge_rx_std_len;
	if (!(sc->bge_flags & BGE_RX_ALIGNBUG))
		m_adj(m, ETHER_ALIGN);

	error = bus_dmamap_load_mbuf(sc->bge_dmatag, dmap, m,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m);
		return (ENOBUFS);
	}

	bus_dmamap_sync(sc->bge_dmatag, dmap, 0, dmap->dm_mapsize,
	    BUS_DMASYNC_PREREAD);
	sc->bge_cdata.bge_rx_std_chain[i] = m;

	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
	    offsetof(struct bge_ring_data, bge_rx_std_ring) +
		i * sizeof (struct bge_rx_bd),
	    sizeof (struct bge_rx_bd),
	    BUS_DMASYNC_POSTWRITE);

	BGE_HOSTADDR(r->bge_addr, dmap->dm_segs[0].ds_addr);
	r->bge_flags = BGE_RXBDFLAG_END;
	r->bge_len = m->m_len;
	r->bge_idx = i;

	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
	    offsetof(struct bge_ring_data, bge_rx_std_ring) +
		i * sizeof (struct bge_rx_bd),
	    sizeof (struct bge_rx_bd),
	    BUS_DMASYNC_PREWRITE);

	return (0);
}
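
/*
 * Note the sync bracket above: the descriptor slot is synced
 * POSTWRITE before the CPU rewrites it and PREWRITE afterwards, so
 * the update to the shared ring is ordered with respect to DMA on
 * both sides.  The mbuf data map itself only needs PREREAD, since
 * the chip writes that memory and the host reads it.
 */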

/*
 * Initialize a Jumbo receive ring descriptor.
 */
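/*
 * Jumbo buffers are BGE_JLEN bytes and may be loaded as up to four
 * DMA segments, so each slot uses the extended RX buffer descriptor,
 * which carries up to four address/length pairs.
 */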
int
bge_newbuf_jumbo(struct bge_softc *sc, int i)
{
    bus_dmamap_t dmap = sc->bge_cdata.bge_rx_jumbo_map[i];
    struct bge_ext_rx_bd *r = &sc->bge_rdata->bge_rx_jumbo_ring[i];
    struct mbuf *m;
    int error;

    m = MCLGETL(NULL, M_DONTWAIT, BGE_JLEN);
    if (!m)
        return (ENOBUFS);
    m->m_len = m->m_pkthdr.len = BGE_JUMBO_FRAMELEN;
    if (!(sc->bge_flags & BGE_RX_ALIGNBUG))
        m_adj(m, ETHER_ALIGN);

    error = bus_dmamap_load_mbuf(sc->bge_dmatag, dmap, m,
        BUS_DMA_READ|BUS_DMA_NOWAIT);
    if (error) {
        m_freem(m);
        return (ENOBUFS);
    }

    bus_dmamap_sync(sc->bge_dmatag, dmap, 0, dmap->dm_mapsize,
        BUS_DMASYNC_PREREAD);
    sc->bge_cdata.bge_rx_jumbo_chain[i] = m;

    bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
        offsetof(struct bge_ring_data, bge_rx_jumbo_ring) +
        i * sizeof (struct bge_ext_rx_bd),
        sizeof (struct bge_ext_rx_bd),
        BUS_DMASYNC_POSTWRITE);

    /*
     * Fill in the extended RX buffer descriptor.
     */
    r->bge_bd.bge_flags = BGE_RXBDFLAG_JUMBO_RING | BGE_RXBDFLAG_END;
    r->bge_bd.bge_idx = i;
    r->bge_len3 = r->bge_len2 = r->bge_len1 = 0;
    switch (dmap->dm_nsegs) {
    case 4:
        BGE_HOSTADDR(r->bge_addr3, dmap->dm_segs[3].ds_addr);
        r->bge_len3 = dmap->dm_segs[3].ds_len;
        /* FALLTHROUGH */
    case 3:
        BGE_HOSTADDR(r->bge_addr2, dmap->dm_segs[2].ds_addr);
        r->bge_len2 = dmap->dm_segs[2].ds_len;
        /* FALLTHROUGH */
    case 2:
        BGE_HOSTADDR(r->bge_addr1, dmap->dm_segs[1].ds_addr);
        r->bge_len1 = dmap->dm_segs[1].ds_len;
        /* FALLTHROUGH */
    case 1:
        BGE_HOSTADDR(r->bge_bd.bge_addr, dmap->dm_segs[0].ds_addr);
        r->bge_bd.bge_len = dmap->dm_segs[0].ds_len;
        break;
    default:
        panic("%s: %d segments", __func__, dmap->dm_nsegs);
    }

    bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
        offsetof(struct bge_ring_data, bge_rx_jumbo_ring) +
        i * sizeof (struct bge_ext_rx_bd),
        sizeof (struct bge_ext_rx_bd),
        BUS_DMASYNC_PREWRITE);

    return (0);
}

int
bge_init_rx_ring_std(struct bge_softc *sc)
{
    int i;

    if (ISSET(sc->bge_flags, BGE_RXRING_VALID))
        return (0);

    for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
        if (bus_dmamap_create(sc->bge_dmatag, sc->bge_rx_std_len, 1,
            sc->bge_rx_std_len, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
            &sc->bge_cdata.bge_rx_std_map[i]) != 0) {
            printf("%s: unable to create dmamap for slot %d\n",
                sc->bge_dev.dv_xname, i);
            goto uncreate;
        }
        bzero(&sc->bge_rdata->bge_rx_std_ring[i],
            sizeof(struct bge_rx_bd));
    }

    sc->bge_std = BGE_STD_RX_RING_CNT - 1;

    /* lwm must be greater than the replenish threshold */
    if_rxr_init(&sc->bge_std_ring, 17, BGE_STD_RX_RING_CNT);
    bge_fill_rx_ring_std(sc);

    SET(sc->bge_flags, BGE_RXRING_VALID);

    return (0);

uncreate:
    /* Destroy the maps created so far, including the one in slot 0. */
    while (--i >= 0) {
        bus_dmamap_destroy(sc->bge_dmatag,
            sc->bge_cdata.bge_rx_std_map[i]);
    }
    return (1);
}

/*
 * When the refill timeout for a ring is active, that ring is so empty
 * that no more packets can be received on it, so the interrupt handler
 * will not attempt to refill it, meaning we don't need to protect against
 * interrupts here.
 */

void
bge_rxtick(void *arg)
{
    struct bge_softc *sc = arg;

    if (ISSET(sc->bge_flags, BGE_RXRING_VALID) &&
        if_rxr_inuse(&sc->bge_std_ring) <= 8)
        bge_fill_rx_ring_std(sc);
}

void
bge_rxtick_jumbo(void *arg)
{
    struct bge_softc *sc = arg;

    if (ISSET(sc->bge_flags, BGE_JUMBO_RXRING_VALID) &&
        if_rxr_inuse(&sc->bge_jumbo_ring) <= 8)
        bge_fill_rx_ring_jumbo(sc);
}
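
/*
 * Refill the standard RX producer ring with as many buffers as
 * if_rxr_get() will grant, then tell the chip about the new
 * producer index through the mailbox register.
 */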
void
bge_fill_rx_ring_std(struct bge_softc *sc)
{
    int i;
    int post = 0;
    u_int slots;

    i = sc->bge_std;
    for (slots = if_rxr_get(&sc->bge_std_ring, BGE_STD_RX_RING_CNT);
        slots > 0; slots--) {
        BGE_INC(i, BGE_STD_RX_RING_CNT);

        if (bge_newbuf(sc, i) != 0)
            break;

        sc->bge_std = i;
        post = 1;
    }
    if_rxr_put(&sc->bge_std_ring, slots);

    if (post)
        bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);

    /*
     * bge always needs more than 8 packets on the ring. If we can't
     * do that now, then try again later.
     */
    if (if_rxr_inuse(&sc->bge_std_ring) <= 8)
        timeout_add(&sc->bge_rxtimeout, 1);
}

void
bge_free_rx_ring_std(struct bge_softc *sc)
{
    bus_dmamap_t dmap;
    struct mbuf *m;
    int i;

    if (!ISSET(sc->bge_flags, BGE_RXRING_VALID))
        return;

    for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
        dmap = sc->bge_cdata.bge_rx_std_map[i];
        m = sc->bge_cdata.bge_rx_std_chain[i];
        if (m != NULL) {
            bus_dmamap_sync(sc->bge_dmatag, dmap, 0,
                dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);
            bus_dmamap_unload(sc->bge_dmatag, dmap);
            m_freem(m);
            sc->bge_cdata.bge_rx_std_chain[i] = NULL;
        }
        bus_dmamap_destroy(sc->bge_dmatag, dmap);
        sc->bge_cdata.bge_rx_std_map[i] = NULL;
        bzero(&sc->bge_rdata->bge_rx_std_ring[i],
            sizeof(struct bge_rx_bd));
    }

    CLR(sc->bge_flags, BGE_RXRING_VALID);
}
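
/*
 * As with the standard ring, one DMA map is created per slot; the
 * jumbo ring's RCB is additionally flagged to use the extended RX
 * buffer descriptor format.
 */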
int
bge_init_rx_ring_jumbo(struct bge_softc *sc)
{
    volatile struct bge_rcb *rcb;
    int i;

    if (ISSET(sc->bge_flags, BGE_JUMBO_RXRING_VALID))
        return (0);

    for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
        if (bus_dmamap_create(sc->bge_dmatag, BGE_JLEN, 4, BGE_JLEN, 0,
            BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
            &sc->bge_cdata.bge_rx_jumbo_map[i]) != 0) {
            printf("%s: unable to create dmamap for slot %d\n",
                sc->bge_dev.dv_xname, i);
            goto uncreate;
        }
        bzero(&sc->bge_rdata->bge_rx_jumbo_ring[i],
            sizeof(struct bge_ext_rx_bd));
    }

    sc->bge_jumbo = BGE_JUMBO_RX_RING_CNT - 1;

    /* lwm must be greater than the replenish threshold */
    if_rxr_init(&sc->bge_jumbo_ring, 17, BGE_JUMBO_RX_RING_CNT);
    bge_fill_rx_ring_jumbo(sc);

    SET(sc->bge_flags, BGE_JUMBO_RXRING_VALID);

    rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
    rcb->bge_maxlen_flags =
        BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_USE_EXT_RX_BD);
    CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);

    return (0);

uncreate:
    /* Destroy the maps created so far, including the one in slot 0. */
    while (--i >= 0) {
        bus_dmamap_destroy(sc->bge_dmatag,
            sc->bge_cdata.bge_rx_jumbo_map[i]);
    }
    return (1);
}

void
bge_fill_rx_ring_jumbo(struct bge_softc *sc)
{
    int i;
    int post = 0;
    u_int slots;

    i = sc->bge_jumbo;
    for (slots = if_rxr_get(&sc->bge_jumbo_ring, BGE_JUMBO_RX_RING_CNT);
        slots > 0; slots--) {
        BGE_INC(i, BGE_JUMBO_RX_RING_CNT);

        if (bge_newbuf_jumbo(sc, i) != 0)
            break;

        sc->bge_jumbo = i;
        post = 1;
    }
    if_rxr_put(&sc->bge_jumbo_ring, slots);

    if (post)
        bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);

    /*
     * bge always needs more than 8 packets on the ring. If we can't
     * do that now, then try again later.
     */
    if (if_rxr_inuse(&sc->bge_jumbo_ring) <= 8)
        timeout_add(&sc->bge_rxtimeout_jumbo, 1);
}

void
bge_free_rx_ring_jumbo(struct bge_softc *sc)
{
    bus_dmamap_t dmap;
    struct mbuf *m;
    int i;

    if (!ISSET(sc->bge_flags, BGE_JUMBO_RXRING_VALID))
        return;

    for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
        dmap = sc->bge_cdata.bge_rx_jumbo_map[i];
        m = sc->bge_cdata.bge_rx_jumbo_chain[i];
        if (m != NULL) {
            bus_dmamap_sync(sc->bge_dmatag, dmap, 0,
                dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);
            bus_dmamap_unload(sc->bge_dmatag, dmap);
            m_freem(m);
            sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
        }
        bus_dmamap_destroy(sc->bge_dmatag, dmap);
        sc->bge_cdata.bge_rx_jumbo_map[i] = NULL;
        bzero(&sc->bge_rdata->bge_rx_jumbo_ring[i],
            sizeof(struct bge_ext_rx_bd));
    }

    CLR(sc->bge_flags, BGE_JUMBO_RXRING_VALID);
}

void
bge_free_tx_ring(struct bge_softc *sc)
{
    int i;

    if (!(sc->bge_flags & BGE_TXRING_VALID))
        return;

    for (i = 0; i < BGE_TX_RING_CNT; i++) {
        if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
            m_freem(sc->bge_cdata.bge_tx_chain[i]);
            sc->bge_cdata.bge_tx_chain[i] = NULL;
            sc->bge_cdata.bge_tx_map[i] = NULL;
        }
        bzero(&sc->bge_rdata->bge_tx_ring[i],
            sizeof(struct bge_tx_bd));

        bus_dmamap_destroy(sc->bge_dmatag, sc->bge_txdma[i]);
    }

    sc->bge_flags &= ~BGE_TXRING_VALID;
}

int
bge_init_tx_ring(struct bge_softc *sc)
{
    int i;
    bus_size_t txsegsz, txmaxsegsz;

    if (sc->bge_flags & BGE_TXRING_VALID)
        return (0);

    sc->bge_txcnt = 0;
    sc->bge_tx_saved_considx = 0;

    /* Initialize transmit producer index for host-memory send ring. */
    sc->bge_tx_prodidx = 0;
    bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
    if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX)
        bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);

    /* NIC-memory send ring not used; initialize to zero. */
    bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
    if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX)
        bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);

    if (BGE_IS_JUMBO_CAPABLE(sc)) {
        txsegsz = 4096;
        txmaxsegsz = BGE_JLEN;
    } else {
        txsegsz = MCLBYTES;
        txmaxsegsz = MCLBYTES;
    }

    for (i = 0; i < BGE_TX_RING_CNT; i++) {
        if (bus_dmamap_create(sc->bge_dmatag, txmaxsegsz,
            BGE_NTXSEG, txsegsz, 0, BUS_DMA_NOWAIT, &sc->bge_txdma[i]))
            return (ENOBUFS);
    }

    sc->bge_flags |= BGE_TXRING_VALID;

    return (0);
}
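
/*
 * Program the RX filter from the interface state: promiscuous mode,
 * a catch-all multicast filter, or the 128-bit multicast hash table
 * computed from the current group memberships.
 */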
void
bge_iff(struct bge_softc *sc)
{
    struct arpcom *ac = &sc->arpcom;
    struct ifnet *ifp = &ac->ac_if;
    struct ether_multi *enm;
    struct ether_multistep step;
    u_int8_t hashes[16];
    u_int32_t h, rxmode;

    /* First, zot all the existing filters. */
    rxmode = CSR_READ_4(sc, BGE_RX_MODE) & ~BGE_RXMODE_RX_PROMISC;
    ifp->if_flags &= ~IFF_ALLMULTI;
    memset(hashes, 0x00, sizeof(hashes));

    if (ifp->if_flags & IFF_PROMISC) {
        ifp->if_flags |= IFF_ALLMULTI;
        rxmode |= BGE_RXMODE_RX_PROMISC;
    } else if (ac->ac_multirangecnt > 0) {
        ifp->if_flags |= IFF_ALLMULTI;
        memset(hashes, 0xff, sizeof(hashes));
    } else {
        ETHER_FIRST_MULTI(step, ac, enm);
        while (enm != NULL) {
            h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);

            setbit(hashes, h & 0x7F);

            ETHER_NEXT_MULTI(step, enm);
        }
    }

    bus_space_write_raw_region_4(sc->bge_btag, sc->bge_bhandle, BGE_MAR0,
        hashes, sizeof(hashes));
    CSR_WRITE_4(sc, BGE_RX_MODE, rxmode);
}

void
bge_sig_pre_reset(struct bge_softc *sc, int type)
{
    /* no bge_asf_mode. */

    if (type == BGE_RESET_START || type == BGE_RESET_SUSPEND)
        bge_ape_driver_state_change(sc, type);
}

void
bge_sig_post_reset(struct bge_softc *sc, int type)
{
    /* no bge_asf_mode. */

    if (type == BGE_RESET_SHUTDOWN)
        bge_ape_driver_state_change(sc, type);
}

void
bge_sig_legacy(struct bge_softc *sc, int type)
{
    /* no bge_asf_mode. */
}

void
bge_stop_fw(struct bge_softc *sc, int type)
{
    /* no bge_asf_mode. */
}
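
/*
 * Return the byte/word-swap bits for BGE_MODE_CTL; the BCM5720
 * additionally enables swapping on the Host-to-BMC RX/TX paths.
 */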
u_int32_t
bge_dma_swap_options(struct bge_softc *sc)
{
    u_int32_t dma_options = BGE_DMA_SWAP_OPTIONS;

    if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) {
        dma_options |= BGE_MODECTL_BYTESWAP_B2HRX_DATA |
            BGE_MODECTL_WORDSWAP_B2HRX_DATA | BGE_MODECTL_B2HRX_ENABLE |
            BGE_MODECTL_HTX2B_ENABLE;
    }

    return (dma_options);
}
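
/*
 * On the BCM5717/5719/5720 the PHY address is the PCI function
 * number plus 1, or plus 8 when the port is strapped for SerDes;
 * all other chips use PHY address 1.
 */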
int
bge_phy_addr(struct bge_softc *sc)
{
    struct pci_attach_args *pa = &(sc->bge_pa);
    int phy_addr = 1;

    switch (BGE_ASICREV(sc->bge_chipid)) {
    case BGE_ASICREV_BCM5717:
    case BGE_ASICREV_BCM5719:
    case BGE_ASICREV_BCM5720:
        phy_addr = pa->pa_function;
        if (sc->bge_chipid != BGE_CHIPID_BCM5717_A0) {
            phy_addr += (CSR_READ_4(sc, BGE_SGDIG_STS) &
                BGE_SGDIGSTS_IS_SERDES) ? 8 : 1;
        } else {
            phy_addr += (CSR_READ_4(sc, BGE_CPMU_PHY_STRAP) &
                BGE_CPMU_PHY_STRAP_IS_SERDES) ? 8 : 1;
        }
    }

    return (phy_addr);
}

/*
 * Do endian, PCI and DMA initialization.
 */
void
bge_chipinit(struct bge_softc *sc)
{
    struct pci_attach_args *pa = &(sc->bge_pa);
    u_int32_t dma_rw_ctl, misc_ctl, mode_ctl;
    int i;

    /* Set endianness before we access any non-PCI registers. */
    misc_ctl = BGE_INIT;
    if (sc->bge_flags & BGE_TAGGED_STATUS)
        misc_ctl |= BGE_PCIMISCCTL_TAGGED_STATUS;
    pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL,
        misc_ctl);

    /*
     * Clear the MAC statistics block in the NIC's
     * internal memory.
     */
    for (i = BGE_STATS_BLOCK;
        i < BGE_STATS_BLOCK_END + 1; i += sizeof(u_int32_t))
        BGE_MEMWIN_WRITE(pa->pa_pc, pa->pa_tag, i, 0);

    for (i = BGE_STATUS_BLOCK;
        i < BGE_STATUS_BLOCK_END + 1; i += sizeof(u_int32_t))
        BGE_MEMWIN_WRITE(pa->pa_pc, pa->pa_tag, i, 0);

    if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57765 ||
        BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57766) {
        /*
         * For the 57766 and non-Ax versions of 57765, bootcode
         * needs to set up the PCIE Fast Training Sequence (FTS)
         * value to prevent transmit hangs.
         */
        if (BGE_CHIPREV(sc->bge_chipid) != BGE_CHIPREV_57765_AX) {
            CSR_WRITE_4(sc, BGE_CPMU_PADRNG_CTL,
                CSR_READ_4(sc, BGE_CPMU_PADRNG_CTL) |
                BGE_CPMU_PADRNG_CTL_RDIV2);
        }
    }

    /*
     * Set up the PCI DMA control register.
     */
    dma_rw_ctl = BGE_PCIDMARWCTL_RD_CMD_SHIFT(6) |
        BGE_PCIDMARWCTL_WR_CMD_SHIFT(7);

    if (sc->bge_flags & BGE_PCIE) {
        if (sc->bge_mps >= 256)
            dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(7);
        else
            dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
    } else if (sc->bge_flags & BGE_PCIX) {
        /* PCI-X bus */
        if (BGE_IS_5714_FAMILY(sc)) {
            /* 256 bytes for read and write. */
            dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(2) |
                BGE_PCIDMARWCTL_WR_WAT_SHIFT(2);

            if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5780)
                dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL;
            else
                dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE_LOCAL;
        } else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
            /* 1536 bytes for read, 384 bytes for write. */
            dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
                BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
        } else {
            /* 384 bytes for read and write. */
            dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(3) |
                BGE_PCIDMARWCTL_WR_WAT_SHIFT(3) |
                (0x0F);
        }

        if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 ||
            BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
            u_int32_t tmp;

            /* Set ONEDMA_ATONCE for hardware workaround. */
            tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f;
            if (tmp == 6 || tmp == 7)
                dma_rw_ctl |=
                    BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL;

            /* Set PCI-X DMA write workaround. */
            dma_rw_ctl |= BGE_PCIDMARWCTL_ASRT_ALL_BE;
        }
    } else {
        /* Conventional PCI bus: 256 bytes for read and write. */
        dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
            BGE_PCIDMARWCTL_WR_WAT_SHIFT(7);

        if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5705 &&
            BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5750)
            dma_rw_ctl |= 0x0F;
    }

    if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
        BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701)
        dma_rw_ctl |= BGE_PCIDMARWCTL_USE_MRM |
            BGE_PCIDMARWCTL_ASRT_ALL_BE;

    if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 ||
        BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704)
        dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;

    if (BGE_IS_5717_PLUS(sc)) {
        dma_rw_ctl &= ~BGE_PCIDMARWCTL_DIS_CACHE_ALIGNMENT;
        if (sc->bge_chipid == BGE_CHIPID_BCM57765_A0)
            dma_rw_ctl &= ~BGE_PCIDMARWCTL_CRDRDR_RDMA_MRRS_MSK;

        /*
         * Enable HW workaround for controllers that misinterpret
         * a status tag update and leave interrupts permanently
         * disabled.
         */
        if (!BGE_IS_57765_PLUS(sc) &&
            BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5717 &&
            BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5762)
            dma_rw_ctl |= BGE_PCIDMARWCTL_TAGGED_STATUS_WA;
    }

    pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, dma_rw_ctl);

    /*
     * Set up general mode register.
     */
    mode_ctl = bge_dma_swap_options(sc);
    if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720 ||
        BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762) {
        /* Retain Host-2-BMC settings written by APE firmware. */
        mode_ctl |= CSR_READ_4(sc, BGE_MODE_CTL) &
            (BGE_MODECTL_BYTESWAP_B2HRX_DATA |
            BGE_MODECTL_WORDSWAP_B2HRX_DATA |
            BGE_MODECTL_B2HRX_ENABLE | BGE_MODECTL_HTX2B_ENABLE);
    }
    mode_ctl |= BGE_MODECTL_MAC_ATTN_INTR | BGE_MODECTL_HOST_SEND_BDS |
        BGE_MODECTL_TX_NO_PHDR_CSUM;

    /*
     * The BCM5701 B5 has a bug causing data corruption when using
     * 64-bit DMA reads, which can be terminated early and then
     * completed later as 32-bit accesses, in combination with
     * certain bridges.
     */
    if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701 &&
        sc->bge_chipid == BGE_CHIPID_BCM5701_B5)
        mode_ctl |= BGE_MODECTL_FORCE_PCI32;

    CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);

    /*
     * Disable memory write invalidate. Apparently it is not supported
     * properly by these devices.
     */
    PCI_CLRBIT(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
        PCI_COMMAND_INVALIDATE_ENABLE);

#ifdef __brokenalpha__
    /*
     * Must ensure that we do not cross an 8K (bytes) boundary
     * for DMA reads. Our highest limit is 1K bytes. This is a
     * restriction on some ALPHA platforms with early revision
     * 21174 PCI chipsets, such as the AlphaPC 164lx
     */
    PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
        BGE_PCI_READ_BNDRY_1024);
#endif

    /* Set the timer prescaler (always 66MHz) */
    CSR_WRITE_4(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ);

    if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
        DELAY(40);    /* XXX */

        /* Put PHY into ready state */
        BGE_CLRBIT(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ);
        CSR_READ_4(sc, BGE_MISC_CFG);    /* Flush */
        DELAY(40);
    }
}
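
/*
 * Program the controller's internal memory layout and ring control
 * blocks (RCBs): buffer manager pools and watermarks, the producer,
 * return and send rings, and their replenish thresholds.
 */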
int
bge_blockinit(struct bge_softc *sc)
{
    volatile struct bge_rcb *rcb;
    vaddr_t rcb_addr;
    bge_hostaddr taddr;
    u_int32_t dmactl, rdmareg, mimode, val;
    int i, limit;

    /*
     * Initialize the memory window pointer register so that
     * we can access the first 32K of internal NIC RAM. This will
     * allow us to set up the TX send ring RCBs and the RX return
     * ring RCBs, plus other things which live in NIC memory.
     */
    CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);

    /* Configure mbuf memory pool */
    if (!BGE_IS_5705_PLUS(sc)) {
        CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
            BGE_BUFFPOOL_1);

        if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704)
            CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
        else
            CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);

        /* Configure DMA resource pool */
        CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
            BGE_DMA_DESCRIPTORS);
        CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
    }

    /* Configure mbuf pool watermarks */
    /* new Broadcom docs strongly recommend these: */
    if (BGE_IS_5717_PLUS(sc)) {
        CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
        CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x2a);
        CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xa0);
    } else if (BGE_IS_5705_PLUS(sc)) {
        CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);

        if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
            CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04);
            CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10);
        } else {
            CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
            CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
        }
    } else {
        CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
        CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
        CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
    }

    /* Configure DMA resource watermarks */
    CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
    CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);

    /* Enable buffer manager */
    val = BGE_BMANMODE_ENABLE | BGE_BMANMODE_LOMBUF_ATTN;
    /*
     * Change the arbitration algorithm of TXMBUF read request to
     * round-robin instead of priority based for BCM5719. When
     * TXFIFO is almost empty, RDMA will hold its request until
     * TXFIFO is not almost empty.
     */
    if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719)
        val |= BGE_BMANMODE_NO_TX_UNDERRUN;
    CSR_WRITE_4(sc, BGE_BMAN_MODE, val);

    /* Poll for buffer manager start indication */
    for (i = 0; i < 2000; i++) {
        if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
            break;
        DELAY(10);
    }

    if (i == 2000) {
        printf("%s: buffer manager failed to start\n",
            sc->bge_dev.dv_xname);
        return (ENXIO);
    }

    /* Enable flow-through queues */
    CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
    CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);

    /* Wait until queue initialization is complete */
    for (i = 0; i < 2000; i++) {
        if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
            break;
        DELAY(10);
    }

    if (i == 2000) {
        printf("%s: flow-through queue init failed\n",
            sc->bge_dev.dv_xname);
        return (ENXIO);
    }

    /*
     * Summary of rings supported by the controller:
     *
     * Standard Receive Producer Ring
     * - This ring is used to feed receive buffers for "standard"
     *   sized frames (typically 1536 bytes) to the controller.
     *
     * Jumbo Receive Producer Ring
     * - This ring is used to feed receive buffers for jumbo sized
     *   frames (i.e. anything bigger than the "standard" frames)
     *   to the controller.
     *
     * Mini Receive Producer Ring
     * - This ring is used to feed receive buffers for "mini"
     *   sized frames to the controller.
     * - This feature required external memory for the controller
     *   but was never used in a production system. Should always
     *   be disabled.
     *
     * Receive Return Ring
     * - After the controller has placed an incoming frame into a
     *   receive buffer that buffer is moved into a receive return
     *   ring. The driver is then responsible for passing the
     *   buffer up to the stack. Many versions of the controller
     *   support multiple RR rings.
     *
     * Send Ring
     * - This ring is used for outgoing frames. Many versions of
     *   the controller support multiple send rings.
     */

    /* Initialize the standard RX ring control block */
    rcb = &sc->bge_rdata->bge_info.bge_std_rx_rcb;
    BGE_HOSTADDR(rcb->bge_hostaddr, BGE_RING_DMA_ADDR(sc, bge_rx_std_ring));
    if (BGE_IS_5717_PLUS(sc)) {
        /*
         * Bits 31-16: Programmable ring size (2048, 1024, 512, .., 32)
         * Bits 15-2 : Maximum RX frame size
         * Bit 1     : 1 = Ring Disabled, 0 = Ring Enabled
         * Bit 0     : Reserved
         */
        rcb->bge_maxlen_flags =
            BGE_RCB_MAXLEN_FLAGS(512, ETHER_MAX_DIX_LEN << 2);
    } else if (BGE_IS_5705_PLUS(sc)) {
        /*
         * Bits 31-16: Programmable ring size (512, 256, 128, 64, 32)
         * Bits 15-2 : Reserved (should be 0)
         * Bit 1     : 1 = Ring Disabled, 0 = Ring Enabled
         * Bit 0     : Reserved
         */
        rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
    } else {
        /*
         * Ring size is always XXX entries
         * Bits 31-16: Maximum RX frame size
         * Bits 15-2 : Reserved (should be 0)
         * Bit 1     : 1 = Ring Disabled, 0 = Ring Enabled
         * Bit 0     : Reserved
         */
        rcb->bge_maxlen_flags =
            BGE_RCB_MAXLEN_FLAGS(ETHER_MAX_DIX_LEN, 0);
    }
    if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 ||
        BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 ||
        BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720)
        rcb->bge_nicaddr = BGE_STD_RX_RINGS_5717;
    else
        rcb->bge_nicaddr = BGE_STD_RX_RINGS;
    /* Write the standard receive producer ring control block. */
    CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
    CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
    CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
    CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);

    /* Reset the standard receive producer ring producer index. */
    bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0);

    /*
     * Initialize the Jumbo RX ring control block
     * We set the 'ring disabled' bit in the flags
     * field until we're actually ready to start
     * using this ring (i.e. once we set the MTU
     * high enough to require it).
     */
    if (sc->bge_flags & BGE_JUMBO_RING) {
        rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
        BGE_HOSTADDR(rcb->bge_hostaddr,
            BGE_RING_DMA_ADDR(sc, bge_rx_jumbo_ring));
        rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
            BGE_RCB_FLAG_USE_EXT_RX_BD | BGE_RCB_FLAG_RING_DISABLED);
        if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 ||
            BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 ||
            BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720)
            rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS_5717;
        else
            rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
        CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
            rcb->bge_hostaddr.bge_addr_hi);
        CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
            rcb->bge_hostaddr.bge_addr_lo);
        /* Program the jumbo receive producer ring RCB parameters. */
        CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
            rcb->bge_maxlen_flags);
        CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
        /* Reset the jumbo receive producer ring producer index. */
        bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
    }

    /* Disable the mini receive producer ring RCB. */
    if (BGE_IS_5700_FAMILY(sc)) {
        /* Set up dummy disabled mini ring RCB */
        rcb = &sc->bge_rdata->bge_info.bge_mini_rx_rcb;
        rcb->bge_maxlen_flags =
            BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
        CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
            rcb->bge_maxlen_flags);
        /* Reset the mini receive producer ring producer index. */
        bge_writembx(sc, BGE_MBX_RX_MINI_PROD_LO, 0);

        /* XXX why? */
        bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
            offsetof(struct bge_ring_data, bge_info),
            sizeof (struct bge_gib),
            BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
    }

    /* Choose de-pipeline mode for BCM5906 A0, A1 and A2. */
    if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
        if (sc->bge_chipid == BGE_CHIPID_BCM5906_A0 ||
            sc->bge_chipid == BGE_CHIPID_BCM5906_A1 ||
            sc->bge_chipid == BGE_CHIPID_BCM5906_A2)
            CSR_WRITE_4(sc, BGE_ISO_PKT_TX,
                (CSR_READ_4(sc, BGE_ISO_PKT_TX) & ~3) | 2);
    }
    /*
     * The BD ring replenish thresholds control how often the
     * hardware fetches new BD's from the producer rings in host
     * memory. Setting the value too low on a busy system can
     * starve the hardware and reduce the throughput.
     *
     * Set the BD ring replenish thresholds. The recommended
     * values are 1/8th the number of descriptors allocated to
     * each ring, but since we try to avoid filling the entire
     * ring we set these to the minimal value of 8. This needs to
     * be done on several of the supported chip revisions anyway,
     * to work around HW bugs.
     */
    CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, 8);
    if (sc->bge_flags & BGE_JUMBO_RING)
        CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, 8);

    if (BGE_IS_5717_PLUS(sc)) {
        CSR_WRITE_4(sc, BGE_STD_REPL_LWM, 4);
        CSR_WRITE_4(sc, BGE_JUMBO_REPL_LWM, 4);
    }

    /*
     * Disable all send rings by setting the 'ring disabled' bit
     * in the flags field of all the TX send ring control blocks,
     * located in NIC memory.
     */
    if (BGE_IS_5700_FAMILY(sc)) {
        /* 5700 to 5704 had 16 send rings. */
        limit = BGE_TX_RINGS_EXTSSRAM_MAX;
    } else if (BGE_IS_57765_PLUS(sc) ||
        BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762)
        limit = 2;
    else if (BGE_IS_5717_PLUS(sc))
        limit = 4;
    else
        limit = 1;
    rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
    for (i = 0; i < limit; i++) {
        RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
            BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
        RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0);
        rcb_addr += sizeof(struct bge_rcb);
    }

    /* Configure send ring RCB 0 (we use only the first ring) */
    rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
    BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_tx_ring));
    RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
    RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
    if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 ||
        BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 ||
        BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720)
        RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, BGE_SEND_RING_5717);
    else
        RCB_WRITE_4(sc, rcb_addr, bge_nicaddr,
            BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
    RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
        BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));

    /*
     * Disable all receive return rings by setting the
     * 'ring disabled' bit in the flags field of all the receive
     * return ring control blocks, located in NIC memory.
     */
    if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 ||
        BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 ||
        BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) {
        /* Should be 17, use 16 until we get an SRAM map. */
        limit = 16;
    } else if (BGE_IS_5700_FAMILY(sc))
        limit = BGE_RX_RINGS_MAX;
    else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5755 ||
        BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762 ||
        BGE_IS_57765_PLUS(sc))
        limit = 4;
    else
        limit = 1;
    /* Disable all receive return rings */
    rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
    for (i = 0; i < limit; i++) {
        RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, 0);
        RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, 0);
        RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
            BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt,
            BGE_RCB_FLAG_RING_DISABLED));
        RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0);
        bge_writembx(sc, BGE_MBX_RX_CONS0_LO +
            (i * (sizeof(u_int64_t))), 0);
        rcb_addr += sizeof(struct bge_rcb);
    }
2135 | |
2136 | /* |
2137 | * Set up receive return ring 0. Note that the NIC address |
2138 | * for RX return rings is 0x0. The return rings live entirely |
2139 | * within the host, so the nicaddr field in the RCB isn't used. |
2140 | */ |
2141 | rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
2142 | BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_rx_return_ring));
2143 | RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
2144 | RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
2145 | RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0x00000000);
2146 | RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
2147 | BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));
2148 | |
2149 | /* Set random backoff seed for TX */ |
2150 | CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
2151 | (sc->arpcom.ac_enaddr[0] + sc->arpcom.ac_enaddr[1] +
2152 | sc->arpcom.ac_enaddr[2] + sc->arpcom.ac_enaddr[3] +
2153 | sc->arpcom.ac_enaddr[4] + sc->arpcom.ac_enaddr[5]) &
2154 | BGE_TX_BACKOFF_SEED_MASK);
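 | /*
 | * The backoff seed is simply the sum of the six station address
 | * bytes masked to BGE_TX_BACKOFF_SEED_MASK (0x3FF per the
 | * annotation), so NICs with different MAC addresses will most
 | * likely end up with different random backoff sequences.
 | */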
2155 | |
2156 | /* Set inter-packet gap */ |
2157 | val = 0x2620; |
2158 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720 ||
2159 | BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762)
2160 | val |= CSR_READ_4(sc, BGE_TX_LENGTHS) &
2161 | (BGE_TXLEN_JMB_FRM_LEN_MSK | BGE_TXLEN_CNT_DN_VAL_MSK);
2162 | CSR_WRITE_4(sc, BGE_TX_LENGTHS, val);
2163 | |
2164 | /* |
2165 | * Specify which ring to use for packets that don't match |
2166 | * any RX rules. |
2167 | */ |
2168 | CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
2169 | |
2170 | /* |
2171 | * Configure number of RX lists. One interrupt distribution |
2172 | * list, sixteen active lists, one bad frames class. |
2173 | */ |
2174 | CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
2175 | |
2176 | /* Initialize RX list placement stats mask. */ |
2177 | CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007BFFFF);
2178 | CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
2179 | |
2180 | /* Disable host coalescing until we get it set up */ |
2181 | CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
2182 | |
2183 | /* Poll to make sure it's shut down. */ |
2184 | for (i = 0; i < 2000; i++) { |
2185 | if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
2186 | break;
2187 | DELAY(10);
2188 | }
2189 | 
2190 | if (i == 2000) {
2191 | printf("%s: host coalescing engine failed to idle\n",
2192 | sc->bge_dev.dv_xname);
2193 | return (ENXIO);
2194 | } |
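 | /*
 | * The poll above allows 2000 iterations of 10 usec each, i.e.
 | * roughly 20 msec, for the host coalescing engine to go idle
 | * before giving up with ENXIO.
 | */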
2195 | |
2196 | /* Set up host coalescing defaults */ |
2197 | CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
2198 | CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
2199 | CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
2200 | CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
2201 | if (!(BGE_IS_5705_PLUS(sc))) {
2202 | CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
2203 | CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
2204 | }
2205 | CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
2206 | CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);
2207 | |
2208 | /* Set up address of statistics block */ |
2209 | if (!(BGE_IS_5705_PLUS(sc))) {
2210 | BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_info.bge_stats));
2211 | CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI, taddr.bge_addr_hi);
2212 | CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO, taddr.bge_addr_lo);
2213 | 
2214 | CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
2215 | CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
2216 | CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
2217 | } |
2218 | |
2219 | /* Set up address of status block */ |
2220 | BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_status_block));
2221 | CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, taddr.bge_addr_hi);
2222 | CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO, taddr.bge_addr_lo);
2223 | |
2224 | sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx = 0; |
2225 | sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx = 0; |
2226 | |
2227 | /* Set up status block size. */ |
2228 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 &&
2229 | sc->bge_chipid != BGE_CHIPID_BCM5700_C0) {
2230 | val = BGE_STATBLKSZ_FULL;
2231 | bzero(&sc->bge_rdata->bge_status_block, BGE_STATUS_BLK_SZ);
2232 | } else {
2233 | val = BGE_STATBLKSZ_32BYTE;
2234 | bzero(&sc->bge_rdata->bge_status_block, 32);
2235 | } |
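 | /*
 | * Only BCM5700 chips earlier than the C0 stepping use the
 | * full-sized status block here; everything else runs with the
 | * 32-byte variant, which is why only the first 32 bytes are
 | * cleared in that case.
 | */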
2236 | |
2237 | /* Turn on host coalescing state machine */ |
2238 | CSR_WRITE_4(sc, BGE_HCC_MODE, val | BGE_HCCMODE_ENABLE);
2239 | |
2240 | /* Turn on RX BD completion state machine and enable attentions */ |
2241 | CSR_WRITE_4(sc, BGE_RBDC_MODE,
2242 | BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);
2243 | 
2244 | /* Turn on RX list placement state machine */
2245 | CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
2246 | 
2247 | /* Turn on RX list selector state machine. */
2248 | if (!(BGE_IS_5705_PLUS(sc)))
2249 | CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
2250 | |
2251 | val = BGE_MACMODE_TXDMA_ENB | BGE_MACMODE_RXDMA_ENB |
2252 | BGE_MACMODE_RX_STATS_CLEAR | BGE_MACMODE_TX_STATS_CLEAR |
2253 | BGE_MACMODE_RX_STATS_ENB | BGE_MACMODE_TX_STATS_ENB |
2254 | BGE_MACMODE_FRMHDR_DMA_ENB;
2255 | 
2256 | if (sc->bge_flags & BGE_FIBER_TBI)
2257 | val |= BGE_PORTMODE_TBI;
2258 | else if (sc->bge_flags & BGE_FIBER_MII)
2259 | val |= BGE_PORTMODE_GMII;
2260 | else
2261 | val |= BGE_PORTMODE_MII;
2262 | 
2263 | /* Allow APE to send/receive frames. */
2264 | if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) != 0)
2265 | val |= BGE_MACMODE_APE_RX_EN | BGE_MACMODE_APE_TX_EN;
2266 | 
2267 | /* Turn on DMA, clear stats */
2268 | CSR_WRITE_4(sc, BGE_MAC_MODE, val);
2269 | DELAY(40);
2270 | 
2271 | /* Set misc. local control, enable interrupts on attentions */
2272 | BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);
2273 | |
2274 | #ifdef notdef |
2275 | /* Assert GPIO pins for PHY reset */ |
2276 | BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
2277 | BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
2278 | BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
2279 | BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
2280 | #endif |
2281 | |
2282 | /* Turn on DMA completion state machine */ |
2283 | if (!(BGE_IS_5705_PLUS(sc)))
2284 | CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
2285 | 
2286 | val = BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS;
2287 | 
2288 | /* Enable host coalescing bug fix. */
2289 | if (BGE_IS_5755_PLUS(sc))
2290 | val |= BGE_WDMAMODE_STATUS_TAG_FIX;
2291 | 
2292 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785)
2293 | val |= BGE_WDMAMODE_BURST_ALL_DATA;
2294 | 
2295 | /* Turn on write DMA state machine */
2296 | CSR_WRITE_4(sc, BGE_WDMA_MODE, val);
2297 | DELAY(40);
2298 | 
2299 | val = BGE_RDMAMODE_ENABLE|BGE_RDMAMODE_ALL_ATTNS;
2300 | |
2301 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717)
2302 | val |= BGE_RDMAMODE_MULT_DMA_RD_DIS;
2303 | 
2304 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 ||
2305 | BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785 ||
2306 | BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57780)
2307 | val |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN |
2308 | BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN |
2309 | BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN;
2310 | 
2311 | if (sc->bge_flags & BGE_PCIE)
2312 | val |= BGE_RDMAMODE_FIFO_LONG_BURST;
2313 | |
2314 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720 ||
2315 | BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762) {
2316 | val |= CSR_READ_4(sc, BGE_RDMA_MODE) &
2317 | BGE_RDMAMODE_H2BNC_VLAN_DET;
2318 | /*
2319 | * Allow multiple outstanding read requests from
2320 | * non-LSO read DMA engine.
2321 | */
2322 | val &= ~BGE_RDMAMODE_MULT_DMA_RD_DIS;
2323 | } |
2324 | |
2325 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761 ||
2326 | BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 ||
2327 | BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785 ||
2328 | BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57780 ||
2329 | BGE_IS_5717_PLUS(sc) || BGE_IS_57765_PLUS(sc)) {
2330 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762)
2331 | rdmareg = BGE_RDMA_RSRVCTRL_REG2;
2332 | else
2333 | rdmareg = BGE_RDMA_RSRVCTRL;
2334 | dmactl = CSR_READ_4(sc, rdmareg);
2335 | /* |
2336 | * Adjust tx margin to prevent TX data corruption and |
2337 | * fix internal FIFO overflow. |
2338 | */ |
2339 | if (sc->bge_chipid == BGE_CHIPID_BCM5719_A0 ||
2340 | BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762) {
2341 | dmactl &= ~(BGE_RDMA_RSRVCTRL_FIFO_LWM_MASK |
2342 | BGE_RDMA_RSRVCTRL_FIFO_HWM_MASK |
2343 | BGE_RDMA_RSRVCTRL_TXMRGN_MASK);
2344 | dmactl |= BGE_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
2345 | BGE_RDMA_RSRVCTRL_FIFO_HWM_1_5K |
2346 | BGE_RDMA_RSRVCTRL_TXMRGN_320B;
2347 | } |
2348 | /* |
2349 | * Enable fix for read DMA FIFO overruns. |
2350 | * The fix is to limit the number of RX BDs |
2351 | * the hardware would fetch at a time. |
2352 | */ |
2353 | CSR_WRITE_4(sc, rdmareg, dmactl |
2354 | BGE_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
2355 | } |
2356 | |
2357 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719) {
2358 | CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL,
2359 | CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) |
2360 | BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_4K |
2361 | BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
2362 | } else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) {
2363 | /*
2364 | * Allow 4KB burst length reads for non-LSO frames.
2365 | * Enable 512B burst length reads for buffer descriptors.
2366 | */
2367 | CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL,
2368 | CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) |
2369 | BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_512 |
2370 | BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
2371 | } else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762) {
2372 | CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL_REG2,
2373 | CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL_REG2) |
2374 | BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_4K |
2375 | BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
2376 | } |
2377 | |
2378 | CSR_WRITE_4(sc, BGE_RDMA_MODE, val);
2379 | DELAY(40);
2380 | |
2381 | if (sc->bge_flags & BGE_RDMA_BUG) {
2382 | for (i = 0; i < BGE_NUM_RDMA_CHANNELS / 2; i++) {
2383 | val = CSR_READ_4(sc, BGE_RDMA_LENGTH + i * 4);
2384 | if ((val & 0xFFFF) > ETHER_MAX_LEN)
2385 | break;
2386 | if (((val >> 16) & 0xFFFF) > ETHER_MAX_LEN)
2387 | break;
2388 | }
2389 | if (i != BGE_NUM_RDMA_CHANNELS / 2) {
2390 | val = CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL);
2391 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719)
2392 | val |= BGE_RDMA_TX_LENGTH_WA_5719;
2393 | else
2394 | val |= BGE_RDMA_TX_LENGTH_WA_5720;
2395 | CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL, val);
2396 | } |
2397 | } |
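 | /*
 | * Each BGE_RDMA_LENGTH word apparently packs two 16-bit
 | * per-channel length counters, which is why the scan above reads
 | * only BGE_NUM_RDMA_CHANNELS / 2 (i.e. 2) words and checks both
 | * halves against ETHER_MAX_LEN before arming the 5719/5720 TX
 | * length workaround bit.
 | */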
2398 | |
2399 | /* Turn on RX data completion state machine */ |
2400 | CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
2401 | 
2402 | /* Turn on RX BD initiator state machine */
2403 | CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
2404 | 
2405 | /* Turn on RX data and RX BD initiator state machine */
2406 | CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
2407 | 
2408 | /* Turn on Mbuf cluster free state machine */
2409 | if (!BGE_IS_5705_PLUS(sc))
2410 | CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
2411 | 
2412 | /* Turn on send BD completion state machine */
2413 | CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
2414 | 
2415 | /* Turn on send data completion state machine */
2416 | val = BGE_SDCMODE_ENABLE;
2417 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761)
2418 | val |= BGE_SDCMODE_CDELAY;
2419 | CSR_WRITE_4(sc, BGE_SDC_MODE, val);
2420 | 
2421 | /* Turn on send data initiator state machine */
2422 | CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
2423 | 
2424 | /* Turn on send BD initiator state machine */
2425 | CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
2426 | 
2427 | /* Turn on send BD selector state machine */
2428 | CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
2429 | 
2430 | CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007BFFFF);
2431 | CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
2432 | BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);
2433 | |
2434 | /* ack/clear link change events */ |
2435 | CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
2436 | BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
2437 | BGE_MACSTAT_LINK_CHANGED);
2438 | |
2439 | /* Enable PHY auto polling (for MII/GMII only) */ |
2440 | if (sc->bge_flags & BGE_FIBER_TBI) {
2441 | CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
2442 | } else {
2443 | if ((sc->bge_flags & BGE_CPMU_PRESENT) != 0)
2444 | mimode = BGE_MIMODE_500KHZ_CONST;
2445 | else
2446 | mimode = BGE_MIMODE_BASE;
2447 | if (BGE_IS_5700_FAMILY(sc) ||
2448 | BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705) {
2449 | mimode |= BGE_MIMODE_AUTOPOLL;
2450 | BGE_STS_SETBIT(sc, BGE_STS_AUTOPOLL);
2451 | }
2452 | mimode |= BGE_MIMODE_PHYADDR(sc->bge_phy_addr);
2453 | CSR_WRITE_4(sc, BGE_MI_MODE, mimode);
2454 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700)
2455 | CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
2456 | BGE_EVTENB_MI_INTERRUPT);
2457 | } |
2458 | |
2459 | /* |
2460 | * Clear any pending link state attention. |
2461 | * Otherwise some link state change events may be lost until attention |
2462 | * is cleared by bge_intr() -> bge_link_upd() sequence. |
2463 | * It's not necessary on newer BCM chips - perhaps enabling link |
2464 | * state change attentions implies clearing pending attention. |
2465 | */ |
2466 | CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
2467 | BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
2468 | BGE_MACSTAT_LINK_CHANGED);
2469 | |
2470 | /* Enable link state change attentions. */ |
2471 | BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
2472 | |
2473 | return (0); |
2474 | } |
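 | /*
 | * Illustrative sketch (not part of the driver): per the macro
 | * annotations above, BGE_ASICREV() and BGE_CHIPREV() are plain
 | * right shifts of the chip id, so the fields decode as follows.
 | */
 | #ifdef notdef
 | u_int32_t chipid = BGE_CHIPID_BCM5719_A0;	/* 0x05719000 */
 | u_int32_t asicrev = BGE_ASICREV(chipid);	/* chipid >> 12 == 0x5719 */
 | u_int32_t chiprev = BGE_CHIPREV(chipid);	/* chipid >> 8 == 0x57190 */
 | #endif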
2475 | |
2476 | const struct bge_revision * |
2477 | bge_lookup_rev(u_int32_t chipid) |
2478 | { |
2479 | const struct bge_revision *br; |
2480 | |
2481 | for (br = bge_revisions; br->br_name != NULL; br++) {
2482 | if (br->br_chipid == chipid) |
2483 | return (br); |
2484 | } |
2485 | |
2486 | for (br = bge_majorrevs; br->br_name != NULL; br++) {
2487 | if (br->br_chipid == BGE_ASICREV(chipid))
2488 | return (br); |
2489 | } |
2490 | |
2491 | return (NULL);
2492 | } |
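 | /*
 | * Usage sketch (hypothetical caller): an exact chip id match wins,
 | * otherwise the major ASIC revision name is reported, much as
 | * bge_attach() does below when printing the probe line.
 | */
 | #ifdef notdef
 | const struct bge_revision *br = bge_lookup_rev(sc->bge_chipid);
 | printf("%s", br != NULL ? br->br_name : "unknown ASIC");
 | #endif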
2493 | |
2494 | int |
2495 | bge_can_use_msi(struct bge_softc *sc) |
2496 | { |
2497 | int can_use_msi = 0; |
2498 | |
2499 | switch (BGE_ASICREV(sc->bge_chipid)) {
2500 | case BGE_ASICREV_BCM5714_A0:
2501 | case BGE_ASICREV_BCM5714:
2502 | /* |
2503 | * Apparently, MSI doesn't work when these chips are |
2504 | * configured in single-port mode. |
2505 | */ |
2506 | break; |
2507 | case BGE_ASICREV_BCM5750:
2508 | if (BGE_CHIPREV(sc->bge_chipid) != BGE_CHIPREV_5750_AX &&
2509 | BGE_CHIPREV(sc->bge_chipid) != BGE_CHIPREV_5750_BX)
2510 | can_use_msi = 1; |
2511 | break; |
2512 | default: |
2513 | if (BGE_IS_575X_PLUS(sc))
2514 | can_use_msi = 1; |
2515 | } |
2516 | |
2517 | return (can_use_msi); |
2518 | } |
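 | /*
 | * The result of bge_can_use_msi() is consulted in bge_attach()
 | * below: when it returns 0, PCI_FLAGS_MSI_ENABLED is cleared so
 | * the interrupt is mapped as a legacy INTx instead.
 | */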
2519 | |
2520 | /* |
2521 | * Probe for a Broadcom chip. Check the PCI vendor and device IDs |
2522 | * against our list and return its name if we find a match. Note |
2523 | * that since the Broadcom controller contains VPD support, we |
2524 | * can get the device name string from the controller itself instead |
2525 | * of the compiled-in string. This is a little slow, but it guarantees |
2526 | * we'll always announce the right product name. |
2527 | */ |
2528 | int |
2529 | bge_probe(struct device *parent, void *match, void *aux) |
2530 | { |
2531 | return (pci_matchbyid(aux, bge_devices, nitems(bge_devices)));
2532 | } |
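 | /*
 | * The probe itself is a plain table lookup: pci_matchbyid()
 | * compares the vendor/product ids in the attach args against the
 | * bge_devices table; no device registers are touched here.
 | */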
2533 | |
2534 | void |
2535 | bge_attach(struct device *parent, struct device *self, void *aux) |
2536 | { |
2537 | struct bge_softc *sc = (struct bge_softc *)self; |
2538 | struct pci_attach_args *pa = aux; |
2539 | pci_chipset_tag_t pc = pa->pa_pc; |
2540 | const struct bge_revision *br; |
2541 | pcireg_t pm_ctl, memtype, subid, reg; |
2542 | pci_intr_handle_t ih; |
2543 | const char *intrstr = NULL;
2544 | int gotenaddr = 0; |
2545 | u_int32_t hwcfg = 0; |
2546 | u_int32_t mac_addr = 0; |
2547 | u_int32_t misccfg; |
2548 | struct ifnet *ifp; |
2549 | caddr_t kva; |
2550 | #ifdef __sparc64__ |
2551 | char name[32]; |
2552 | #endif |
2553 | |
2554 | sc->bge_pa = *pa; |
2555 | |
2556 | subid = pci_conf_read(pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
2557 | |
2558 | /* |
2559 | * Map control/status registers. |
2560 | */ |
2561 | DPRINTFN(5, ("Map control/status regs\n")); |
2562 | |
2563 | DPRINTFN(5, ("pci_mapreg_map\n")); |
2564 | memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BGE_PCI_BAR0);
2565 | if (pci_mapreg_map(pa, BGE_PCI_BAR0, memtype, 0, &sc->bge_btag,
2566 | &sc->bge_bhandle, NULL, &sc->bge_bsize, 0)) {
2567 | printf(": can't find mem space\n"); |
2568 | return; |
2569 | } |
2570 | |
2571 | /* |
2572 | * Kludge for 5700 Bx bug: a hardware bug (PCIX byte enable?) |
2573 | * can clobber the chip's PCI config-space power control registers, |
2574 | * leaving the card in D3 powersave state. |
2575 | * We do not have memory-mapped registers in this state, |
2576 | * so force device into D0 state before starting initialization. |
2577 | */ |
2578 | pm_ctl = pci_conf_read(pc, pa->pa_tag, BGE_PCI_PWRMGMT_CMD);
2579 | pm_ctl &= ~(PCI_PWR_D0|PCI_PWR_D1|PCI_PWR_D2|PCI_PWR_D3);
2580 | pm_ctl |= (1 << 8) | PCI_PWR_D0; /* D0 state */
2581 | pci_conf_write(pc, pa->pa_tag, BGE_PCI_PWRMGMT_CMD, pm_ctl);
2582 | DELAY(1000); /* 27 usec is allegedly sufficient */
2583 | |
2584 | /* |
2585 | * Save ASIC rev. |
2586 | */ |
2587 | sc->bge_chipid = |
2588 | (pci_conf_read(pc, pa->pa_tag, BGE_PCI_MISC_CTL)
2589 | >> BGE_PCIMISCCTL_ASICREV_SHIFT);
2590 | |
2591 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_USE_PRODID_REG) {
2592 | switch (PCI_PRODUCT(pa->pa_id)) {
2593 | case PCI_PRODUCT_BROADCOM_BCM5717:
2594 | case PCI_PRODUCT_BROADCOM_BCM5718:
2595 | case PCI_PRODUCT_BROADCOM_BCM5719:
2596 | case PCI_PRODUCT_BROADCOM_BCM5720:
2597 | case PCI_PRODUCT_BROADCOM_BCM5725:
2598 | case PCI_PRODUCT_BROADCOM_BCM5727:
2599 | case PCI_PRODUCT_BROADCOM_BCM5762:
2600 | case PCI_PRODUCT_BROADCOM_BCM57764:
2601 | case PCI_PRODUCT_BROADCOM_BCM57767:
2602 | case PCI_PRODUCT_BROADCOM_BCM57787:
2603 | sc->bge_chipid = pci_conf_read(pc, pa->pa_tag,
2604 | BGE_PCI_GEN2_PRODID_ASICREV);
2605 | break;
2606 | case PCI_PRODUCT_BROADCOM_BCM57761:
2607 | case PCI_PRODUCT_BROADCOM_BCM57762:
2608 | case PCI_PRODUCT_BROADCOM_BCM57765:
2609 | case PCI_PRODUCT_BROADCOM_BCM57766:
2610 | case PCI_PRODUCT_BROADCOM_BCM57781:
2611 | case PCI_PRODUCT_BROADCOM_BCM57782:
2612 | case PCI_PRODUCT_BROADCOM_BCM57785:
2613 | case PCI_PRODUCT_BROADCOM_BCM57786:
2614 | case PCI_PRODUCT_BROADCOM_BCM57791:
2615 | case PCI_PRODUCT_BROADCOM_BCM57795:
2616 | sc->bge_chipid = pci_conf_read(pc, pa->pa_tag,
2617 | BGE_PCI_GEN15_PRODID_ASICREV);
2618 | break;
2619 | default:
2620 | sc->bge_chipid = pci_conf_read(pc, pa->pa_tag,
2621 | BGE_PCI_PRODID_ASICREV);
2622 | break; |
2623 | } |
2624 | } |
2625 | |
2626 | sc->bge_phy_addr = bge_phy_addr(sc); |
2627 | |
2628 | printf(", "); |
2629 | br = bge_lookup_rev(sc->bge_chipid); |
2630 | if (br == NULL)
2631 | printf("unknown ASIC (0x%x)", sc->bge_chipid); |
2632 | else |
2633 | printf("%s (0x%x)", br->br_name, sc->bge_chipid); |
2634 | |
2635 | /* |
2636 | * PCI Express or PCI-X controller check. |
2637 | */ |
2638 | if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIEXPRESS,
2639 | &sc->bge_expcap, NULL) != 0) {
2640 | /* Extract supported maximum payload size. */
2641 | reg = pci_conf_read(pa->pa_pc, pa->pa_tag, sc->bge_expcap +
2642 | PCI_PCIE_DCAP);
2643 | sc->bge_mps = 128 << (reg & 0x7);
2644 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 ||
2645 | BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720)
2646 | sc->bge_expmrq = (fls(2048) - 8) << 12;
2647 | else
2648 | sc->bge_expmrq = (fls(4096) - 8) << 12;
2649 | /* Disable PCIe Active State Power Management (ASPM). */
2650 | reg = pci_conf_read(pa->pa_pc, pa->pa_tag,
2651 | sc->bge_expcap + PCI_PCIE_LCSR);
2652 | reg &= ~(PCI_PCIE_LCSR_ASPM_L0S | PCI_PCIE_LCSR_ASPM_L1);
2653 | pci_conf_write(pa->pa_pc, pa->pa_tag,
2654 | sc->bge_expcap + PCI_PCIE_LCSR, reg);
2655 | sc->bge_flags |= BGE_PCIE;
2656 | } else {
2657 | if ((pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE) &
2658 | BGE_PCISTATE_PCI_BUSMODE) == 0)
2659 | sc->bge_flags |= BGE_PCIX;
2660 | } |
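 | /*
 | * Worked example: fls(2048) is 12 and fls(4096) is 13, so
 | * bge_expmrq becomes 4 << 12 (0x4000) on BCM5719/BCM5720 and
 | * 5 << 12 (0x5000) elsewhere, i.e. the PCIe device control
 | * encoding for a 2048- or 4096-byte maximum read request size.
 | */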
2661 | |
2662 | /* |
2663 | * SEEPROM check. |
2664 | */ |
2665 | #ifdef __sparc64__ |
2666 | /* |
2667 | * Onboard interfaces on UltraSPARC systems generally don't |
2668 | * have a SEEPROM fitted. These interfaces, and cards that |
2669 | * have FCode, are named "network" by the PROM, whereas cards |
2670 | * without FCode show up as "ethernet". Since we don't really |
2671 | * need the information from the SEEPROM on cards that have |
2672 | * FCode it's fine to pretend they don't have one. |
2673 | */ |
2674 | if (OF_getprop(PCITAG_NODE(pa->pa_tag), "name", name, |
2675 | sizeof(name)) > 0 && strcmp(name, "network") == 0) |
2676 | sc->bge_flags |= BGE_NO_EEPROM;
2677 | #endif |
2678 | |
2679 | /* Save chipset family. */ |
2680 | switch (BGE_ASICREV(sc->bge_chipid)) {
2681 | case BGE_ASICREV_BCM5762:
2682 | case BGE_ASICREV_BCM57765:
2683 | case BGE_ASICREV_BCM57766:
2684 | sc->bge_flags |= BGE_57765_PLUS;
2685 | /* FALLTHROUGH */
2686 | case BGE_ASICREV_BCM5717:
2687 | case BGE_ASICREV_BCM5719:
2688 | case BGE_ASICREV_BCM5720:
2689 | sc->bge_flags |= BGE_5717_PLUS | BGE_5755_PLUS | BGE_575X_PLUS |
2690 | BGE_5705_PLUS | BGE_JUMBO_CAPABLE | BGE_JUMBO_RING |
2691 | BGE_JUMBO_FRAME;
2692 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 ||
2693 | BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) {
2694 | /*
2695 | * Enable work around for DMA engine miscalculation
2696 | * of TXMBUF available space.
2697 | */
2698 | sc->bge_flags |= BGE_RDMA_BUG;
2699 | 
2700 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 &&
2701 | sc->bge_chipid == BGE_CHIPID_BCM5719_A0) {
2702 | /* Jumbo frame on BCM5719 A0 does not work. */
2703 | sc->bge_flags &= ~(BGE_JUMBO_CAPABLE |
2704 | BGE_JUMBO_RING | BGE_JUMBO_FRAME);
2705 | }
2706 | }
2707 | break;
2708 | case BGE_ASICREV_BCM5755:
2709 | case BGE_ASICREV_BCM5761:
2710 | case BGE_ASICREV_BCM5784:
2711 | case BGE_ASICREV_BCM5785:
2712 | case BGE_ASICREV_BCM5787:
2713 | case BGE_ASICREV_BCM57780:
2714 | sc->bge_flags |= BGE_5755_PLUS | BGE_575X_PLUS | BGE_5705_PLUS;
2715 | break;
2716 | case BGE_ASICREV_BCM5700:
2717 | case BGE_ASICREV_BCM5701:
2718 | case BGE_ASICREV_BCM5703:
2719 | case BGE_ASICREV_BCM5704:
2720 | sc->bge_flags |= BGE_5700_FAMILY | BGE_JUMBO_CAPABLE | BGE_JUMBO_RING;
2721 | break;
2722 | case BGE_ASICREV_BCM5714_A0:
2723 | case BGE_ASICREV_BCM5780:
2724 | case BGE_ASICREV_BCM5714:
2725 | sc->bge_flags |= BGE_5714_FAMILY | BGE_JUMBO_CAPABLE | BGE_JUMBO_STD;
2726 | /* FALLTHROUGH */
2727 | case BGE_ASICREV_BCM5750:
2728 | case BGE_ASICREV_BCM5752:
2729 | case BGE_ASICREV_BCM5906:
2730 | sc->bge_flags |= BGE_575X_PLUS;
2731 | /* FALLTHROUGH */
2732 | case BGE_ASICREV_BCM5705:
2733 | sc->bge_flags |= BGE_5705_PLUS;
2734 | break; |
2735 | } |
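 | /*
 | * The FALLTHROUGH cases make the flag sets cumulative: the 5714
 | * family, for instance, falls through into the 575X and 5705
 | * cases, so predicates such as BGE_IS_5705_PLUS() match every
 | * newer family as well.
 | */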
2736 | |
2737 | if (sc->bge_flags & BGE_JUMBO_STD)
2738 | sc->bge_rx_std_len = BGE_JLEN;
2739 | else
2740 | sc->bge_rx_std_len = MCLBYTES;
2741 | |
2742 | /* |
2743 | * When using the BCM5701 in PCI-X mode, data corruption has |
2744 | * been observed in the first few bytes of some received packets. |
2745 | * Aligning the packet buffer in memory eliminates the corruption. |
2746 | * Unfortunately, this misaligns the packet payloads. On platforms |
2747 | * which do not support unaligned accesses, we will realign the |
2748 | * payloads by copying the received packets. |
2749 | */ |
2750 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701 &&
2751 | sc->bge_flags & BGE_PCIX)
2752 | sc->bge_flags |= BGE_RX_ALIGNBUG;
2753 | |
2754 | if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
2755 | BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701) &&
2756 | PCI_VENDOR(subid) == DELL_VENDORID)
2757 | sc->bge_phy_flags |= BGE_PHY_NO_3LED;
2758 | |
2759 | misccfg = CSR_READ_4(sc, BGE_MISC_CFG);
2760 | misccfg &= BGE_MISCCFG_BOARD_ID_MASK;
2761 | 
2762 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 &&
2763 | (misccfg == BGE_MISCCFG_BOARD_ID_5788 ||
2764 | misccfg == BGE_MISCCFG_BOARD_ID_5788M))
2765 | sc->bge_flags |= BGE_IS_5788;
2766 | |
2767 | if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 &&
2768 | (misccfg == 0x4000 || misccfg == 0x8000)) ||
2769 | (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 &&
2770 | PCI_VENDOR(pa->pa_id) == PCI_VENDOR_BROADCOM &&
2771 | (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5901 ||
2772 | PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5901A2 ||
2773 | PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5705F)) ||
2774 | (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_BROADCOM &&
2775 | (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5751F ||
2776 | PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5753F ||
2777 | PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5787F)) ||
2778 | PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57790 ||
2779 | PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57791 ||
2780 | PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57795 ||
2781 | BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
2782 | sc->bge_phy_flags |= BGE_PHY_10_100_ONLY;
2783 | |
2784 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
2785 | (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 &&
2786 | (sc->bge_chipid != BGE_CHIPID_BCM5705_A0 &&
2787 | sc->bge_chipid != BGE_CHIPID_BCM5705_A1)) ||
2788 | BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
2789 | sc->bge_phy_flags |= BGE_PHY_NO_WIRESPEED;
2790 | 
2791 | if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 ||
2792 | sc->bge_chipid == BGE_CHIPID_BCM5701_B0)
2793 | sc->bge_phy_flags |= BGE_PHY_CRC_BUG;
2794 | if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5703_AX ||
2795 | BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5704_AX)
2796 | sc->bge_phy_flags |= BGE_PHY_ADC_BUG;
2797 | if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0)
2798 | sc->bge_phy_flags |= BGE_PHY_5704_A0_BUG;
2799 | |
2800 | if ((BGE_IS_5705_PLUS(sc)) &&
2801 | BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906 &&
2802 | BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5785 &&
2803 | BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM57780 &&
2804 | !BGE_IS_5717_PLUS(sc)) {
2805 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5755 ||
2806 | BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761 ||
2807 | BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 ||
2808 | BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5787) {
2809 | if (PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5722 &&
2810 | PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5756)
2811 | sc->bge_phy_flags |= BGE_PHY_JITTER_BUG;
2812 | if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5755M)
2813 | sc->bge_phy_flags |= BGE_PHY_ADJUST_TRIM;
2814 | } else
2815 | sc->bge_phy_flags |= BGE_PHY_BER_BUG;
2816 | } |
2817 | |
2818 | /* Identify chips with APE processor. */ |
2819 | switch (BGE_ASICREV(sc->bge_chipid)) {
2820 | case BGE_ASICREV_BCM5717:
2821 | case BGE_ASICREV_BCM5719:
2822 | case BGE_ASICREV_BCM5720:
2823 | case BGE_ASICREV_BCM5761:
2824 | case BGE_ASICREV_BCM5762:
2825 | sc->bge_flags |= BGE_APE;
2826 | break; |
2827 | } |
2828 | |
2829 | /* Chips with APE need BAR2 access for APE registers/memory. */ |
2830 | if ((sc->bge_flags & BGE_APE) != 0) {
2831 | memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BGE_PCI_BAR2);
2832 | if (pci_mapreg_map(pa, BGE_PCI_BAR2, memtype, 0,
2833 | &sc->bge_apetag, &sc->bge_apehandle, NULL,
2834 | &sc->bge_apesize, 0)) {
2835 | printf(": couldn't map BAR2 memory\n");
2836 | goto fail_1;
2837 | }
2838 | 
2839 | /* Enable APE register/memory access by host driver. */
2840 | reg = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE);
2841 | reg |= BGE_PCISTATE_ALLOW_APE_CTLSPC_WR |
2842 | BGE_PCISTATE_ALLOW_APE_SHMEM_WR |
2843 | BGE_PCISTATE_ALLOW_APE_PSPACE_WR;
2844 | pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE, reg);
2845 | |
2846 | bge_ape_lock_init(sc); |
2847 | bge_ape_read_fw_ver(sc); |
2848 | } |
2849 | |
2850 | /* Identify the chips that use a CPMU. */
2851 | if (BGE_IS_5717_PLUS(sc) ||
2852 | BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 ||
2853 | BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761 ||
2854 | BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785 ||
2855 | BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57780)
2856 | sc->bge_flags |= BGE_CPMU_PRESENT;
2857 | 
2858 | if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_MSI,
2859 | &sc->bge_msicap, NULL)) {
2860 | if (bge_can_use_msi(sc) == 0)
2861 | pa->pa_flags &= ~PCI_FLAGS_MSI_ENABLED;
2862 | } |
2863 | |
2864 | DPRINTFN(5, ("pci_intr_map\n")); |
2865 | if (pci_intr_map_msi(pa, &ih) == 0) |
2866 | sc->bge_flags |= BGE_MSI;
2867 | else if (pci_intr_map(pa, &ih)) { |
2868 | printf(": couldn't map interrupt\n"); |
2869 | goto fail_1; |
2870 | } |
2871 | |
2872 | /* |
2873 | * All controllers except the BCM5700 support tagged status, but
2874 | | * we use tagged status only in the MSI case on BCM5717; otherwise
2875 | * MSI on BCM5717 does not work.
2876 | */ |
2877 | if (BGE_IS_5717_PLUS(sc) && sc->bge_flags & BGE_MSI)
2878 | sc->bge_flags |= BGE_TAGGED_STATUS;
2879 | |
2880 | DPRINTFN(5, ("pci_intr_string\n")); |
2881 | intrstr = pci_intr_string(pc, ih); |
2882 | |
2883 | /* Try to reset the chip. */ |
2884 | DPRINTFN(5, ("bge_reset\n")); |
2885 | bge_sig_pre_reset(sc, BGE_RESET_SHUTDOWN);
2886 | bge_reset(sc);
2887 | 
2888 | bge_sig_legacy(sc, BGE_RESET_SHUTDOWN);
2889 | bge_sig_post_reset(sc, BGE_RESET_SHUTDOWN);
2890 | |
2891 | bge_chipinit(sc); |
2892 | |
2893 | #if defined(__sparc64__) || defined(__HAVE_FDT) |
2894 | if (!gotenaddr && PCITAG_NODE(pa->pa_tag)) { |
2895 | if (OF_getprop(PCITAG_NODE(pa->pa_tag), "local-mac-address", |
2896 | sc->arpcom.ac_enaddr, ETHER_ADDR_LEN) == ETHER_ADDR_LEN)
2897 | gotenaddr = 1; |
2898 | } |
2899 | #endif |
2900 | |
2901 | /* |
2902 | * Get station address from the EEPROM. |
2903 | */ |
2904 | if (!gotenaddr) { |
2905 | mac_addr = bge_readmem_ind(sc, 0x0c14); |
2906 | if ((mac_addr >> 16) == 0x484b) { |
2907 | sc->arpcom.ac_enaddr[0] = (u_char)(mac_addr >> 8); |
2908 | sc->arpcom.ac_enaddr[1] = (u_char)mac_addr; |
2909 | mac_addr = bge_readmem_ind(sc, 0x0c18); |
2910 | sc->arpcom.ac_enaddr[2] = (u_char)(mac_addr >> 24); |
2911 | sc->arpcom.ac_enaddr[3] = (u_char)(mac_addr >> 16); |
2912 | sc->arpcom.ac_enaddr[4] = (u_char)(mac_addr >> 8); |
2913 | sc->arpcom.ac_enaddr[5] = (u_char)mac_addr; |
2914 | gotenaddr = 1; |
2915 | } |
2916 | } |
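 | /*
 | * The 0x484b signature tested above is apparently ASCII "HK",
 | * the marker the bootcode leaves in NIC shared memory when a
 | * valid station address has been deposited at offset 0x0c14.
 | */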
2917 | if (!gotenaddr) { |
2918 | int mac_offset = BGE_EE_MAC_OFFSET;
2919 | 
2920 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
2921 | mac_offset = BGE_EE_MAC_OFFSET_5906;
2922 | 
2923 | if (bge_read_nvram(sc, (caddr_t)&sc->arpcom.ac_enaddr,
2924 | mac_offset + 2, ETHER_ADDR_LEN) == 0)
2925 | gotenaddr = 1;
2926 | }
2927 | if (!gotenaddr && (!(sc->bge_flags & BGE_NO_EEPROM))) {
2928 | if (bge_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr,
2929 | BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN) == 0)
2930 | gotenaddr = 1; |
2931 | } |
2932 | |
2933 | #ifdef __sparc64__ |
2934 | if (!gotenaddr) { |
2935 | extern void myetheraddr(u_char *); |
2936 | |
2937 | myetheraddr(sc->arpcom.ac_enaddr); |
2938 | gotenaddr = 1; |
2939 | } |
2940 | #endif |
2941 | |
2942 | if (!gotenaddr) { |
2943 | printf(": failed to read station address\n"); |
2944 | goto fail_2; |
2945 | } |
2946 | |
2947 | /* Allocate the general information block and ring buffers. */ |
2948 | sc->bge_dmatag = pa->pa_dmat; |
2949 | DPRINTFN(5, ("bus_dmamem_alloc\n")); |
2950 | if (bus_dmamem_alloc(sc->bge_dmatag, sizeof(struct bge_ring_data),
2951 | PAGE_SIZE, 0, &sc->bge_ring_seg, 1, &sc->bge_ring_nseg,
2952 | BUS_DMA_NOWAIT)) {
2953 | printf(": can't alloc rx buffers\n");
2954 | goto fail_2;
2955 | }
2956 | DPRINTFN(5, ("bus_dmamem_map\n"));
2957 | if (bus_dmamem_map(sc->bge_dmatag, &sc->bge_ring_seg,
2958 | sc->bge_ring_nseg, sizeof(struct bge_ring_data), &kva,
2959 | BUS_DMA_NOWAIT)) {
2960 | printf(": can't map dma buffers (%lu bytes)\n",
2961 | sizeof(struct bge_ring_data));
2962 | goto fail_3;
2963 | }
2964 | DPRINTFN(5, ("bus_dmamem_create\n"));
2965 | if (bus_dmamap_create(sc->bge_dmatag, sizeof(struct bge_ring_data), 1,
2966 | sizeof(struct bge_ring_data), 0,
2967 | BUS_DMA_NOWAIT, &sc->bge_ring_map)) {
2968 | printf(": can't create dma map\n");
2969 | goto fail_4;
2970 | }
2971 | DPRINTFN(5, ("bus_dmamem_load\n"));
2972 | if (bus_dmamap_load(sc->bge_dmatag, sc->bge_ring_map, kva,
2973 | sizeof(struct bge_ring_data), NULL,
2974 | BUS_DMA_NOWAIT)) {
2975 | goto fail_5; |
2976 | } |
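 | /*
 | * The ring buffers follow the usual bus_dma(9) four-step dance:
 | * bus_dmamem_alloc() -> bus_dmamem_map() -> bus_dmamap_create() ->
 | * bus_dmamap_load(), with the fail_* labels at the bottom of this
 | * function unwinding the chain in reverse order on failure.
 | */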
2977 | |
2978 | DPRINTFN(5, ("bzero\n")); |
2979 | sc->bge_rdata = (struct bge_ring_data *)kva; |
2980 | |
2981 | bzero(sc->bge_rdata, sizeof(struct bge_ring_data));
2982 | 
2983 | /* Set default tuneable values. */
2984 | sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
2985 | sc->bge_rx_coal_ticks = 150; |
2986 | sc->bge_rx_max_coal_bds = 64; |
2987 | sc->bge_tx_coal_ticks = 300; |
2988 | sc->bge_tx_max_coal_bds = 400; |
2989 | |
2990 | /* 5705 limits RX return ring to 512 entries. */ |
2991 | if (BGE_IS_5700_FAMILY(sc) || BGE_IS_5717_PLUS(sc))
2992 | sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
2993 | else
2994 | sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
2995 | |
2996 | /* Set up ifnet structure */ |
2997 | ifp = &sc->arpcom.ac_if; |
2998 | ifp->if_softc = sc; |
2999 | ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
3000 | ifp->if_xflags = IFXF_MPSAFE;
3001 | ifp->if_ioctl = bge_ioctl;
3002 | ifp->if_qstart = bge_start;
3003 | ifp->if_watchdog = bge_watchdog;
3004 | ifq_set_maxlen(&ifp->if_snd, BGE_TX_RING_CNT - 1);
3005 | 
3006 | DPRINTFN(5, ("bcopy\n"));
3007 | bcopy(sc->bge_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
3008 | 
3009 | ifp->if_capabilities = IFCAP_VLAN_MTU;
3010 | 
3011 | #if NVLAN > 0
3012 | ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
3013 | #endif |
3014 | |
3015 | /* |
3016 | * 5700 B0 chips do not support checksumming correctly due |
3017 | * to hardware bugs. |
3018 | * |
3019 | * It seems all controllers have a bug that can generate UDP |
3020 | * datagrams with a checksum value of 0 when TX UDP checksum
3021 | * offloading is enabled. Generating a UDP checksum of 0 is
3022 | * a violation of RFC 768.
3023 | */ |
3024 | if (sc->bge_chipid != BGE_CHIPID_BCM5700_B0)
3025 | ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4;
3026 | 
3027 | if (BGE_IS_JUMBO_CAPABLE(sc))
3028 | ifp->if_hardmtu = BGE_JUMBO_MTU;
3029 | |
3030 | /* |
3031 | * Do MII setup. |
3032 | */ |
3033 | DPRINTFN(5, ("mii setup\n")); |
3034 | sc->bge_mii.mii_ifp = ifp; |
3035 | sc->bge_mii.mii_readreg = bge_miibus_readreg; |
3036 | sc->bge_mii.mii_writereg = bge_miibus_writereg; |
3037 | sc->bge_mii.mii_statchg = bge_miibus_statchg; |
3038 | |
3039 | /* |
3040 | * Figure out what sort of media we have by checking the hardware |
3041 | * config word in the first 32K of internal NIC memory, or fall back to |
3042 | * examining the EEPROM if necessary. Note: on some BCM5700 cards, |
3043 | * this value seems to be unset. If that's the case, we have to rely on |
3044 | * identifying the NIC by its PCI subsystem ID, as we do below for the |
3045 | * SysKonnect SK-9D41. |
3046 | */ |
3047 | if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER)
3048 | hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
3049 | else if (!(sc->bge_flags & BGE_NO_EEPROM)) {
3050 | if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
3051 | sizeof(hwcfg))) { |
3052 | printf(": failed to read media type\n"); |
3053 | goto fail_6; |
3054 | } |
3055 | hwcfg = ntohl(hwcfg);
3056 | } |
3057 | |
3058 | /* The SysKonnect SK-9D41 is a 1000baseSX card. */ |
3059 | if (PCI_PRODUCT(subid) == SK_SUBSYSID_9D41 ||
3060 | (hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) {
3061 | if (BGE_IS_5700_FAMILY(sc))
3062 | sc->bge_flags |= BGE_FIBER_TBI;
3063 | else
3064 | sc->bge_flags |= BGE_FIBER_MII;
3065 | } |
3066 | |
3067 | /* Take advantage of single-shot MSI. */ |
3068 | if (BGE_IS_5755_PLUS(sc) && sc->bge_flags & BGE_MSI)
3069 | CSR_WRITE_4(sc, BGE_MSI_MODE, CSR_READ_4(sc, BGE_MSI_MODE) &
3070 | ~BGE_MSIMODE_ONE_SHOT_DISABLE);
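 | /*
 | * One-shot MSI: clearing BGE_MSIMODE_ONE_SHOT_DISABLE lets the
 | * chip deassert the interrupt by itself after each MSI message,
 | * presumably sparing the handler an explicit re-arm write.
 | */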
3071 | |
3072 | /* Hookup IRQ last. */ |
3073 | DPRINTFN(5, ("pci_intr_establish\n")); |
3074 | sc->bge_intrhand = pci_intr_establish(pc, ih, IPL_NET | IPL_MPSAFE,
3075 | bge_intr, sc, sc->bge_dev.dv_xname);
3076 | if (sc->bge_intrhand == NULL) {
3077 | printf(": couldn't establish interrupt");
3078 | if (intrstr != NULL)
3079 | printf(" at %s", intrstr); |
3080 | printf("\n"); |
3081 | goto fail_6; |
3082 | } |
3083 | |
3084 | /* |
3085 | * A Broadcom chip was detected. Inform the world. |
3086 | */ |
3087 | printf(": %s, address %s\n", intrstr, |
3088 | ether_sprintf(sc->arpcom.ac_enaddr)); |
3089 | |
3090 | if (sc->bge_flags & BGE_FIBER_TBI) {
3091 | ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd,
3092 | bge_ifmedia_sts);
3093 | ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
3094 | ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX|IFM_FDX,
3095 | 0, NULL);
3096 | ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
3097 | ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO);
3098 | sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media; |
3099 | } else { |
3100 | int mii_flags; |
3101 | |
3102 | /* |
3103 | * Do transceiver setup. |
3104 | */ |
3105 | ifmedia_init(&sc->bge_mii.mii_media, 0, bge_ifmedia_upd, |
3106 | bge_ifmedia_sts); |
3107 | mii_flags = MIIF_DOPAUSE;
3108 | if (sc->bge_flags & BGE_FIBER_MII)
3109 | mii_flags |= MIIF_HAVEFIBER;
3110 | mii_attach(&sc->bge_dev, &sc->bge_mii, 0xffffffff,
3111 | sc->bge_phy_addr, MII_OFFSET_ANY, mii_flags);
3112 | |
3113 | if (LIST_FIRST(&sc->bge_mii.mii_phys) == NULL) {
3114 | printf("%s: no PHY found!\n", sc->bge_dev.dv_xname); |
3115 | ifmedia_add(&sc->bge_mii.mii_media, |
3116 | IFM_ETHER|IFM_MANUAL, 0, NULL);
3117 | ifmedia_set(&sc->bge_mii.mii_media,
3118 | IFM_ETHER|IFM_MANUAL);
3119 | } else
3120 | ifmedia_set(&sc->bge_mii.mii_media,
3121 | IFM_ETHER|IFM_AUTO);
3122 | } |
3123 | |
3124 | /* |
3125 | * Call MI attach routine. |
3126 | */ |
3127 | if_attach(ifp); |
3128 | ether_ifattach(ifp); |
3129 | |
3130 | timeout_set(&sc->bge_timeout, bge_tick, sc); |
3131 | timeout_set(&sc->bge_rxtimeout, bge_rxtick, sc); |
3132 | timeout_set(&sc->bge_rxtimeout_jumbo, bge_rxtick_jumbo, sc); |
3133 | return; |
3134 | |
3135 | fail_6: |
3136 | bus_dmamap_unload(sc->bge_dmatag, sc->bge_ring_map)(*(sc->bge_dmatag)->_dmamap_unload)((sc->bge_dmatag) , (sc->bge_ring_map)); |
3137 | |
3138 | fail_5: |
3139 | bus_dmamap_destroy(sc->bge_dmatag, sc->bge_ring_map)(*(sc->bge_dmatag)->_dmamap_destroy)((sc->bge_dmatag ), (sc->bge_ring_map)); |
3140 | |
3141 | fail_4: |
3142 | bus_dmamem_unmap(sc->bge_dmatag, (caddr_t)sc->bge_rdata,(*(sc->bge_dmatag)->_dmamem_unmap)((sc->bge_dmatag), ((caddr_t)sc->bge_rdata), (sizeof(struct bge_ring_data))) |
3143 | sizeof(struct bge_ring_data))(*(sc->bge_dmatag)->_dmamem_unmap)((sc->bge_dmatag), ((caddr_t)sc->bge_rdata), (sizeof(struct bge_ring_data))); |
3144 | |
3145 | fail_3: |
3146 | bus_dmamem_free(sc->bge_dmatag, &sc->bge_ring_seg, sc->bge_ring_nseg)(*(sc->bge_dmatag)->_dmamem_free)((sc->bge_dmatag), ( &sc->bge_ring_seg), (sc->bge_ring_nseg)); |
3147 | |
3148 | fail_2: |
3149 | if ((sc->bge_flags & BGE_APE0x00080000) != 0) |
3150 | bus_space_unmap(sc->bge_apetag, sc->bge_apehandle, |
3151 | sc->bge_apesize); |
3152 | |
3153 | fail_1: |
3154 | bus_space_unmap(sc->bge_btag, sc->bge_bhandle, sc->bge_bsize); |
3155 | } |
3156 | |
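/*
 * bge_detach: the reverse of attach. A descriptive summary added here;
 * the steps below mirror the attach path: tear down the interrupt, stop
 * the chip, detach any PHYs and media, then release the ring DMA memory
 * and bus-space mappings in the opposite order of their allocation.
 */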
3157 | int |
3158 | bge_detach(struct device *self, int flags) |
3159 | { |
3160 | struct bge_softc *sc = (struct bge_softc *)self; |
3161 | struct ifnet *ifp = &sc->arpcom.ac_if; |
3162 | |
3163 | if (sc->bge_intrhand) |
3164 | pci_intr_disestablish(sc->bge_pa.pa_pc, sc->bge_intrhand); |
3165 | |
3166 | bge_stop(sc, 1); |
3167 | |
3168 | /* Detach any PHYs we might have. */ |
3169 | if (LIST_FIRST(&sc->bge_mii.mii_phys)((&sc->bge_mii.mii_phys)->lh_first) != NULL((void *)0)) |
3170 | mii_detach(&sc->bge_mii, MII_PHY_ANY-1, MII_OFFSET_ANY-1); |
3171 | |
3172 | /* Delete any remaining media. */ |
3173 | ifmedia_delete_instance(&sc->bge_mii.mii_media, IFM_INST_ANY((uint64_t) -1)); |
3174 | |
3175 | ether_ifdetach(ifp); |
3176 | if_detach(ifp); |
3177 | |
3178 | bus_dmamap_unload(sc->bge_dmatag, sc->bge_ring_map)(*(sc->bge_dmatag)->_dmamap_unload)((sc->bge_dmatag) , (sc->bge_ring_map)); |
3179 | bus_dmamap_destroy(sc->bge_dmatag, sc->bge_ring_map)(*(sc->bge_dmatag)->_dmamap_destroy)((sc->bge_dmatag ), (sc->bge_ring_map)); |
3180 | bus_dmamem_unmap(sc->bge_dmatag, (caddr_t)sc->bge_rdata,(*(sc->bge_dmatag)->_dmamem_unmap)((sc->bge_dmatag), ((caddr_t)sc->bge_rdata), (sizeof(struct bge_ring_data))) |
3181 | sizeof(struct bge_ring_data))(*(sc->bge_dmatag)->_dmamem_unmap)((sc->bge_dmatag), ((caddr_t)sc->bge_rdata), (sizeof(struct bge_ring_data))); |
3182 | bus_dmamem_free(sc->bge_dmatag, &sc->bge_ring_seg, sc->bge_ring_nseg)(*(sc->bge_dmatag)->_dmamem_free)((sc->bge_dmatag), ( &sc->bge_ring_seg), (sc->bge_ring_nseg)); |
3183 | |
3184 | if ((sc->bge_flags & BGE_APE0x00080000) != 0) |
3185 | bus_space_unmap(sc->bge_apetag, sc->bge_apehandle, |
3186 | sc->bge_apesize); |
3187 | |
3188 | bus_space_unmap(sc->bge_btag, sc->bge_bhandle, sc->bge_bsize); |
3189 | return (0); |
3190 | } |
3191 | |
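/*
 * Power-management hook: on suspend, stop the chip if the interface is
 * running; on resume, reinitialize it if the interface is configured up.
 */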
3192 | int |
3193 | bge_activate(struct device *self, int act) |
3194 | { |
3195 | struct bge_softc *sc = (struct bge_softc *)self; |
3196 | struct ifnet *ifp = &sc->arpcom.ac_if; |
3197 | int rv = 0; |
3198 | |
3199 | switch (act) { |
3200 | case DVACT_SUSPEND3: |
3201 | rv = config_activate_children(self, act); |
3202 | if (ifp->if_flags & IFF_RUNNING0x40) |
3203 | bge_stop(sc, 0); |
3204 | break; |
3205 | case DVACT_RESUME4: |
3206 | if (ifp->if_flags & IFF_UP0x1) |
3207 | bge_init(sc); |
3208 | break; |
3209 | default: |
3210 | rv = config_activate_children(self, act); |
3211 | break; |
3212 | } |
3213 | return (rv); |
3214 | } |
3215 | |
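/*
 * Issue a global chip reset: save the PCI state the reset will clobber,
 * post the handshake magic number to SRAM, reset the core clocks,
 * restore the PCI state, and then wait for the on-chip firmware (or the
 * 5906's VCPU) to signal that its initialization is complete.
 */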
3216 | void |
3217 | bge_reset(struct bge_softc *sc) |
3218 | { |
3219 | struct pci_attach_args *pa = &sc->bge_pa; |
3220 | pcireg_t cachesize, command, devctl; |
3221 | u_int32_t reset, mac_mode, mac_mode_mask, val; |
3222 | void (*write_op)(struct bge_softc *, int, int); |
3223 | int i; |
3224 | |
3225 | mac_mode_mask = BGE_MACMODE_HALF_DUPLEX0x00000002 | BGE_MACMODE_PORTMODE0x0000000C; |
3226 | if ((sc->bge_mfw_flags & BGE_MFW_ON_APE0x00000002) != 0) |
3227 | mac_mode_mask |= BGE_MACMODE_APE_RX_EN0x08000000 | BGE_MACMODE_APE_TX_EN0x10000000; |
3228 | mac_mode = CSR_READ_4(sc, BGE_MAC_MODE)((sc->bge_btag)->read_4((sc->bge_bhandle), (0x0400)) ) & mac_mode_mask; |
3229 | |
3230 | if (BGE_IS_575X_PLUS(sc)((sc)->bge_flags & 0x00002000) && !BGE_IS_5714_FAMILY(sc)((sc)->bge_flags & 0x00008000) && |
3231 | BGE_ASICREV(sc->bge_chipid)((sc->bge_chipid) >> 12) != BGE_ASICREV_BCM59060x0c) { |
3232 | if (sc->bge_flags & BGE_PCIE0x00000020) |
3233 | write_op = bge_writembx; |
3234 | else |
3235 | write_op = bge_writemem_ind; |
3236 | } else |
3237 | write_op = bge_writereg_ind; |
3238 | |
3239 | if (BGE_ASICREV(sc->bge_chipid)((sc->bge_chipid) >> 12) != BGE_ASICREV_BCM57000x07 && |
3240 | BGE_ASICREV(sc->bge_chipid)((sc->bge_chipid) >> 12) != BGE_ASICREV_BCM57010x00 && |
3241 | !(sc->bge_flags & BGE_NO_EEPROM0x00000080)) { |
3242 | CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1)((sc->bge_btag)->write_4((sc->bge_bhandle), (0x7020) , (0x00000002))); |
3243 | for (i = 0; i < 8000; i++) { |
3244 | if (CSR_READ_4(sc, BGE_NVRAM_SWARB)((sc->bge_btag)->read_4((sc->bge_bhandle), (0x7020)) ) & |
3245 | BGE_NVRAMSWARB_GNT10x00000200) |
3246 | break; |
3247 | DELAY(20)(*delay_func)(20); |
3248 | } |
3249 | if (i == 8000) |
3250 | printf("%s: nvram lock timed out\n", |
3251 | sc->bge_dev.dv_xname); |
3252 | } |
3253 | /* Take APE lock when performing reset. */ |
3254 | bge_ape_lock(sc, BGE_APE_LOCK_GRC1); |
3255 | |
3256 | /* Save some important PCI state. */ |
3257 | cachesize = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CACHESZ0x0C); |
3258 | command = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD0x04); |
3259 | |
3260 | pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL0x68, |
3261 | BGE_PCIMISCCTL_INDIRECT_ACCESS0x00000080 | BGE_PCIMISCCTL_MASK_PCI_INTR0x00000002 | |
3262 | BGE_PCIMISCCTL_ENDIAN_WORDSWAP0x00000008 | BGE_PCIMISCCTL_PCISTATE_RW0x00000010); |
3263 | |
3264 | /* Disable fastboot on controllers that support it. */ |
3265 | if (BGE_ASICREV(sc->bge_chipid)((sc->bge_chipid) >> 12) == BGE_ASICREV_BCM57520x06 || |
3266 | BGE_IS_5755_PLUS(sc)((sc)->bge_flags & 0x00004000)) |
3267 | CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0)((sc->bge_btag)->write_4((sc->bge_bhandle), (0x6894) , (0))); |
3268 | |
3269 | /*
3270 | * Write the magic number to SRAM at offset 0xB50.
3271 | * When the firmware finishes its initialization, it will write
3272 | * the one's complement of BGE_MAGIC_NUMBER to the same location.
3273 | */
3274 | bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM0x00000B50, BGE_MAGIC_NUMBER0x4B657654); |
3275 | |
3276 | reset = BGE_MISCCFG_RESET_CORE_CLOCKS0x00000001 | BGE_32BITTIME_66MHZ(0x41 << 1); |
3277 | |
3278 | if (sc->bge_flags & BGE_PCIE0x00000020) { |
3279 | if (BGE_ASICREV(sc->bge_chipid)((sc->bge_chipid) >> 12) != BGE_ASICREV_BCM57850x5785 && |
3280 | !BGE_IS_5717_PLUS(sc)((sc)->bge_flags & 0x00020000)) { |
3281 | if (CSR_READ_4(sc, 0x7e2c)((sc->bge_btag)->read_4((sc->bge_bhandle), (0x7e2c)) ) == 0x60) { |
3282 | /* PCI Express 1.0 system */ |
3283 | CSR_WRITE_4(sc, 0x7e2c, 0x20)((sc->bge_btag)->write_4((sc->bge_bhandle), (0x7e2c) , (0x20))); |
3284 | } |
3285 | } |
3286 | if (sc->bge_chipid != BGE_CHIPID_BCM5750_A00x4000) { |
3287 | /* |
3288 | * Prevent PCI Express link training |
3289 | * during global reset. |
3290 | */ |
3291 | CSR_WRITE_4(sc, BGE_MISC_CFG, (1<<29))((sc->bge_btag)->write_4((sc->bge_bhandle), (0x6804) , ((1<<29)))); |
3292 | reset |= (1<<29); |
3293 | } |
3294 | } |
3295 | |
3296 | if (BGE_ASICREV(sc->bge_chipid)((sc->bge_chipid) >> 12) == BGE_ASICREV_BCM59060x0c) { |
3297 | val = CSR_READ_4(sc, BGE_VCPU_STATUS)((sc->bge_btag)->read_4((sc->bge_bhandle), (0x5100)) ); |
3298 | CSR_WRITE_4(sc, BGE_VCPU_STATUS,((sc->bge_btag)->write_4((sc->bge_bhandle), (0x5100) , (val | 0x08000000))) |
3299 | val | BGE_VCPU_STATUS_DRV_RESET)((sc->bge_btag)->write_4((sc->bge_bhandle), (0x5100) , (val | 0x08000000))); |
3300 | val = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL)((sc->bge_btag)->read_4((sc->bge_bhandle), (0x6890)) ); |
3301 | CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL,((sc->bge_btag)->write_4((sc->bge_bhandle), (0x6890) , (val & ~0x00400000))) |
3302 | val & ~BGE_VCPU_EXT_CTRL_HALT_CPU)((sc->bge_btag)->write_4((sc->bge_bhandle), (0x6890) , (val & ~0x00400000))); |
3303 | |
3304 | sc->bge_flags |= BGE_NO_EEPROM0x00000080; |
3305 | } |
3306 | |
3307 | /* |
3308 | * Set GPHY Power Down Override to leave GPHY |
3309 | * powered up in D0 uninitialized. |
3310 | */ |
3311 | if (BGE_IS_5705_PLUS(sc)((sc)->bge_flags & 0x00001000) && |
3312 | (sc->bge_flags & BGE_CPMU_PRESENT0x00100000) == 0) |
3313 | reset |= BGE_MISCCFG_KEEP_GPHY_POWER0x04000000; |
3314 | |
3315 | /* Issue global reset */ |
3316 | write_op(sc, BGE_MISC_CFG0x6804, reset); |
3317 | |
3318 | if (sc->bge_flags & BGE_PCIE0x00000020) |
3319 | DELAY(100 * 1000)(*delay_func)(100 * 1000); |
3320 | else |
3321 | DELAY(1000)(*delay_func)(1000); |
3322 | |
3323 | if (sc->bge_flags & BGE_PCIE0x00000020) { |
3324 | if (sc->bge_chipid == BGE_CHIPID_BCM5750_A00x4000) { |
3325 | pcireg_t v; |
3326 | |
3327 | DELAY(500000)(*delay_func)(500000); /* wait for link training to complete */ |
3328 | v = pci_conf_read(pa->pa_pc, pa->pa_tag, 0xc4); |
3329 | pci_conf_write(pa->pa_pc, pa->pa_tag, 0xc4, v | (1<<15)); |
3330 | } |
3331 | |
3332 | devctl = pci_conf_read(pa->pa_pc, pa->pa_tag, sc->bge_expcap + |
3333 | PCI_PCIE_DCSR0x08); |
3334 | /* Clear enable no snoop and disable relaxed ordering. */ |
3335 | devctl &= ~(PCI_PCIE_DCSR_ERO0x00000010 | PCI_PCIE_DCSR_ENS0x00000800); |
3336 | /* Set PCI Express max payload size. */ |
3337 | devctl = (devctl & ~PCI_PCIE_DCSR_MPS0x00007000) | sc->bge_expmrq; |
3338 | /* Clear error status. */ |
3339 | devctl |= PCI_PCIE_DCSR_CEE0x00010000 | PCI_PCIE_DCSR_NFE0x00020000 | |
3340 | PCI_PCIE_DCSR_FEE0x00040000 | PCI_PCIE_DCSR_URE0x00080000; |
3341 | pci_conf_write(pa->pa_pc, pa->pa_tag, sc->bge_expcap + |
3342 | PCI_PCIE_DCSR0x08, devctl); |
3343 | } |
3344 | |
3345 | /* Reset some of the PCI state that got zapped by reset */ |
3346 | pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL0x68, |
3347 | BGE_PCIMISCCTL_INDIRECT_ACCESS0x00000080 | BGE_PCIMISCCTL_MASK_PCI_INTR0x00000002 | |
3348 | BGE_PCIMISCCTL_ENDIAN_WORDSWAP0x00000008 | BGE_PCIMISCCTL_PCISTATE_RW0x00000010); |
3349 | val = BGE_PCISTATE_ROM_ENABLE0x00000020 | BGE_PCISTATE_ROM_RETRY_ENABLE0x00000040; |
3350 | if (sc->bge_chipid == BGE_CHIPID_BCM5704_A00x2000 && |
3351 | (sc->bge_flags & BGE_PCIX0x00000010) != 0) |
3352 | val |= BGE_PCISTATE_RETRY_SAME_DMA0x00002000; |
3353 | if ((sc->bge_mfw_flags & BGE_MFW_ON_APE0x00000002) != 0) |
3354 | val |= BGE_PCISTATE_ALLOW_APE_CTLSPC_WR0x00010000 | |
3355 | BGE_PCISTATE_ALLOW_APE_SHMEM_WR0x00020000 | |
3356 | BGE_PCISTATE_ALLOW_APE_PSPACE_WR0x00040000; |
3357 | pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE0x70, val); |
3358 | pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_CACHESZ0x0C, cachesize); |
3359 | pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD0x04, command); |
3360 | |
3361 | /* Re-enable MSI, if necessary, and enable memory arbiter. */ |
3362 | if (BGE_IS_5714_FAMILY(sc)((sc)->bge_flags & 0x00008000)) { |
3363 | /* This chip disables MSI on reset. */ |
3364 | if (sc->bge_flags & BGE_MSI0x00400000) { |
3365 | val = pci_conf_read(pa->pa_pc, pa->pa_tag, |
3366 | sc->bge_msicap + PCI_MSI_MC0x00); |
3367 | pci_conf_write(pa->pa_pc, pa->pa_tag, |
3368 | sc->bge_msicap + PCI_MSI_MC0x00, |
3369 | val | PCI_MSI_MC_MSIE0x00010000); |
3370 | val = CSR_READ_4(sc, BGE_MSI_MODE)((sc->bge_btag)->read_4((sc->bge_bhandle), (0x6000)) ); |
3371 | CSR_WRITE_4(sc, BGE_MSI_MODE,((sc->bge_btag)->write_4((sc->bge_bhandle), (0x6000) , (val | 0x00000002))) |
3372 | val | BGE_MSIMODE_ENABLE)((sc->bge_btag)->write_4((sc->bge_bhandle), (0x6000) , (val | 0x00000002))); |
3373 | } |
3374 | val = CSR_READ_4(sc, BGE_MARB_MODE)((sc->bge_btag)->read_4((sc->bge_bhandle), (0x4000)) ); |
3375 | CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val)((sc->bge_btag)->write_4((sc->bge_bhandle), (0x4000) , (0x00000002 | val))); |
3376 | } else |
3377 | CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE)((sc->bge_btag)->write_4((sc->bge_bhandle), (0x4000) , (0x00000002))); |
3378 | |
3379 | /* Fix up byte swapping */ |
3380 | CSR_WRITE_4(sc, BGE_MODE_CTL, bge_dma_swap_options(sc))((sc->bge_btag)->write_4((sc->bge_bhandle), (0x6800) , (bge_dma_swap_options(sc)))); |
3381 | |
3382 | val = CSR_READ_4(sc, BGE_MAC_MODE)((sc->bge_btag)->read_4((sc->bge_bhandle), (0x0400)) ); |
3383 | val = (val & ~mac_mode_mask) | mac_mode; |
3384 | CSR_WRITE_4(sc, BGE_MAC_MODE, val)((sc->bge_btag)->write_4((sc->bge_bhandle), (0x0400) , (val))); |
3385 | DELAY(40)(*delay_func)(40); |
3386 | |
3387 | bge_ape_unlock(sc, BGE_APE_LOCK_GRC1); |
3388 | |
3389 | if (BGE_ASICREV(sc->bge_chipid)((sc->bge_chipid) >> 12) == BGE_ASICREV_BCM59060x0c) { |
3390 | for (i = 0; i < BGE_TIMEOUT100000; i++) { |
3391 | val = CSR_READ_4(sc, BGE_VCPU_STATUS)((sc->bge_btag)->read_4((sc->bge_bhandle), (0x5100)) ); |
3392 | if (val & BGE_VCPU_STATUS_INIT_DONE0x04000000) |
3393 | break; |
3394 | DELAY(100)(*delay_func)(100); |
3395 | } |
3396 | |
3397 | if (i >= BGE_TIMEOUT100000) |
3398 | printf("%s: reset timed out\n", sc->bge_dev.dv_xname); |
3399 | } else { |
3400 | /* |
3401 | * Poll until we see 1's complement of the magic number. |
3402 | * This indicates that the firmware initialization |
3403 | * is complete. We expect this to fail if no SEEPROM |
3404 | * is fitted. |
3405 | */ |
3406 | for (i = 0; i < BGE_TIMEOUT100000 * 10; i++) { |
3407 | val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM0x00000B50); |
3408 | if (val == ~BGE_MAGIC_NUMBER0x4B657654) |
3409 | break; |
3410 | DELAY(10)(*delay_func)(10); |
3411 | } |
3412 | |
3413 | if ((i >= BGE_TIMEOUT100000 * 10) && |
3414 | (!(sc->bge_flags & BGE_NO_EEPROM0x00000080))) |
3415 | printf("%s: firmware handshake timed out\n", |
3416 | sc->bge_dev.dv_xname); |
3417 | /* BCM57765 A0 needs additional time before accessing. */ |
3418 | if (sc->bge_chipid == BGE_CHIPID_BCM57765_A00x57785000) |
3419 | DELAY(10 * 1000)(*delay_func)(10 * 1000); /* XXX */ |
3420 | } |
3421 | |
3422 | /* |
3423 | * The 5704 in TBI mode apparently needs some special |
3424 | * adjustment to ensure the SERDES drive level is set |
3425 | * to 1.2V. |
3426 | */ |
3427 | if (sc->bge_flags & BGE_FIBER_TBI0x00000200 && |
3428 | BGE_ASICREV(sc->bge_chipid)((sc->bge_chipid) >> 12) == BGE_ASICREV_BCM57040x02) { |
3429 | val = CSR_READ_4(sc, BGE_SERDES_CFG)((sc->bge_btag)->read_4((sc->bge_bhandle), (0x0590)) ); |
3430 | val = (val & ~0xFFF) | 0x880; |
3431 | CSR_WRITE_4(sc, BGE_SERDES_CFG, val)((sc->bge_btag)->write_4((sc->bge_bhandle), (0x0590) , (val))); |
3432 | } |
3433 | |
3434 | if (sc->bge_flags & BGE_PCIE0x00000020 && |
3435 | !BGE_IS_5717_PLUS(sc)((sc)->bge_flags & 0x00020000) && |
3436 | sc->bge_chipid != BGE_CHIPID_BCM5750_A00x4000 && |
3437 | BGE_ASICREV(sc->bge_chipid)((sc->bge_chipid) >> 12) != BGE_ASICREV_BCM57850x5785) { |
3438 | /* Enable Data FIFO protection. */ |
3439 | val = CSR_READ_4(sc, 0x7c00)((sc->bge_btag)->read_4((sc->bge_bhandle), (0x7c00)) ); |
3440 | CSR_WRITE_4(sc, 0x7c00, val | (1<<25))((sc->bge_btag)->write_4((sc->bge_bhandle), (0x7c00) , (val | (1<<25)))); |
3441 | } |
3442 | |
3443 | if (BGE_ASICREV(sc->bge_chipid)((sc->bge_chipid) >> 12) == BGE_ASICREV_BCM57200x5720) |
3444 | BGE_CLRBIT(sc, BGE_CPMU_CLCK_ORIDE,((sc->bge_btag)->write_4((sc->bge_bhandle), (0x3624) , ((((sc->bge_btag)->read_4((sc->bge_bhandle), (0x3624 ))) & ~(0x80000000))))) |
3445 | CPMU_CLCK_ORIDE_MAC_ORIDE_EN)((sc->bge_btag)->write_4((sc->bge_bhandle), (0x3624) , ((((sc->bge_btag)->read_4((sc->bge_bhandle), (0x3624 ))) & ~(0x80000000))))); |
3446 | } |
3447 | |
3448 | /* |
3449 | * Frame reception handling. This is called if there's a frame |
3450 | * on the receive return list. |
3451 | * |
3452 | * Note: we have to be able to handle two possibilities here: |
3453 | * 1) the frame is from the jumbo receive ring |
3454 | * 2) the frame is from the standard receive ring |
3455 | */ |
3456 | |
3457 | void |
3458 | bge_rxeof(struct bge_softc *sc) |
3459 | { |
3460 | struct mbuf_list ml = MBUF_LIST_INITIALIZER(){ ((void *)0), ((void *)0), 0 }; |
3461 | struct ifnet *ifp; |
3462 | uint16_t rx_prod, rx_cons; |
3463 | int stdcnt = 0, jumbocnt = 0; |
3464 | bus_dmamap_t dmamap; |
3465 | bus_addr_t offset, toff; |
3466 | bus_size_t tlen; |
3467 | int tosync; |
3468 | int livelocked; |
3469 | |
3470 | rx_cons = sc->bge_rx_saved_considx; |
3471 | rx_prod = sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx; |
3472 | |
3473 | /* Nothing to do */ |
3474 | if (rx_cons == rx_prod) |
3475 | return; |
3476 | |
3477 | ifp = &sc->arpcom.ac_if; |
3478 | |
3479 | bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,(*(sc->bge_dmatag)->_dmamap_sync)((sc->bge_dmatag), ( sc->bge_ring_map), (__builtin_offsetof(struct bge_ring_data , bge_status_block)), (sizeof (struct bge_status_block)), (0x02 )) |
3480 | offsetof(struct bge_ring_data, bge_status_block),(*(sc->bge_dmatag)->_dmamap_sync)((sc->bge_dmatag), ( sc->bge_ring_map), (__builtin_offsetof(struct bge_ring_data , bge_status_block)), (sizeof (struct bge_status_block)), (0x02 )) |
3481 | sizeof (struct bge_status_block),(*(sc->bge_dmatag)->_dmamap_sync)((sc->bge_dmatag), ( sc->bge_ring_map), (__builtin_offsetof(struct bge_ring_data , bge_status_block)), (sizeof (struct bge_status_block)), (0x02 )) |
3482 | BUS_DMASYNC_POSTREAD)(*(sc->bge_dmatag)->_dmamap_sync)((sc->bge_dmatag), ( sc->bge_ring_map), (__builtin_offsetof(struct bge_ring_data , bge_status_block)), (sizeof (struct bge_status_block)), (0x02 )); |
3483 | |
3484 | offset = offsetof(struct bge_ring_data, bge_rx_return_ring)__builtin_offsetof(struct bge_ring_data, bge_rx_return_ring); |
3485 | tosync = rx_prod - rx_cons; |
3486 | |
3487 | toff = offset + (rx_cons * sizeof (struct bge_rx_bd)); |
3488 | |
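	/*
	 * The return ring is circular: if the producer index has wrapped
	 * around below the consumer, sync the tail of the ring here and
	 * let the sync below cover the remainder from the ring base.
	 */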
3489 | if (tosync < 0) { |
3490 | tlen = (sc->bge_return_ring_cnt - rx_cons) * |
3491 | sizeof (struct bge_rx_bd); |
3492 | bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,(*(sc->bge_dmatag)->_dmamap_sync)((sc->bge_dmatag), ( sc->bge_ring_map), (toff), (tlen), (0x02)) |
3493 | toff, tlen, BUS_DMASYNC_POSTREAD)(*(sc->bge_dmatag)->_dmamap_sync)((sc->bge_dmatag), ( sc->bge_ring_map), (toff), (tlen), (0x02)); |
3494 | tosync = -tosync; |
3495 | } |
3496 | |
3497 | bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,(*(sc->bge_dmatag)->_dmamap_sync)((sc->bge_dmatag), ( sc->bge_ring_map), (offset), (tosync * sizeof (struct bge_rx_bd )), (0x02)) |
3498 | offset, tosync * sizeof (struct bge_rx_bd),(*(sc->bge_dmatag)->_dmamap_sync)((sc->bge_dmatag), ( sc->bge_ring_map), (offset), (tosync * sizeof (struct bge_rx_bd )), (0x02)) |
3499 | BUS_DMASYNC_POSTREAD)(*(sc->bge_dmatag)->_dmamap_sync)((sc->bge_dmatag), ( sc->bge_ring_map), (offset), (tosync * sizeof (struct bge_rx_bd )), (0x02)); |
3500 | |
3501 | while (rx_cons != rx_prod) { |
3502 | struct bge_rx_bd *cur_rx; |
3503 | u_int32_t rxidx; |
3504 | struct mbuf *m = NULL((void *)0); |
3505 | |
3506 | cur_rx = &sc->bge_rdata->bge_rx_return_ring[rx_cons]; |
3507 | |
3508 | rxidx = cur_rx->bge_idx; |
3509 | BGE_INC(rx_cons, sc->bge_return_ring_cnt)(rx_cons) = (rx_cons + 1) % sc->bge_return_ring_cnt; |
3510 | |
3511 | if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING0x0020) { |
3512 | m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx]; |
3513 | sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL((void *)0); |
3514 | |
3515 | jumbocnt++; |
3516 | |
3517 | dmamap = sc->bge_cdata.bge_rx_jumbo_map[rxidx]; |
3518 | bus_dmamap_sync(sc->bge_dmatag, dmamap, 0,(*(sc->bge_dmatag)->_dmamap_sync)((sc->bge_dmatag), ( dmamap), (0), (dmamap->dm_mapsize), (0x02)) |
3519 | dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD)(*(sc->bge_dmatag)->_dmamap_sync)((sc->bge_dmatag), ( dmamap), (0), (dmamap->dm_mapsize), (0x02)); |
3520 | bus_dmamap_unload(sc->bge_dmatag, dmamap)(*(sc->bge_dmatag)->_dmamap_unload)((sc->bge_dmatag) , (dmamap)); |
3521 | |
3522 | if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR0x0400) { |
3523 | m_freem(m); |
3524 | continue; |
3525 | } |
3526 | } else { |
3527 | m = sc->bge_cdata.bge_rx_std_chain[rxidx]; |
3528 | sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL((void *)0); |
3529 | |
3530 | stdcnt++; |
3531 | |
3532 | dmamap = sc->bge_cdata.bge_rx_std_map[rxidx]; |
3533 | bus_dmamap_sync(sc->bge_dmatag, dmamap, 0,(*(sc->bge_dmatag)->_dmamap_sync)((sc->bge_dmatag), ( dmamap), (0), (dmamap->dm_mapsize), (0x02)) |
3534 | dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD)(*(sc->bge_dmatag)->_dmamap_sync)((sc->bge_dmatag), ( dmamap), (0), (dmamap->dm_mapsize), (0x02)); |
3535 | bus_dmamap_unload(sc->bge_dmatag, dmamap)(*(sc->bge_dmatag)->_dmamap_unload)((sc->bge_dmatag) , (dmamap)); |
3536 | |
3537 | if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR0x0400) { |
3538 | m_freem(m); |
3539 | continue; |
3540 | } |
3541 | } |
3542 | |
3543 | #ifdef __STRICT_ALIGNMENT |
3544 | /* |
3545 | * The i386 allows unaligned accesses, but for other |
3546 | * platforms we must make sure the payload is aligned. |
3547 | */ |
3548 | if (sc->bge_flags & BGE_RX_ALIGNBUG0x00000008) { |
3549 | bcopy(m->m_datam_hdr.mh_data, m->m_datam_hdr.mh_data + ETHER_ALIGN2, |
3550 | cur_rx->bge_len); |
3551 | m->m_datam_hdr.mh_data += ETHER_ALIGN2; |
3552 | } |
3553 | #endif |
3554 | m->m_pkthdrM_dat.MH.MH_pkthdr.len = m->m_lenm_hdr.mh_len = cur_rx->bge_len - ETHER_CRC_LEN4; |
3555 | |
3556 | bge_rxcsum(sc, cur_rx, m); |
3557 | |
3558 | #if NVLAN1 > 0 |
3559 | if (ifp->if_capabilitiesif_data.ifi_capabilities & IFCAP_VLAN_HWTAGGING0x00000020 && |
3560 | cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG0x0040) { |
3561 | m->m_pkthdrM_dat.MH.MH_pkthdr.ether_vtag = cur_rx->bge_vlan_tag; |
3562 | m->m_flagsm_hdr.mh_flags |= M_VLANTAG0x0020; |
3563 | } |
3564 | #endif |
3565 | |
3566 | ml_enqueue(&ml, m); |
3567 | } |
3568 | |
3569 | sc->bge_rx_saved_considx = rx_cons; |
3570 | bge_writembx(sc, BGE_MBX_RX_CONS0_LO0x0284, sc->bge_rx_saved_considx); |
3571 | |
3572 | livelocked = ifiq_input(&ifp->if_rcv, &ml); |
3573 | if (stdcnt) { |
3574 | if_rxr_put(&sc->bge_std_ring, stdcnt)do { (&sc->bge_std_ring)->rxr_alive -= (stdcnt); } while (0); |
3575 | if (livelocked) |
3576 | if_rxr_livelocked(&sc->bge_std_ring); |
3577 | bge_fill_rx_ring_std(sc); |
3578 | } |
3579 | if (jumbocnt) { |
3580 | if_rxr_put(&sc->bge_jumbo_ring, jumbocnt)do { (&sc->bge_jumbo_ring)->rxr_alive -= (jumbocnt) ; } while (0); |
3581 | if (livelocked) |
3582 | if_rxr_livelocked(&sc->bge_jumbo_ring); |
3583 | bge_fill_rx_ring_jumbo(sc); |
3584 | } |
3585 | } |
3586 | |
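/*
 * Translate the chip's RX checksum status into mbuf csum_flags,
 * respecting the known checksum bugs of each chip generation.
 */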
3587 | void |
3588 | bge_rxcsum(struct bge_softc *sc, struct bge_rx_bd *cur_rx, struct mbuf *m) |
3589 | { |
3590 | if (sc->bge_chipid == BGE_CHIPID_BCM5700_B00x7100) { |
3591 | /* |
3592 | * 5700 B0 chips do not support checksumming correctly due |
3593 | * to hardware bugs. |
3594 | */ |
3595 | return; |
3596 | } else if (BGE_IS_5717_PLUS(sc)((sc)->bge_flags & 0x00020000)) { |
3597 | if ((cur_rx->bge_flags & BGE_RXBDFLAG_IPV60x8000) == 0) { |
3598 | if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM0x1000 && |
3599 | (cur_rx->bge_error_flag & |
3600 | BGE_RXERRFLAG_IP_CSUM_NOK0x1000) == 0) |
3601 | m->m_pkthdrM_dat.MH.MH_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK0x0008; |
3602 | |
3603 | if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM0x2000) { |
3604 | m->m_pkthdrM_dat.MH.MH_pkthdr.csum_flags |= |
3605 | M_TCP_CSUM_IN_OK0x0020|M_UDP_CSUM_IN_OK0x0080; |
3606 | } |
3607 | } |
3608 | } else { |
3609 | if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM0x1000 && |
3610 | cur_rx->bge_ip_csum == 0xFFFF) |
3611 | m->m_pkthdrM_dat.MH.MH_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK0x0008; |
3612 | |
3613 | if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM0x2000 && |
3614 | m->m_pkthdrM_dat.MH.MH_pkthdr.len >= ETHER_MIN_NOPAD(64 - 4) && |
3615 | cur_rx->bge_tcp_udp_csum == 0xFFFF) { |
3616 | m->m_pkthdrM_dat.MH.MH_pkthdr.csum_flags |= |
3617 | M_TCP_CSUM_IN_OK0x0020|M_UDP_CSUM_IN_OK0x0080; |
3618 | } |
3619 | } |
3620 | } |
3621 | |
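/*
 * TX completion: walk the ring from the saved consumer index to the
 * one reported in the status block, unloading DMA maps and freeing
 * the mbufs of frames the chip has finished sending.
 */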
3622 | void |
3623 | bge_txeof(struct bge_softc *sc) |
3624 | { |
3626 | struct ifnet *ifp; |
3627 | bus_dmamap_t dmamap; |
3628 | bus_addr_t offset, toff; |
3629 | bus_size_t tlen; |
3630 | int tosync, freed, txcnt; |
3631 | u_int32_t cons, newcons; |
3632 | struct mbuf *m; |
3633 | |
3634 | /* Nothing to do */ |
3635 | cons = sc->bge_tx_saved_considx; |
3636 | newcons = sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx; |
3637 | if (cons == newcons) |
3638 | return; |
3639 | |
3640 | ifp = &sc->arpcom.ac_if; |
3641 | |
3642 | bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,(*(sc->bge_dmatag)->_dmamap_sync)((sc->bge_dmatag), ( sc->bge_ring_map), (__builtin_offsetof(struct bge_ring_data , bge_status_block)), (sizeof (struct bge_status_block)), (0x02 )) |
3643 | offsetof(struct bge_ring_data, bge_status_block),(*(sc->bge_dmatag)->_dmamap_sync)((sc->bge_dmatag), ( sc->bge_ring_map), (__builtin_offsetof(struct bge_ring_data , bge_status_block)), (sizeof (struct bge_status_block)), (0x02 )) |
3644 | sizeof (struct bge_status_block),(*(sc->bge_dmatag)->_dmamap_sync)((sc->bge_dmatag), ( sc->bge_ring_map), (__builtin_offsetof(struct bge_ring_data , bge_status_block)), (sizeof (struct bge_status_block)), (0x02 )) |
3645 | BUS_DMASYNC_POSTREAD)(*(sc->bge_dmatag)->_dmamap_sync)((sc->bge_dmatag), ( sc->bge_ring_map), (__builtin_offsetof(struct bge_ring_data , bge_status_block)), (sizeof (struct bge_status_block)), (0x02 )); |
3646 | |
3647 | offset = offsetof(struct bge_ring_data, bge_tx_ring)__builtin_offsetof(struct bge_ring_data, bge_tx_ring); |
3648 | tosync = newcons - cons; |
3649 | |
3650 | toff = offset + (cons * sizeof (struct bge_tx_bd)); |
3651 | |
3652 | if (tosync < 0) { |
3653 | tlen = (BGE_TX_RING_CNT512 - cons) * sizeof (struct bge_tx_bd); |
3654 | bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,(*(sc->bge_dmatag)->_dmamap_sync)((sc->bge_dmatag), ( sc->bge_ring_map), (toff), (tlen), (0x02|0x08)) |
3655 | toff, tlen, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)(*(sc->bge_dmatag)->_dmamap_sync)((sc->bge_dmatag), ( sc->bge_ring_map), (toff), (tlen), (0x02|0x08)); |
3656 | tosync = -tosync; |
3657 | } |
3658 | |
3659 | bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,(*(sc->bge_dmatag)->_dmamap_sync)((sc->bge_dmatag), ( sc->bge_ring_map), (offset), (tosync * sizeof (struct bge_tx_bd )), (0x02|0x08)) |
3660 | offset, tosync * sizeof (struct bge_tx_bd),(*(sc->bge_dmatag)->_dmamap_sync)((sc->bge_dmatag), ( sc->bge_ring_map), (offset), (tosync * sizeof (struct bge_tx_bd )), (0x02|0x08)) |
3661 | BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)(*(sc->bge_dmatag)->_dmamap_sync)((sc->bge_dmatag), ( sc->bge_ring_map), (offset), (tosync * sizeof (struct bge_tx_bd )), (0x02|0x08)); |
3662 | |
3663 | /* |
3664 | * Go through our tx ring and free mbufs for those |
3665 | * frames that have been sent. |
3666 | */ |
3667 | freed = 0; |
3668 | while (cons != newcons) { |
3670 | m = sc->bge_cdata.bge_tx_chain[cons]; |
3671 | if (m != NULL((void *)0)) { |
3672 | dmamap = sc->bge_cdata.bge_tx_map[cons]; |
3673 | |
3674 | sc->bge_cdata.bge_tx_chain[cons] = NULL((void *)0); |
3675 | sc->bge_cdata.bge_tx_map[cons] = NULL((void *)0); |
3676 | bus_dmamap_sync(sc->bge_dmatag, dmamap, 0,(*(sc->bge_dmatag)->_dmamap_sync)((sc->bge_dmatag), ( dmamap), (0), (dmamap->dm_mapsize), (0x08)) |
3677 | dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE)(*(sc->bge_dmatag)->_dmamap_sync)((sc->bge_dmatag), ( dmamap), (0), (dmamap->dm_mapsize), (0x08)); |
3678 | bus_dmamap_unload(sc->bge_dmatag, dmamap)(*(sc->bge_dmatag)->_dmamap_unload)((sc->bge_dmatag) , (dmamap)); |
3679 | |
3680 | m_freem(m); |
3681 | } |
3682 | freed++; |
3683 | BGE_INC(cons, BGE_TX_RING_CNT)(cons) = (cons + 1) % 512; |
3684 | } |
3685 | |
3686 | txcnt = atomic_sub_int_nv(&sc->bge_txcnt, freed)_atomic_sub_int_nv(&sc->bge_txcnt, freed); |
3687 | |
3688 | sc->bge_tx_saved_considx = cons; |
3689 | |
3690 | if (ifq_is_oactive(&ifp->if_snd)) |
3691 | ifq_restart(&ifp->if_snd); |
3692 | else if (txcnt == 0) |
3693 | ifp->if_timer = 0; |
3694 | } |
3695 | |
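/*
 * Interrupt handler. Examines the status block (and, on chips with
 * tagged status, the status tag) to decide whether the interrupt is
 * ours before servicing link, RX and TX events.
 */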
3696 | int |
3697 | bge_intr(void *xsc) |
3698 | { |
3699 | struct bge_softc *sc; |
3700 | struct ifnet *ifp; |
3701 | u_int32_t statusword, statustag; |
3702 | |
3703 | sc = xsc; |
3704 | ifp = &sc->arpcom.ac_if; |
3705 | |
3706 | /* read status word from status block */ |
3707 | bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,(*(sc->bge_dmatag)->_dmamap_sync)((sc->bge_dmatag), ( sc->bge_ring_map), (__builtin_offsetof(struct bge_ring_data , bge_status_block)), (sizeof (struct bge_status_block)), (0x02 | 0x08)) |
3708 | offsetof(struct bge_ring_data, bge_status_block),(*(sc->bge_dmatag)->_dmamap_sync)((sc->bge_dmatag), ( sc->bge_ring_map), (__builtin_offsetof(struct bge_ring_data , bge_status_block)), (sizeof (struct bge_status_block)), (0x02 | 0x08)) |
3709 | sizeof (struct bge_status_block),(*(sc->bge_dmatag)->_dmamap_sync)((sc->bge_dmatag), ( sc->bge_ring_map), (__builtin_offsetof(struct bge_ring_data , bge_status_block)), (sizeof (struct bge_status_block)), (0x02 | 0x08)) |
3710 | BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE)(*(sc->bge_dmatag)->_dmamap_sync)((sc->bge_dmatag), ( sc->bge_ring_map), (__builtin_offsetof(struct bge_ring_data , bge_status_block)), (sizeof (struct bge_status_block)), (0x02 | 0x08)); |
3711 | |
3712 | statusword = sc->bge_rdata->bge_status_block.bge_status; |
3713 | statustag = sc->bge_rdata->bge_status_block.bge_status_tag << 24; |
3714 | |
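	/*
	 * With tagged status, an unchanged tag plus a deasserted INTA
	 * line means the interrupt isn't ours; otherwise fall back on
	 * the UPDATED flag in the status block.
	 */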
3715 | if (sc->bge_flags & BGE_TAGGED_STATUS0x00200000) { |
3716 | if (sc->bge_lasttag == statustag && |
3717 | (CSR_READ_4(sc, BGE_PCI_PCISTATE)((sc->bge_btag)->read_4((sc->bge_bhandle), (0x70))) & |
3718 | BGE_PCISTATE_INTR_NOT_ACTIVE0x00000002)) |
3719 | return (0); |
3720 | sc->bge_lasttag = statustag; |
3721 | } else { |
3722 | if (!(statusword & BGE_STATFLAG_UPDATED0x00000001) && |
3723 | (CSR_READ_4(sc, BGE_PCI_PCISTATE)((sc->bge_btag)->read_4((sc->bge_bhandle), (0x70))) & |
3724 | BGE_PCISTATE_INTR_NOT_ACTIVE0x00000002)) |
3725 | return (0); |
3726 | /* Ack interrupt and stop others from occurring. */ |
3727 | bge_writembx(sc, BGE_MBX_IRQ0_LO0x0204, 1); |
3728 | statustag = 0; |
3729 | } |
3730 | |
3731 | /* clear status word */ |
3732 | sc->bge_rdata->bge_status_block.bge_status = 0; |
3733 | |
3734 | bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,(*(sc->bge_dmatag)->_dmamap_sync)((sc->bge_dmatag), ( sc->bge_ring_map), (__builtin_offsetof(struct bge_ring_data , bge_status_block)), (sizeof (struct bge_status_block)), (0x01 | 0x04)) |
3735 | offsetof(struct bge_ring_data, bge_status_block),(*(sc->bge_dmatag)->_dmamap_sync)((sc->bge_dmatag), ( sc->bge_ring_map), (__builtin_offsetof(struct bge_ring_data , bge_status_block)), (sizeof (struct bge_status_block)), (0x01 | 0x04)) |
3736 | sizeof (struct bge_status_block),(*(sc->bge_dmatag)->_dmamap_sync)((sc->bge_dmatag), ( sc->bge_ring_map), (__builtin_offsetof(struct bge_ring_data , bge_status_block)), (sizeof (struct bge_status_block)), (0x01 | 0x04)) |
3737 | BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE)(*(sc->bge_dmatag)->_dmamap_sync)((sc->bge_dmatag), ( sc->bge_ring_map), (__builtin_offsetof(struct bge_ring_data , bge_status_block)), (sizeof (struct bge_status_block)), (0x01 | 0x04)); |
3738 | |
3739 | if (BGE_ASICREV(sc->bge_chipid)((sc->bge_chipid) >> 12) == BGE_ASICREV_BCM57000x07 || |
3740 | statusword & BGE_STATFLAG_LINKSTATE_CHANGED0x00000002 || |
3741 | BGE_STS_BIT(sc, BGE_STS_LINK_EVT)((sc)->bge_sts & (0x00000002))) { |
3742 | KERNEL_LOCK()_kernel_lock(); |
3743 | bge_link_upd(sc); |
3744 | KERNEL_UNLOCK()_kernel_unlock(); |
3745 | } |
3746 | |
3747 | /* Re-enable interrupts. */ |
3748 | bge_writembx(sc, BGE_MBX_IRQ0_LO0x0204, statustag); |
3749 | |
3750 | if (ifp->if_flags & IFF_RUNNING0x40) { |
3751 | /* Check RX return ring producer/consumer */ |
3752 | bge_rxeof(sc); |
3753 | |
3754 | /* Check TX ring producer/consumer */ |
3755 | bge_txeof(sc); |
3756 | } |
3757 | |
3758 | return (1); |
3759 | } |
3760 | |
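/* Once-a-second timer: update statistics and, when needed, poll link state. */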
3761 | void |
3762 | bge_tick(void *xsc) |
3763 | { |
3764 | struct bge_softc *sc = xsc; |
3765 | struct mii_data *mii = &sc->bge_mii; |
3766 | int s; |
3767 | |
3768 | s = splnet()splraise(0x7); |
3769 | |
3770 | if (BGE_IS_5705_PLUS(sc)((sc)->bge_flags & 0x00001000)) |
3771 | bge_stats_update_regs(sc); |
3772 | else |
3773 | bge_stats_update(sc); |
3774 | |
3775 | if (sc->bge_flags & BGE_FIBER_TBI0x00000200) { |
3776 | /* |
3777 | * Since auto-polling can't be used in TBI mode, we poll the
3778 | * link status manually: register a pending link event and
3779 | * trigger an interrupt.
3780 | */ |
3781 | BGE_STS_SETBIT(sc, BGE_STS_LINK_EVT)((sc)->bge_sts |= (0x00000002)); |
3782 | BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET)((sc->bge_btag)->write_4((sc->bge_bhandle), (0x6808) , ((((sc->bge_btag)->read_4((sc->bge_bhandle), (0x6808 ))) | (0x00000004))))); |
3783 | } else { |
3784 | /* |
3785 | * Do not touch the PHY if we have link up. Doing so could
3786 | * break IPMI/ASF mode or produce extra input errors
3787 | * (extra input errors were reported for bcm5701 & bcm5704).
3788 | */ |
3789 | if (!BGE_STS_BIT(sc, BGE_STS_LINK)((sc)->bge_sts & (0x00000001))) |
3790 | mii_tick(mii); |
3791 | } |
3792 | |
3793 | timeout_add_sec(&sc->bge_timeout, 1); |
3794 | |
3795 | splx(s)spllower(s); |
3796 | } |
3797 | |
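/*
 * Statistics update for 5705 and newer chips, which expose their
 * counters as registers rather than in a NIC-memory stats block.
 */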
3798 | void |
3799 | bge_stats_update_regs(struct bge_softc *sc) |
3800 | { |
3801 | struct ifnet *ifp = &sc->arpcom.ac_if; |
3802 | |
3803 | sc->bge_tx_collisions += CSR_READ_4(sc, BGE_MAC_STATS +((sc->bge_btag)->read_4((sc->bge_bhandle), (0x0800 + __builtin_offsetof(struct bge_mac_stats_regs, etherStatsCollisions )))) |
3804 | offsetof(struct bge_mac_stats_regs, etherStatsCollisions))((sc->bge_btag)->read_4((sc->bge_bhandle), (0x0800 + __builtin_offsetof(struct bge_mac_stats_regs, etherStatsCollisions )))); |
3805 | |
3806 | sc->bge_rx_overruns += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS)((sc->bge_btag)->read_4((sc->bge_bhandle), (0x224C)) ); |
3807 | |
3808 | /* |
3809 | * XXX |
3810 | * Unlike other controllers, the BGE_RXLP_LOCSTAT_IFIN_DROPS counter |
3811 | * of the BCM5717, BCM5718, BCM5762, BCM5719 A0 and BCM5720 A0 |
3812 | * controllers includes the number of unwanted multicast frames. |
3813 | * This comes from a silicon bug, and the known workaround to get a
3814 | * rough (not exact) counter is to enable an interrupt on MBUF low
3815 | * watermark attention. This can be accomplished by setting the
3816 | * BGE_HCCMODE_ATTN bit of BGE_HDD_MODE, the BGE_BMANMODE_LOMBUF_ATTN
3817 | * bit of BGE_BMAN_MODE and the BGE_MODECTL_FLOWCTL_ATTN_INTR bit of
3818 | * BGE_MODE_CTL. However, that change would generate more interrupts
3819 | * and there would still be a chance of losing multiple frames during
3820 | * BGE_MODECTL_FLOWCTL_ATTN_INTR interrupt handling. Given that the
3821 | * workaround still would not yield a correct counter, I don't think
3822 | * it's worth implementing. So skip reading the counter on
3823 | * controllers that have the silicon bug.
3824 | */
3825 | if (BGE_ASICREV(sc->bge_chipid)((sc->bge_chipid) >> 12) != BGE_ASICREV_BCM57170x5717 && |
3826 | BGE_ASICREV(sc->bge_chipid)((sc->bge_chipid) >> 12) != BGE_ASICREV_BCM57620x5762 && |
3827 | sc->bge_chipid != BGE_CHIPID_BCM5719_A00x05719000 && |
3828 | sc->bge_chipid != BGE_CHIPID_BCM5720_A00x05720000) |
3829 | sc->bge_rx_discards += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS)((sc->bge_btag)->read_4((sc->bge_bhandle), (0x2250)) ); |
3830 | |
3831 | sc->bge_rx_inerrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS)((sc->bge_btag)->read_4((sc->bge_bhandle), (0x2254)) ); |
3832 | |
3833 | ifp->if_collisionsif_data.ifi_collisions = sc->bge_tx_collisions; |
3834 | ifp->if_ierrorsif_data.ifi_ierrors = sc->bge_rx_discards + sc->bge_rx_inerrors; |
3835 | |
3836 | if (sc->bge_flags & BGE_RDMA_BUG0x00800000) { |
3837 | u_int32_t val, ucast, mcast, bcast; |
3838 | |
3839 | ucast = CSR_READ_4(sc, BGE_MAC_STATS +((sc->bge_btag)->read_4((sc->bge_bhandle), (0x0800 + __builtin_offsetof(struct bge_mac_stats_regs, ifHCOutUcastPkts )))) |
3840 | offsetof(struct bge_mac_stats_regs, ifHCOutUcastPkts))((sc->bge_btag)->read_4((sc->bge_bhandle), (0x0800 + __builtin_offsetof(struct bge_mac_stats_regs, ifHCOutUcastPkts )))); |
3841 | mcast = CSR_READ_4(sc, BGE_MAC_STATS +((sc->bge_btag)->read_4((sc->bge_bhandle), (0x0800 + __builtin_offsetof(struct bge_mac_stats_regs, ifHCOutMulticastPkts )))) |
3842 | offsetof(struct bge_mac_stats_regs, ifHCOutMulticastPkts))((sc->bge_btag)->read_4((sc->bge_bhandle), (0x0800 + __builtin_offsetof(struct bge_mac_stats_regs, ifHCOutMulticastPkts )))); |
3843 | bcast = CSR_READ_4(sc, BGE_MAC_STATS +((sc->bge_btag)->read_4((sc->bge_bhandle), (0x0800 + __builtin_offsetof(struct bge_mac_stats_regs, ifHCOutBroadcastPkts )))) |
3844 | offsetof(struct bge_mac_stats_regs, ifHCOutBroadcastPkts))((sc->bge_btag)->read_4((sc->bge_bhandle), (0x0800 + __builtin_offsetof(struct bge_mac_stats_regs, ifHCOutBroadcastPkts )))); |
3845 | |
3846 | /* |
3847 | * If controller transmitted more than BGE_NUM_RDMA_CHANNELS |
3848 | * frames, it's safe to disable workaround for DMA engine's |
3849 | * miscalculation of TXMBUF space. |
3850 | */ |
3851 | if (ucast + mcast + bcast > BGE_NUM_RDMA_CHANNELS4) { |
3852 | val = CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL)((sc->bge_btag)->read_4((sc->bge_bhandle), (0x4910)) ); |
3853 | if (BGE_ASICREV(sc->bge_chipid)((sc->bge_chipid) >> 12) == BGE_ASICREV_BCM57190x5719) |
3854 | val &= ~BGE_RDMA_TX_LENGTH_WA_57190x02000000; |
3855 | else |
3856 | val &= ~BGE_RDMA_TX_LENGTH_WA_57200x00200000; |
3857 | CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL, val)((sc->bge_btag)->write_4((sc->bge_bhandle), (0x4910) , (val))); |
3858 | sc->bge_flags &= ~BGE_RDMA_BUG0x00800000; |
3859 | } |
3860 | } |
3861 | } |
3862 | |
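/*
 * Statistics update for older chips, which maintain their stats
 * block in NIC memory; read the counters through the memory window.
 */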
3863 | void |
3864 | bge_stats_update(struct bge_softc *sc) |
3865 | { |
3866 | struct ifnet *ifp = &sc->arpcom.ac_if; |
3867 | bus_size_t stats = BGE_MEMWIN_START0x00008000 + BGE_STATS_BLOCK0x00000300; |
3868 | u_int32_t cnt; |
3869 | |
3870 | #define READ_STAT(sc, stats, stat) \ |
3871 | CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))((sc->bge_btag)->read_4((sc->bge_bhandle), (stats + __builtin_offsetof (struct bge_stats, stat)))) |
3872 | |
3873 | cnt = READ_STAT(sc, stats, txstats.etherStatsCollisions.bge_addr_lo); |
3874 | ifp->if_collisionsif_data.ifi_collisions += (u_int32_t)(cnt - sc->bge_tx_collisions); |
3875 | sc->bge_tx_collisions = cnt; |
3876 | |
3877 | cnt = READ_STAT(sc, stats, nicNoMoreRxBDs.bge_addr_lo); |
3878 | sc->bge_rx_overruns = cnt; |
3879 | cnt = READ_STAT(sc, stats, ifInErrors.bge_addr_lo); |
3880 | ifp->if_ierrorsif_data.ifi_ierrors += (uint32_t)(cnt - sc->bge_rx_inerrors); |
3881 | sc->bge_rx_inerrors = cnt; |
3882 | cnt = READ_STAT(sc, stats, ifInDiscards.bge_addr_lo); |
3883 | ifp->if_ierrorsif_data.ifi_ierrors += (u_int32_t)(cnt - sc->bge_rx_discards); |
3884 | sc->bge_rx_discards = cnt; |
3885 | |
3886 | cnt = READ_STAT(sc, stats, txstats.ifOutDiscards.bge_addr_lo); |
3887 | ifp->if_oerrorsif_data.ifi_oerrors += (u_int32_t)(cnt - sc->bge_tx_discards); |
3888 | sc->bge_tx_discards = cnt; |
3889 | |
3890 | #undef READ_STAT |
3891 | } |
3892 | |
3893 | /* |
3894 | * Compact outbound packets to avoid a bug with DMA segments of less than 8 bytes.
3895 | */ |
3896 | int |
3897 | bge_compact_dma_runt(struct mbuf *pkt) |
3898 | { |
3899 | struct mbuf *m, *prev, *n = NULL((void *)0); |
3900 | int totlen, newprevlen; |
3901 | |
3902 | prev = NULL((void *)0); |
3903 | totlen = 0; |
3904 | |
3905 | for (m = pkt; m != NULL((void *)0); prev = m, m = m->m_nextm_hdr.mh_next) {
3906 | int mlen = m->m_lenm_hdr.mh_len; |
3907 | int shortfall = 8 - mlen;
3908 | |
3909 | totlen += mlen; |
3910 | if (mlen == 0) |
3911 | continue; |
3912 | if (mlen >= 8) |
3913 | continue; |
3914 | |
3915 | /* If we get here, the mbuf data is too small for the DMA engine.
3916 | * Try to fix it by shuffling data into prev or next in the chain.
3917 | * If that fails, do a compacting deep-copy of the whole chain.
3918 | */
3919 |
3920 | /* Internal frag. If it fits in prev, copy it there. */
3921 | if (prev && m_trailingspace(prev) >= m->m_lenm_hdr.mh_len) { |
3922 | bcopy(m->m_datam_hdr.mh_data, prev->m_datam_hdr.mh_data+prev->m_lenm_hdr.mh_len, mlen); |
3923 | prev->m_lenm_hdr.mh_len += mlen; |
3924 | m->m_lenm_hdr.mh_len = 0; |
3925 | /* XXX stitch chain */ |
3926 | prev->m_nextm_hdr.mh_next = m_free(m); |
3927 | m = prev; |
3928 | continue; |
3929 | } else if (m->m_nextm_hdr.mh_next != NULL((void *)0) && |
3930 | m_trailingspace(m) >= shortfall && |
3931 | m->m_nextm_hdr.mh_next->m_lenm_hdr.mh_len >= (8 + shortfall)) { |
3932 | /* m is writable and next has enough data, so pull up. */
3933 | |
3934 | bcopy(m->m_nextm_hdr.mh_next->m_datam_hdr.mh_data, m->m_datam_hdr.mh_data+m->m_lenm_hdr.mh_len, shortfall); |
3935 | m->m_lenm_hdr.mh_len += shortfall; |
3936 | m->m_nextm_hdr.mh_next->m_lenm_hdr.mh_len -= shortfall; |
3937 | m->m_nextm_hdr.mh_next->m_datam_hdr.mh_data += shortfall; |
3938 | } else if (m->m_nextm_hdr.mh_next == NULL((void *)0) || 1) { |
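			/*
			 * Note: the "|| 1" makes this the catch-all branch,
			 * so any runt that couldn't be merged into a
			 * neighbouring mbuf above is handled here.
			 */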
3939 | /* Got a runt at the very end of the packet. |
3940 | * Borrow data from the tail of the preceding mbuf and
3941 | * update its length in-place. (The original data is still |
3942 | * valid, so we can do this even if prev is not writable.) |
3943 | */ |
3944 | |
3945 | /* If we'd make prev a runt, just move all of its data. */
3946 | #ifdef DEBUG |
3947 | KASSERT(prev != NULL /*, ("runt but null PREV")*/)((prev != ((void *)0)) ? (void)0 : __assert("diagnostic ", "/usr/src/sys/dev/pci/if_bge.c" , 3947, "prev != NULL")); |
3948 | KASSERT(prev->m_len >= 8 /*, ("runt prev")*/)((prev->m_hdr.mh_len >= 8) ? (void)0 : __assert("diagnostic " , "/usr/src/sys/dev/pci/if_bge.c", 3948, "prev->m_len >= 8" )); |
3949 | #endif |
3950 | if ((prev->m_lenm_hdr.mh_len - shortfall) < 8) |
3951 | shortfall = prev->m_lenm_hdr.mh_len; |
3952 | |
3953 | newprevlen = prev->m_lenm_hdr.mh_len - shortfall; |
3954 | |
3955 | MGET(n, M_NOWAIT, MT_DATA)n = m_get((0x0002), (1)); |
3956 | if (n == NULL((void *)0)) |
3957 | return (ENOBUFS55); |
3958 | KASSERT(m->m_len + shortfall < MLEN((m->m_hdr.mh_len + shortfall < (256 - sizeof(struct m_hdr ))) ? (void)0 : __assert("diagnostic ", "/usr/src/sys/dev/pci/if_bge.c" , 3960, "m->m_len + shortfall < MLEN")) |
3959 | /*,((m->m_hdr.mh_len + shortfall < (256 - sizeof(struct m_hdr ))) ? (void)0 : __assert("diagnostic ", "/usr/src/sys/dev/pci/if_bge.c" , 3960, "m->m_len + shortfall < MLEN")) |
3960 | ("runt %d +prev %d too big\n", m->m_len, shortfall)*/)((m->m_hdr.mh_len + shortfall < (256 - sizeof(struct m_hdr ))) ? (void)0 : __assert("diagnostic ", "/usr/src/sys/dev/pci/if_bge.c" , 3960, "m->m_len + shortfall < MLEN")); |
3961 | |
3962 | /* first copy the data we're stealing from prev */ |
3963 | bcopy(prev->m_datam_hdr.mh_data + newprevlen, n->m_datam_hdr.mh_data, shortfall); |
3964 | |
3965 | /* update prev->m_len accordingly */ |
3966 | prev->m_lenm_hdr.mh_len -= shortfall; |
3967 | |
3968 | /* copy data from runt m */ |
3969 | bcopy(m->m_datam_hdr.mh_data, n->m_datam_hdr.mh_data + shortfall, m->m_lenm_hdr.mh_len); |
3970 | |
3971 | /* n holds what we stole from prev, plus m */ |
3972 | n->m_lenm_hdr.mh_len = shortfall + m->m_lenm_hdr.mh_len; |
3973 | |
3974 | /* stitch n into chain and free m */ |
3975 | n->m_nextm_hdr.mh_next = m->m_nextm_hdr.mh_next; |
3976 | prev->m_nextm_hdr.mh_next = n; |
3977 | /* KASSERT(m->m_next == NULL); */ |
3978 | m->m_nextm_hdr.mh_next = NULL((void *)0); |
3979 | m_free(m); |
3980 | m = n; /* for continuing loop */ |
3981 | } |
3982 | } |
3983 | return (0); |
3984 | } |
3985 | |
3986 | /* |
3987 | * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason. |
3988 | * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD, |
3989 | * but when such padded frames employ the bge IP/TCP checksum offload, |
3990 | * the hardware checksum assist gives incorrect results (possibly |
3991 | * from incorporating its own padding into the UDP/TCP checksum; who knows). |
3992 | * If we pad such runts with zeros, the onboard checksum comes out correct. |
3993 | */ |
3994 | int |
3995 | bge_cksum_pad(struct mbuf *m) |
3996 | { |
3997 | int padlen = ETHER_MIN_NOPAD(64 - 4) - m->m_pkthdrM_dat.MH.MH_pkthdr.len; |
3998 | struct mbuf *last; |
3999 | |
4000 | /* If there's only the packet header mbuf and we can pad there, use it. */
4001 | if (m->m_pkthdrM_dat.MH.MH_pkthdr.len == m->m_lenm_hdr.mh_len && m_trailingspace(m) >= padlen) { |
4002 | last = m; |
4003 | } else { |
4004 | /* |
4005 | * Walk packet chain to find last mbuf. We will either |
4006 | * pad there, or append a new mbuf and pad it. |
4007 | */ |
4008 | for (last = m; last->m_nextm_hdr.mh_next != NULL((void *)0); last = last->m_nextm_hdr.mh_next); |
4009 | if (m_trailingspace(last) < padlen) { |
4010 | /* Allocate new empty mbuf, pad it. Compact later. */ |
4011 | struct mbuf *n; |
4012 | |
4013 | MGET(n, M_DONTWAIT, MT_DATA)n = m_get((0x0002), (1)); |
4014 | if (n == NULL((void *)0)) |
4015 | return (ENOBUFS55); |
4016 | n->m_lenm_hdr.mh_len = 0; |
4017 | last->m_nextm_hdr.mh_next = n; |
4018 | last = n; |
4019 | } |
4020 | } |
4021 | |
4022 | /* Now zero the pad area, to avoid the bge cksum-assist bug. */ |
4023 | memset(mtod(last, caddr_t) + last->m_len, 0, padlen)__builtin_memset((((caddr_t)((last)->m_hdr.mh_data)) + last ->m_hdr.mh_len), (0), (padlen)); |
4024 | last->m_lenm_hdr.mh_len += padlen; |
4025 | m->m_pkthdrM_dat.MH.MH_pkthdr.len += padlen; |
4026 | |
4027 | return (0); |
4028 | } |
4029 | |
4030 | /* |
4031 | * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data |
4032 | * pointers to descriptors. |
4033 | */ |
4034 | int |
4035 | bge_encap(struct bge_softc *sc, struct mbuf *m, int *txinc) |
4036 | { |
4037 | struct bge_tx_bd *f = NULL((void *)0); |
4038 | u_int32_t frag, cur; |
4039 | u_int16_t csum_flags = 0; |
4040 | bus_dmamap_t dmamap; |
4041 | int i = 0; |
4042 | |
4043 | cur = frag = (sc->bge_tx_prodidx + *txinc) % BGE_TX_RING_CNT512; |
4044 | |
4045 | if (m->m_pkthdrM_dat.MH.MH_pkthdr.csum_flags) { |
4046 | if (m->m_pkthdrM_dat.MH.MH_pkthdr.csum_flags & M_IPV4_CSUM_OUT0x0001) |
4047 | csum_flags |= BGE_TXBDFLAG_IP_CSUM0x0002; |
4048 | if (m->m_pkthdrM_dat.MH.MH_pkthdr.csum_flags & |
4049 | (M_TCP_CSUM_OUT0x0002 | M_UDP_CSUM_OUT0x0004)) { |
4050 | csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM0x0001; |
4051 | if (m->m_pkthdrM_dat.MH.MH_pkthdr.len < ETHER_MIN_NOPAD(64 - 4) && |
4052 | bge_cksum_pad(m) != 0) |
4053 | return (ENOBUFS55); |
4054 | } |
4055 | } |
4056 | |
4057 | if (sc->bge_flags & BGE_JUMBO_FRAME0x04000000 && |
4058 | m->m_pkthdrM_dat.MH.MH_pkthdr.len > ETHER_MAX_LEN1518) |
4059 | csum_flags |= BGE_TXBDFLAG_JUMBO_FRAME0x0008; |
4060 | |
4061 | if (!(BGE_CHIPREV(sc->bge_chipid)((sc->bge_chipid) >> 8) == BGE_CHIPREV_5700_BX0x71)) |
4062 | goto doit; |
4063 | |
4064 | /* |
4065 | * bcm5700 Revision B silicon cannot handle DMA descriptors of
4066 | * less than eight bytes. If we encounter a teeny mbuf |
4067 | * at the end of a chain, we can pad. Otherwise, copy. |
4068 | */ |
4069 | if (bge_compact_dma_runt(m) != 0) |
4070 | return (ENOBUFS55); |
4071 | |
4072 | doit: |
4073 | dmamap = sc->bge_txdma[cur]; |
4074 | |
4075 | /* |
4076 | * Start packing the mbufs in this chain into |
4077 | * the fragment pointers. Stop when we run out |
4078 | * of fragments or hit the end of the mbuf chain. |
4079 | */ |
4080 | switch (bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m,(*(sc->bge_dmatag)->_dmamap_load_mbuf)((sc->bge_dmatag ), (dmamap), (m), (0x0001)) |
4081 | BUS_DMA_NOWAIT)(*(sc->bge_dmatag)->_dmamap_load_mbuf)((sc->bge_dmatag ), (dmamap), (m), (0x0001))) { |
4082 | case 0: |
4083 | break; |
4084 | case EFBIG27: |
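		/*
		 * Too many DMA segments: compact the mbuf chain with
		 * m_defrag() and retry the load once before giving up.
		 */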
4085 | if (m_defrag(m, M_DONTWAIT0x0002) == 0 && |
4086 | bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m,(*(sc->bge_dmatag)->_dmamap_load_mbuf)((sc->bge_dmatag ), (dmamap), (m), (0x0001)) |
4087 | BUS_DMA_NOWAIT)(*(sc->bge_dmatag)->_dmamap_load_mbuf)((sc->bge_dmatag ), (dmamap), (m), (0x0001)) == 0) |
4088 | break; |
4089 | |
4090 | /* FALLTHROUGH */ |
4091 | default: |
4092 | return (ENOBUFS55); |
4093 | } |
4094 | |
4095 | for (i = 0; i < dmamap->dm_nsegs; i++) { |
4096 | f = &sc->bge_rdata->bge_tx_ring[frag]; |
4097 | if (sc->bge_cdata.bge_tx_chain[frag] != NULL((void *)0)) |
4098 | break; |
4099 | BGE_HOSTADDR(f->bge_addr, dmamap->dm_segs[i].ds_addr)do { (f->bge_addr).bge_addr_lo = ((u_int64_t) (dmamap-> dm_segs[i].ds_addr) & 0xffffffff); if (sizeof(bus_addr_t) == 8) (f->bge_addr).bge_addr_hi = ((u_int64_t) (dmamap-> dm_segs[i].ds_addr) >> 32); else (f->bge_addr).bge_addr_hi = 0; } while(0); |
4100 | f->bge_len = dmamap->dm_segs[i].ds_len; |
4101 | f->bge_flags = csum_flags; |
4102 | f->bge_vlan_tag = 0; |
4103 | #if NVLAN1 > 0 |
4104 | if (m->m_flagsm_hdr.mh_flags & M_VLANTAG0x0020) { |
4105 | f->bge_flags |= BGE_TXBDFLAG_VLAN_TAG0x0040; |
4106 | f->bge_vlan_tag = m->m_pkthdrM_dat.MH.MH_pkthdr.ether_vtag; |
4107 | } |
4108 | #endif |
4109 | cur = frag; |
4110 | BGE_INC(frag, BGE_TX_RING_CNT)(frag) = (frag + 1) % 512; |
4111 | } |
4112 | |
4113 | if (i < dmamap->dm_nsegs) |
4114 | goto fail_unload; |
4115 | |
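	/* The ring would wrap onto unconsumed descriptors: treat it as full. */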
4116 | if (frag == sc->bge_tx_saved_considx) |
4117 | goto fail_unload; |
4118 | |
4119 | bus_dmamap_sync(sc->bge_dmatag, dmamap, 0, dmamap->dm_mapsize,(*(sc->bge_dmatag)->_dmamap_sync)((sc->bge_dmatag), ( dmamap), (0), (dmamap->dm_mapsize), (0x04)) |
4120 | BUS_DMASYNC_PREWRITE)(*(sc->bge_dmatag)->_dmamap_sync)((sc->bge_dmatag), ( dmamap), (0), (dmamap->dm_mapsize), (0x04)); |
4121 | |
4122 | sc->bge_rdata->bge_tx_ring[cur].bge_flags |= BGE_TXBDFLAG_END0x0004; |
4123 | sc->bge_cdata.bge_tx_chain[cur] = m; |
4124 | sc->bge_cdata.bge_tx_map[cur] = dmamap; |
4125 | |
4126 | *txinc += dmamap->dm_nsegs; |
4127 | |
4128 | return (0); |
4129 | |
4130 | fail_unload: |
4131 | bus_dmamap_unload(sc->bge_dmatag, dmamap)(*(sc->bge_dmatag)->_dmamap_unload)((sc->bge_dmatag) , (dmamap)); |
4132 | |
4133 | return (ENOBUFS55); |
4134 | } |
4135 | |
4136 | /* |
4137 | * Main transmit routine. To avoid having to do mbuf copies, we put pointers |
4138 | * to the mbuf data regions directly in the transmit descriptors. |
4139 | */ |
4140 | void |
4141 | bge_start(struct ifqueue *ifq) |
4142 | { |
4143 | struct ifnet *ifp = ifq->ifq_if; |
4144 | struct bge_softc *sc = ifp->if_softc; |
4145 | struct mbuf *m; |
4146 | int txinc; |
4147 | |
4148 | if (!BGE_STS_BIT(sc, BGE_STS_LINK)((sc)->bge_sts & (0x00000001))) { |
4149 | ifq_purge(ifq); |
4150 | return; |
4151 | } |
4152 | |
4153 | txinc = 0; |
4154 | while (1) { |
4155 | /* Check if we have enough free send BDs. */ |
4156 | if (sc->bge_txcnt + txinc + BGE_NTXSEG30 + 16 >= |
4157 | BGE_TX_RING_CNT512) { |
4158 | ifq_set_oactive(ifq); |
4159 | break; |
4160 | } |
4161 | |
4162 | m = ifq_dequeue(ifq); |
4163 | if (m == NULL((void *)0)) |
4164 | break; |
4165 | |
4166 | if (bge_encap(sc, m, &txinc) != 0) { |
4167 | m_freem(m); |
4168 | continue; |
4169 | } |
4170 | |
4171 | #if NBPFILTER1 > 0 |
4172 | if (ifp->if_bpf) |
4173 | bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT(1 << 1)); |
4174 | #endif |
4175 | } |
4176 | |
4177 | if (txinc != 0) { |
4178 | /* Transmit */ |
4179 | sc->bge_tx_prodidx = (sc->bge_tx_prodidx + txinc) % |
4180 | BGE_TX_RING_CNT512; |
4181 | bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO0x0304, sc->bge_tx_prodidx); |
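		/*
		 * Write the producer index a second time on 5700 BX chips,
		 * to work around what is reportedly a 5700 BX erratum.
		 */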
4182 | if (BGE_CHIPREV(sc->bge_chipid)((sc->bge_chipid) >> 8) == BGE_CHIPREV_5700_BX0x71) |
4183 | bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO0x0304, |
4184 | sc->bge_tx_prodidx); |
4185 | |
4186 | atomic_add_int(&sc->bge_txcnt, txinc)_atomic_add_int(&sc->bge_txcnt, txinc); |
4187 | |
4188 | /* |
4189 | * Set a timeout in case the chip goes out to lunch. |
4190 | */ |
4191 | ifp->if_timer = 5; |
4192 | } |
4193 | } |
4194 | |
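/*
 * Initialize the chip and bring the interface up: reset, program the
 * rings, MRU, MAC address and RX filters, unmask interrupts and start
 * the one-second tick.
 */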
4195 | void |
4196 | bge_init(void *xsc) |
4197 | { |
4198 | struct bge_softc *sc = xsc; |
4199 | struct ifnet *ifp; |
4200 | u_int16_t *m; |
4201 | u_int32_t mode; |
4202 | int s; |
4203 | |
4204 | s = splnet()splraise(0x7); |
4205 | |
4206 | ifp = &sc->arpcom.ac_if; |
4207 | |
4208 | /* Cancel pending I/O and flush buffers. */ |
4209 | bge_stop(sc, 0); |
4210 | bge_sig_pre_reset(sc, BGE_RESET_START1); |
4211 | bge_reset(sc); |
4212 | bge_sig_legacy(sc, BGE_RESET_START1); |
4213 | bge_sig_post_reset(sc, BGE_RESET_START1); |
4214 | |
4215 | bge_chipinit(sc); |
4216 | |
4217 | /* |
4218 | * Init the various state machines, ring |
4219 | * control blocks and firmware. |
4220 | */ |
4221 | if (bge_blockinit(sc)) { |
4222 | printf("%s: initialization failure\n", sc->bge_dev.dv_xname); |
4223 | splx(s)spllower(s); |
4224 | return; |
4225 | } |
4226 | |
4227 | /* Specify MRU. */ |
4228 | if (BGE_IS_JUMBO_CAPABLE(sc)((sc)->bge_flags & 0x00000100)) |
4229 | CSR_WRITE_4(sc, BGE_RX_MTU,((sc->bge_btag)->write_4((sc->bge_bhandle), (0x043C) , (9022 + 4))) |
4230 | BGE_JUMBO_FRAMELEN + ETHER_VLAN_ENCAP_LEN)((sc->bge_btag)->write_4((sc->bge_bhandle), (0x043C) , (9022 + 4))); |
4231 | else |
4232 | CSR_WRITE_4(sc, BGE_RX_MTU,((sc->bge_btag)->write_4((sc->bge_bhandle), (0x043C) , (1518 + 4))) |
4233 | ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN)((sc->bge_btag)->write_4((sc->bge_bhandle), (0x043C) , (1518 + 4))); |
4234 | |
4235 | /* Load our MAC address. */ |
4236 | m = (u_int16_t *)&sc->arpcom.ac_enaddr[0]; |
4237 | CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]))((sc->bge_btag)->write_4((sc->bge_bhandle), (0x0410) , ((__uint16_t)(__builtin_constant_p(m[0]) ? (__uint16_t)(((__uint16_t )(m[0]) & 0xffU) << 8 | ((__uint16_t)(m[0]) & 0xff00U ) >> 8) : __swap16md(m[0]))))); |
4238 | CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]))((sc->bge_btag)->write_4((sc->bge_bhandle), (0x0414) , (((__uint16_t)(__builtin_constant_p(m[1]) ? (__uint16_t)((( __uint16_t)(m[1]) & 0xffU) << 8 | ((__uint16_t)(m[1 ]) & 0xff00U) >> 8) : __swap16md(m[1])) << 16 ) | (__uint16_t)(__builtin_constant_p(m[2]) ? (__uint16_t)((( __uint16_t)(m[2]) & 0xffU) << 8 | ((__uint16_t)(m[2 ]) & 0xff00U) >> 8) : __swap16md(m[2]))))); |
4239 | |
4240 | if (!(ifp->if_capabilitiesif_data.ifi_capabilities & IFCAP_VLAN_HWTAGGING0x00000020)) { |
4241 | /* Disable hardware decapsulation of VLAN frames. */ |
4242 | BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG)((sc->bge_btag)->write_4((sc->bge_bhandle), (0x0468) , ((((sc->bge_btag)->read_4((sc->bge_bhandle), (0x0468 ))) | (0x00000400))))); |
4243 | } |
4244 | |
4245 | /* Program promiscuous mode and multicast filters. */ |
4246 | bge_iff(sc); |
4247 | |
4248 | /* Init RX ring. */ |
4249 | bge_init_rx_ring_std(sc); |
4250 | |
4251 | /* |
4252 | * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's |
4253 | * memory to ensure that the chip has in fact read the first |
4254 | * entry of the ring. |
4255 | */ |
4256 | if (sc->bge_chipid == BGE_CHIPID_BCM5705_A00x3000) { |
4257 | u_int32_t v, i; |
4258 | for (i = 0; i < 10; i++) { |
4259 | DELAY(20)(*delay_func)(20); |
4260 | v = bge_readmem_ind(sc, BGE_STD_RX_RINGS0x00006000 + 8); |
4261 | if (v == (MCLBYTES(1 << 11) - ETHER_ALIGN2)) |
4262 | break; |
4263 | } |
4264 | if (i == 10) |
4265 | printf("%s: 5705 A0 chip failed to load RX ring\n", |
4266 | sc->bge_dev.dv_xname); |
4267 | } |

	/* Init Jumbo RX ring. */
	if (sc->bge_flags & BGE_JUMBO_RING)
		bge_init_rx_ring_jumbo(sc);

	/* Init our RX return ring index. */
	sc->bge_rx_saved_considx = 0;

	/* Init our RX/TX stat counters. */
	sc->bge_tx_collisions = 0;
	sc->bge_rx_discards = 0;
	sc->bge_rx_inerrors = 0;
	sc->bge_rx_overruns = 0;
	sc->bge_tx_discards = 0;

	/* Init TX ring. */
	bge_init_tx_ring(sc);

	/* Enable TX MAC state machine lockup fix. */
	mode = CSR_READ_4(sc, BGE_TX_MODE);
	if (BGE_IS_5755_PLUS(sc) ||
	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
		mode |= BGE_TXMODE_MBUF_LOCKUP_FIX;
	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720 ||
	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762) {
		mode &= ~(BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE);
		mode |= CSR_READ_4(sc, BGE_TX_MODE) &
		    (BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE);
	}
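	/*
	 * On BCM5720/BCM5762 the JMB_FRM_LEN and CNT_DN_MODE bits are
	 * re-read from the chip rather than recomputed, preserving
	 * whatever the bootcode presumably configured there.
	 */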

	/* Turn on transmitter */
	CSR_WRITE_4(sc, BGE_TX_MODE, mode | BGE_TXMODE_ENABLE);
	DELAY(100);

	mode = CSR_READ_4(sc, BGE_RX_MODE);
	if (BGE_IS_5755_PLUS(sc))
		mode |= BGE_RXMODE_IPV6_ENABLE;
	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762)
		mode |= BGE_RXMODE_IPV4_FRAG_FIX;

	/* Turn on receiver */
	CSR_WRITE_4(sc, BGE_RX_MODE, mode | BGE_RXMODE_ENABLE);
	DELAY(10);

	/*
	 * Set the number of good frames to receive after RX MBUF
	 * Low Watermark has been reached. After the RX MAC receives
	 * this number of frames, it will drop subsequent incoming
	 * frames until the MBUF High Watermark is reached.
	 */
	if (BGE_IS_57765_PLUS(sc))
		CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 1);
	else
		CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 2);

	/* Tell firmware we're alive. */
	BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);

	/* Enable host interrupts. */
	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
	BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
	bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
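	/*
	 * Writing 0 to the IRQ0 mailbox acks any outstanding event and
	 * lets the chip raise interrupts again; the interrupt handler
	 * presumably writes a non-zero value here to keep them masked
	 * while it runs.
	 */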

	bge_ifmedia_upd(ifp);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	splx(s);

	timeout_add_sec(&sc->bge_timeout, 1);
}

/*
 * Set media options.
 */
int
bge_ifmedia_upd(struct ifnet *ifp)
{
	struct bge_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->bge_mii;
	struct ifmedia *ifm = &sc->bge_ifmedia;

	/* If this is a 1000baseX NIC, enable the TBI port. */
	if (sc->bge_flags & BGE_FIBER_TBI) {
		if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
			return (EINVAL);
		switch(IFM_SUBTYPE(ifm->ifm_media)) {
		case IFM_AUTO:
			/*
			 * The BCM5704 ASIC appears to have a special
			 * mechanism for programming the autoneg
			 * advertisement registers in TBI mode.
			 */
			if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
				u_int32_t sgdig;
				sgdig = CSR_READ_4(sc, BGE_SGDIG_STS);
				if (sgdig & BGE_SGDIGSTS_DONE) {
					CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
					sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
					sgdig |= BGE_SGDIGCFG_AUTO |
					    BGE_SGDIGCFG_PAUSE_CAP |
					    BGE_SGDIGCFG_ASYM_PAUSE;
					CSR_WRITE_4(sc, BGE_SGDIG_CFG,
					    sgdig | BGE_SGDIGCFG_SEND);
					DELAY(5);
					CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
				}
			}
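			/*
			 * The BGE_SGDIGCFG_SEND write above pulses the
			 * "send config" bit for ~5us and then rewrites
			 * the register without it, which apparently
			 * latches the new advertisement into the
			 * SerDes autoneg logic.
			 */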
			break;
		case IFM_1000_SX:
			if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
				BGE_CLRBIT(sc, BGE_MAC_MODE,
				    BGE_MACMODE_HALF_DUPLEX);
			} else {
				BGE_SETBIT(sc, BGE_MAC_MODE,
				    BGE_MACMODE_HALF_DUPLEX);
			}
			DELAY(40);
			break;
		default:
			return (EINVAL);
		}
		/* XXX 802.3x flow control for 1000BASE-SX */
		return (0);
	}

	BGE_STS_SETBIT(sc, BGE_STS_LINK_EVT);
	if (mii->mii_instance) {
		struct mii_softc *miisc;
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	mii_mediachg(mii);

	/*
	 * Force an interrupt so that we will call bge_link_upd
	 * if needed and clear any pending link state attention.
	 * Without this we don't get any further interrupts for
	 * link state changes and thus will not bring the link up
	 * or be able to send in bge_start. The only other way to
	 * get things working is to receive a packet and get an
	 * RX interrupt.
	 */
	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
	    sc->bge_flags & BGE_IS_5788)
		BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
	else
		BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);

	return (0);
}

/*
 * Report current media status.
 */
void
bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct bge_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->bge_mii;

	if (sc->bge_flags & BGE_FIBER_TBI) {
		ifmr->ifm_status = IFM_AVALID;
		ifmr->ifm_active = IFM_ETHER;
		if (CSR_READ_4(sc, BGE_MAC_STS) &
		    BGE_MACSTAT_TBI_PCS_SYNCHED) {
			ifmr->ifm_status |= IFM_ACTIVE;
		} else {
			ifmr->ifm_active |= IFM_NONE;
			return;
		}
		ifmr->ifm_active |= IFM_1000_SX;
		if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
			ifmr->ifm_active |= IFM_HDX;
		else
			ifmr->ifm_active |= IFM_FDX;
		return;
	}

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = (mii->mii_media_active & ~IFM_ETH_FMASK) |
	    sc->bge_flowflags;
}

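/*
 * Handle socket ioctls. Media changes and flow-control settings are
 * funneled through here as well; anything not handled explicitly
 * falls through to ether_ioctl().
 */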
int
bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct bge_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	int s, error = 0;
	struct mii_data *mii;

	s = splnet();

	switch(command) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			bge_init(sc);
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				bge_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				bge_stop(sc, 0);
		}
		break;

	case SIOCSIFMEDIA:
		/* XXX Flow control is not supported for 1000BASE-SX */
		if (sc->bge_flags & BGE_FIBER_TBI) {
			ifr->ifr_media &= ~IFM_ETH_FMASK;
			sc->bge_flowflags = 0;
		}

		/* Flow control requires full-duplex mode. */
		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
		    (ifr->ifr_media & IFM_FDX) == 0) {
			ifr->ifr_media &= ~IFM_ETH_FMASK;
		}
		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
				/* We can do both TXPAUSE and RXPAUSE. */
				ifr->ifr_media |=
				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
			}
			sc->bge_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
		}
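		/*
		 * In other words, asking for plain IFM_FLOW on a fixed
		 * full-duplex media expands to both pause directions,
		 * e.g. (assuming the usual ifconfig(8) media syntax)
		 * "ifconfig bge0 media 1000baseT mediaopt
		 * full-duplex,flowcontrol".
		 */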
		/* FALLTHROUGH */
	case SIOCGIFMEDIA:
		if (sc->bge_flags & BGE_FIBER_TBI) {
			error = ifmedia_ioctl(ifp, ifr, &sc->bge_ifmedia,
			    command);
		} else {
			mii = &sc->bge_mii;
			error = ifmedia_ioctl(ifp, ifr, &mii->mii_media,
			    command);
		}
		break;

	case SIOCGIFRXR:
		error = bge_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_data);
		break;

	default:
		error = ether_ioctl(ifp, &sc->arpcom, command, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			bge_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}

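/*
 * Fill in RX ring information for the SIOCGIFRXR ioctl: one entry for
 * the standard ring and, when valid, one for the jumbo ring. Consumers
 * such as systat(1) use this to display per-ring buffer usage.
 */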
int
bge_rxrinfo(struct bge_softc *sc, struct if_rxrinfo *ifri)
{
	struct if_rxring_info ifr[2];
	u_int n = 0;

	memset(ifr, 0, sizeof(ifr));

	if (ISSET(sc->bge_flags, BGE_RXRING_VALID)) {
		ifr[n].ifr_size = sc->bge_rx_std_len;
		strlcpy(ifr[n].ifr_name, "std", sizeof(ifr[n].ifr_name));
		ifr[n].ifr_info = sc->bge_std_ring;

		n++;
	}

	if (ISSET(sc->bge_flags, BGE_JUMBO_RXRING_VALID)) {
		ifr[n].ifr_size = BGE_JLEN;
		strlcpy(ifr[n].ifr_name, "jumbo", sizeof(ifr[n].ifr_name));
		ifr[n].ifr_info = sc->bge_jumbo_ring;

		n++;
	}

	return (if_rxr_info_ioctl(ifri, n, ifr));
}

void
bge_watchdog(struct ifnet *ifp)
{
	struct bge_softc *sc;

	sc = ifp->if_softc;

	printf("%s: watchdog timeout -- resetting\n", sc->bge_dev.dv_xname);

	bge_init(sc);

	ifp->if_oerrors++;
}

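/*
 * Clear the enable bit of a state machine block and busy-wait for the
 * block to report itself stopped. With BGE_TIMEOUT iterations of
 * delay(100), this allows up to BGE_TIMEOUT * 100us before giving up.
 */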
void
bge_stop_block(struct bge_softc *sc, bus_size_t reg, u_int32_t bit)
{
	int i;

	BGE_CLRBIT(sc, reg, bit);

	for (i = 0; i < BGE_TIMEOUT; i++) {
		if ((CSR_READ_4(sc, reg) & bit) == 0)
			return;
		delay(100);
	}

	DPRINTFN(5, ("%s: block failed to stop: reg 0x%lx, bit 0x%08x\n",
	    sc->bge_dev.dv_xname, (u_long) reg, bit));
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
void
bge_stop(struct bge_softc *sc, int softonly)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct ifmedia_entry *ifm;
	struct mii_data *mii;
	int mtmp, itmp;

	timeout_del(&sc->bge_timeout);
	timeout_del(&sc->bge_rxtimeout);
	timeout_del(&sc->bge_rxtimeout_jumbo);

	ifp->if_flags &= ~IFF_RUNNING;
	ifp->if_timer = 0;

	if (!softonly) {
		/*
		 * Tell firmware we're shutting down.
		 */
		/* bge_stop_fw(sc); */
		bge_sig_pre_reset(sc, BGE_RESET_SHUTDOWN);

		/*
		 * Disable all of the receiver blocks.
		 */
		bge_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
		bge_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
		bge_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
		if (BGE_IS_5700_FAMILY(sc))
			bge_stop_block(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
		bge_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
		bge_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
		bge_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);

		/*
		 * Disable all of the transmit blocks.
		 */
		bge_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
		bge_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
		bge_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
		bge_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
		bge_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
		if (BGE_IS_5700_FAMILY(sc))
			bge_stop_block(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
		bge_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);

		/*
		 * Shut down all of the memory managers and related
		 * state machines.
		 */
		bge_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
		bge_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
		if (BGE_IS_5700_FAMILY(sc))
			bge_stop_block(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);

		CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
		CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);

		if (!BGE_IS_5705_PLUS(sc)) {
			bge_stop_block(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
			bge_stop_block(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
		}

		bge_reset(sc);
		bge_sig_legacy(sc, BGE_RESET_SHUTDOWN);
		bge_sig_post_reset(sc, BGE_RESET_SHUTDOWN);

		/*
		 * Tell firmware we're shutting down.
		 */
		BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
	}

	intr_barrier(sc->bge_intrhand);
	ifq_barrier(&ifp->if_snd);

	ifq_clr_oactive(&ifp->if_snd);

	/* Free the RX lists. */
	bge_free_rx_ring_std(sc);

	/* Free jumbo RX list. */
	if (sc->bge_flags & BGE_JUMBO_RING)
		bge_free_rx_ring_jumbo(sc);

	/* Free TX buffers. */
	bge_free_tx_ring(sc);

	/*
	 * Isolate/power down the PHY, but leave the media selection
	 * unchanged so that things will be put back to normal when
	 * we bring the interface back up.
	 */
	if (!(sc->bge_flags & BGE_FIBER_TBI)) {
		mii = &sc->bge_mii;
		itmp = ifp->if_flags;
		ifp->if_flags |= IFF_UP;
		ifm = mii->mii_media.ifm_cur;
		mtmp = ifm->ifm_media;
		ifm->ifm_media = IFM_ETHER|IFM_NONE;
		mii_mediachg(mii);
		ifm->ifm_media = mtmp;
		ifp->if_flags = itmp;
	}
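
	/*
	 * Temporarily selecting IFM_ETHER|IFM_NONE makes
	 * mii_mediachg() isolate the PHY; the saved media word and
	 * interface flags are then restored so the next bge_init()
	 * reselects the previous media.
	 */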

	sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;

	if (!softonly) {
		/* Clear MAC's link state (PHY may still have link UP). */
		BGE_STS_CLRBIT(sc, BGE_STS_LINK);
	}
}

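/*
 * Track link state changes. Depending on the chip this is driven by
 * MII interrupts (BCM5700), the TBI PCS sync bit, autopolling, or
 * plain mii_pollstat(), as the cases below show.
 */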
void
bge_link_upd(struct bge_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii = &sc->bge_mii;
	u_int32_t status;
	int link;

	/* Clear 'pending link event' flag */
	BGE_STS_CLRBIT(sc, BGE_STS_LINK_EVT);

	/*
	 * Process link state changes.
	 * Grrr. The link status word in the status block does
	 * not work correctly on the BCM5700 rev AX and BX chips,
	 * according to all available information. Hence, we have
	 * to enable MII interrupts in order to properly obtain
	 * async link changes. Unfortunately, this also means that
	 * we have to read the MAC status register to detect link
	 * changes, thereby adding an additional register access to
	 * the interrupt handler.
	 */
	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700) {
		status = CSR_READ_4(sc, BGE_MAC_STS);
		if (status & BGE_MACSTAT_MI_INTERRUPT) {
			mii_pollstat(mii);

			if (!BGE_STS_BIT(sc, BGE_STS_LINK) &&
			    mii->mii_media_status & IFM_ACTIVE &&
			    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
				BGE_STS_SETBIT(sc, BGE_STS_LINK);
			else if (BGE_STS_BIT(sc, BGE_STS_LINK) &&
			    (!(mii->mii_media_status & IFM_ACTIVE) ||
			    IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE))
				BGE_STS_CLRBIT(sc, BGE_STS_LINK);

			/* Clear the interrupt */
			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
			    BGE_EVTENB_MI_INTERRUPT);
			bge_miibus_readreg(&sc->bge_dev, sc->bge_phy_addr,
			    BRGPHY_MII_ISR);
			bge_miibus_writereg(&sc->bge_dev, sc->bge_phy_addr,
			    BRGPHY_MII_IMR, BRGPHY_INTRS);
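			/*
			 * Reading BRGPHY_MII_ISR acks the latched PHY
			 * interrupt, and rewriting BRGPHY_MII_IMR with
			 * BRGPHY_INTRS re-arms the events we care
			 * about (presumably link/speed/duplex changes,
			 * per the brgphy(4) mask).
			 */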
		}
		return;
	}

	if (sc->bge_flags & BGE_FIBER_TBI) {
		status = CSR_READ_4(sc, BGE_MAC_STS);
		if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
			if (!BGE_STS_BIT(sc, BGE_STS_LINK)) {
				BGE_STS_SETBIT(sc, BGE_STS_LINK);
				if (BGE_ASICREV(sc->bge_chipid) ==
				    BGE_ASICREV_BCM5704)
					BGE_CLRBIT(sc, BGE_MAC_MODE,
					    BGE_MACMODE_TBI_SEND_CFGS);
				CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
				status = CSR_READ_4(sc, BGE_MAC_MODE);
				link = (status & BGE_MACMODE_HALF_DUPLEX) ?
				    LINK_STATE_HALF_DUPLEX :
				    LINK_STATE_FULL_DUPLEX;
				ifp->if_baudrate = IF_Gbps(1);
				if (ifp->if_link_state != link) {
					ifp->if_link_state = link;
					if_link_state_change(ifp);
				}
			}
		} else if (BGE_STS_BIT(sc, BGE_STS_LINK)) {
			BGE_STS_CLRBIT(sc, BGE_STS_LINK);
			link = LINK_STATE_DOWN;
			ifp->if_baudrate = 0;
			if (ifp->if_link_state != link) {
				ifp->if_link_state = link;
				if_link_state_change(ifp);
			}
		}
	} else if (BGE_STS_BIT(sc, BGE_STS_AUTOPOLL)) {
		/*
		 * Some broken BCM chips have the
		 * BGE_STATFLAG_LINKSTATE_CHANGED bit in the status
		 * word always set. Work around this bug by reading
		 * the PHY link status directly.
		 */
		link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK) ?
		    BGE_STS_LINK : 0;

		if (BGE_STS_BIT(sc, BGE_STS_LINK) != link) {
			mii_pollstat(mii);

			if (!BGE_STS_BIT(sc, BGE_STS_LINK) &&
			    mii->mii_media_status & IFM_ACTIVE &&
			    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
				BGE_STS_SETBIT(sc, BGE_STS_LINK);
			else if (BGE_STS_BIT(sc, BGE_STS_LINK) &&
			    (!(mii->mii_media_status & IFM_ACTIVE) ||
			    IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE))
				BGE_STS_CLRBIT(sc, BGE_STS_LINK);
		}
	} else {
		/*
		 * For controllers that call mii_tick, we have to poll
		 * link status.
		 */
		mii_pollstat(mii);
	}

	/* Clear the attention */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
	    BGE_MACSTAT_LINK_CHANGED);
}