File: dev/pci/if_bge.c
Warning: line 3729, column 3: Value stored to 'cur_tx' is never read
1 | /* $OpenBSD: if_bge.c,v 1.402 2023/11/10 15:51:20 bluhm Exp $ */ |
2 | |
3 | /* |
4 | * Copyright (c) 2001 Wind River Systems |
5 | * Copyright (c) 1997, 1998, 1999, 2001 |
6 | * Bill Paul <wpaul@windriver.com>. All rights reserved. |
7 | * |
8 | * Redistribution and use in source and binary forms, with or without |
9 | * modification, are permitted provided that the following conditions |
10 | * are met: |
11 | * 1. Redistributions of source code must retain the above copyright |
12 | * notice, this list of conditions and the following disclaimer. |
13 | * 2. Redistributions in binary form must reproduce the above copyright |
14 | * notice, this list of conditions and the following disclaimer in the |
15 | * documentation and/or other materials provided with the distribution. |
16 | * 3. All advertising materials mentioning features or use of this software |
17 | * must display the following acknowledgement: |
18 | * This product includes software developed by Bill Paul. |
19 | * 4. Neither the name of the author nor the names of any co-contributors |
20 | * may be used to endorse or promote products derived from this software |
21 | * without specific prior written permission. |
22 | * |
23 | * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND |
24 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
25 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
26 | * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD |
27 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
28 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
29 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
30 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
31 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
32 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF |
33 | * THE POSSIBILITY OF SUCH DAMAGE. |
34 | * |
35 | * $FreeBSD: if_bge.c,v 1.25 2002/11/14 23:54:49 sam Exp $ |
36 | */ |
37 | |
38 | /* |
39 | * Broadcom BCM57xx/BCM590x family ethernet driver for OpenBSD. |
40 | * |
41 | * Written by Bill Paul <wpaul@windriver.com> |
42 | * Senior Engineer, Wind River Systems |
43 | */ |
44 | |
45 | /* |
46 | * The Broadcom BCM5700 is based on technology originally developed by |
47 | * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet |
48 | * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has |
49 | * two on-board MIPS R4000 CPUs and can have as much as 16MB of external |
50 | * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo |
51 | * frames, highly configurable RX filtering, and 16 RX and TX queues |
52 | * (which, along with RX filter rules, can be used for QOS applications). |
53 | * Other features, such as TCP segmentation, may be available as part |
54 | * of value-added firmware updates. Unlike the Tigon I and Tigon II, |
55 | * firmware images can be stored in hardware and need not be compiled |
56 | * into the driver. |
57 | * |
58 | * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will |
59 | * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus. |
60 | * |
61 | * The BCM5701 is a single-chip solution incorporating both the BCM5700 |
62 | * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701 |
63 | * does not support external SSRAM. |
64 | * |
65 | * Broadcom also produces a variation of the BCM5700 under the "Altima" |
66 | * brand name, which is functionally similar but lacks PCI-X support. |
67 | * |
68 | * Without external SSRAM, you can only have at most 4 TX rings, |
69 | * and the use of the mini RX ring is disabled. This seems to imply |
70 | * that these features are simply not available on the BCM5701. As a |
71 | * result, this driver does not implement any support for the mini RX |
72 | * ring. |
73 | */ |
74 | |
75 | #include "bpfilter.h" |
76 | #include "vlan.h" |
77 | #include "kstat.h" |
78 | |
79 | #include <sys/param.h> |
80 | #include <sys/systm.h> |
81 | #include <sys/sockio.h> |
82 | #include <sys/mbuf.h> |
83 | #include <sys/malloc.h> |
84 | #include <sys/kernel.h> |
85 | #include <sys/device.h> |
86 | #include <sys/timeout.h> |
87 | #include <sys/socket.h> |
88 | #include <sys/atomic.h> |
89 | #include <sys/kstat.h> |
90 | |
91 | #include <net/if.h> |
92 | #include <net/if_media.h> |
93 | |
94 | #include <netinet/in.h> |
95 | #include <netinet/if_ether.h> |
96 | |
97 | #if NBPFILTER > 0 |
98 | #include <net/bpf.h> |
99 | #endif |
100 | |
101 | #if defined(__sparc64__) || defined(__HAVE_FDT) |
102 | #include <dev/ofw/openfirm.h> |
103 | #endif |
104 | |
105 | #include <dev/pci/pcireg.h> |
106 | #include <dev/pci/pcivar.h> |
107 | #include <dev/pci/pcidevs.h> |
108 | |
109 | #include <dev/mii/mii.h> |
110 | #include <dev/mii/miivar.h> |
111 | #include <dev/mii/miidevs.h> |
112 | #include <dev/mii/brgphyreg.h> |
113 | |
114 | #include <dev/pci/if_bgereg.h> |
115 | |
116 | #define ETHER_MIN_NOPAD		(ETHER_MIN_LEN - ETHER_CRC_LEN)	/* i.e., 60 */ |
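 | /* |
 |  * Frames shorter than ETHER_MIN_NOPAD must be padded out before they |
 |  * are handed to the chip; bge_cksum_pad() below does this, which is |
 |  * believed to sidestep checksum-offload trouble with runt frames on |
 |  * some chips. |
 |  */ |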
117 | |
118 | const struct bge_revision * bge_lookup_rev(u_int32_t); |
119 | int bge_can_use_msi(struct bge_softc *); |
120 | int bge_probe(struct device *, void *, void *); |
121 | void bge_attach(struct device *, struct device *, void *); |
122 | int bge_detach(struct device *, int); |
123 | int bge_activate(struct device *, int); |
124 | |
125 | const struct cfattach bge_ca = { |
126 | 	sizeof(struct bge_softc), bge_probe, bge_attach, bge_detach, |
127 | 	bge_activate |
128 | }; |
129 | |
130 | struct cfdriver bge_cd = { |
131 | 	NULL, "bge", DV_IFNET |
132 | }; |
133 | |
134 | void bge_txeof(struct bge_softc *); |
135 | void bge_rxcsum(struct bge_softc *, struct bge_rx_bd *, struct mbuf *); |
136 | void bge_rxeof(struct bge_softc *); |
137 | |
138 | void bge_tick(void *); |
139 | void bge_stats_update(struct bge_softc *); |
140 | void bge_stats_update_regs(struct bge_softc *); |
141 | int bge_cksum_pad(struct mbuf *); |
142 | int bge_encap(struct bge_softc *, struct mbuf *, int *); |
143 | int bge_compact_dma_runt(struct mbuf *); |
144 | |
145 | int bge_intr(void *); |
146 | void bge_start(struct ifqueue *); |
147 | int bge_ioctl(struct ifnet *, u_long, caddr_t); |
148 | int bge_rxrinfo(struct bge_softc *, struct if_rxrinfo *); |
149 | void bge_init(void *); |
150 | void bge_stop_block(struct bge_softc *, bus_size_t, u_int32_t); |
151 | void bge_stop(struct bge_softc *, int); |
152 | void bge_watchdog(struct ifnet *); |
153 | int bge_ifmedia_upd(struct ifnet *); |
154 | void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *); |
155 | |
156 | u_int8_t bge_nvram_getbyte(struct bge_softc *, int, u_int8_t *); |
157 | int bge_read_nvram(struct bge_softc *, caddr_t, int, int); |
158 | u_int8_t bge_eeprom_getbyte(struct bge_softc *, int, u_int8_t *); |
159 | int bge_read_eeprom(struct bge_softc *, caddr_t, int, int); |
160 | |
161 | void bge_iff(struct bge_softc *); |
162 | |
163 | int bge_newbuf_jumbo(struct bge_softc *, int); |
164 | int bge_init_rx_ring_jumbo(struct bge_softc *); |
165 | void bge_fill_rx_ring_jumbo(struct bge_softc *); |
166 | void bge_free_rx_ring_jumbo(struct bge_softc *); |
167 | |
168 | int bge_newbuf(struct bge_softc *, int); |
169 | int bge_init_rx_ring_std(struct bge_softc *); |
170 | void bge_rxtick(void *); |
171 | void bge_fill_rx_ring_std(struct bge_softc *); |
172 | void bge_free_rx_ring_std(struct bge_softc *); |
173 | |
174 | void bge_free_tx_ring(struct bge_softc *); |
175 | int bge_init_tx_ring(struct bge_softc *); |
176 | |
177 | void bge_chipinit(struct bge_softc *); |
178 | int bge_blockinit(struct bge_softc *); |
179 | u_int32_t bge_dma_swap_options(struct bge_softc *); |
180 | int bge_phy_addr(struct bge_softc *); |
181 | |
182 | u_int32_t bge_readmem_ind(struct bge_softc *, int); |
183 | void bge_writemem_ind(struct bge_softc *, int, int); |
184 | void bge_writereg_ind(struct bge_softc *, int, int); |
185 | void bge_writembx(struct bge_softc *, int, int); |
186 | |
187 | int bge_miibus_readreg(struct device *, int, int); |
188 | void bge_miibus_writereg(struct device *, int, int, int); |
189 | void bge_miibus_statchg(struct device *); |
190 | |
191 | #define BGE_RESET_SHUTDOWN	0 |
192 | #define BGE_RESET_START		1 |
193 | #define BGE_RESET_SUSPEND	2 |
194 | void bge_sig_post_reset(struct bge_softc *, int); |
195 | void bge_sig_legacy(struct bge_softc *, int); |
196 | void bge_sig_pre_reset(struct bge_softc *, int); |
197 | void bge_stop_fw(struct bge_softc *, int); |
198 | void bge_reset(struct bge_softc *); |
199 | void bge_link_upd(struct bge_softc *); |
200 | |
201 | void bge_ape_lock_init(struct bge_softc *); |
202 | void bge_ape_read_fw_ver(struct bge_softc *); |
203 | int bge_ape_lock(struct bge_softc *, int); |
204 | void bge_ape_unlock(struct bge_softc *, int); |
205 | void bge_ape_send_event(struct bge_softc *, uint32_t); |
206 | void bge_ape_driver_state_change(struct bge_softc *, int); |
207 | |
208 | #if NKSTAT > 0 |
209 | void bge_kstat_attach(struct bge_softc *); |
210 | |
211 | enum { |
212 | 	bge_stat_out_octets = 0, |
213 | 	bge_stat_collisions, |
214 | 	bge_stat_xon_sent, |
215 | 	bge_stat_xoff_sent, |
216 | 	bge_stat_xmit_errors, |
217 | 	bge_stat_coll_frames, |
218 | 	bge_stat_multicoll_frames, |
219 | 	bge_stat_deferred_xmit, |
220 | 	bge_stat_excess_coll, |
221 | 	bge_stat_late_coll, |
222 | 	bge_stat_out_ucast_pkt, |
223 | 	bge_stat_out_mcast_pkt, |
224 | 	bge_stat_out_bcast_pkt, |
225 | 	bge_stat_in_octets, |
226 | 	bge_stat_fragments, |
227 | 	bge_stat_in_ucast_pkt, |
228 | 	bge_stat_in_mcast_pkt, |
229 | 	bge_stat_in_bcast_pkt, |
230 | 	bge_stat_fcs_errors, |
231 | 	bge_stat_align_errors, |
232 | 	bge_stat_xon_rcvd, |
233 | 	bge_stat_xoff_rcvd, |
234 | 	bge_stat_ctrl_frame_rcvd, |
235 | 	bge_stat_xoff_entered, |
236 | 	bge_stat_too_long_frames, |
237 | 	bge_stat_jabbers, |
238 | 	bge_stat_too_short_pkts, |
239 |  |
240 | 	bge_stat_dma_rq_full, |
241 | 	bge_stat_dma_hprq_full, |
242 | 	bge_stat_sdc_queue_full, |
243 | 	bge_stat_nic_sendprod_set, |
244 | 	bge_stat_status_updated, |
245 | 	bge_stat_irqs, |
246 | 	bge_stat_avoided_irqs, |
247 | 	bge_stat_tx_thresh_hit, |
248 |  |
249 | 	bge_stat_filtdrop, |
250 | 	bge_stat_dma_wrq_full, |
251 | 	bge_stat_dma_hpwrq_full, |
252 | 	bge_stat_out_of_bds, |
253 | 	bge_stat_if_in_drops, |
254 | 	bge_stat_if_in_errors, |
255 | 	bge_stat_rx_thresh_hit, |
256 | }; |
257 | |
258 | #endif |
259 | |
260 | #ifdef BGE_DEBUG |
261 | #define DPRINTF(x) do { if (bgedebug) printf x; } while (0) |
262 | #define DPRINTFN(n,x) do { if (bgedebug >= (n)) printf x; } while (0) |
263 | int bgedebug = 0; |
264 | #else |
265 | #define DPRINTF(x) |
266 | #define DPRINTFN(n,x) |
267 | #endif |
268 | |
269 | /* |
270 | * Various supported device vendors/types and their names. Note: the |
271 | * spec seems to indicate that the hardware still has Alteon's vendor |
272 | * ID burned into it, though it will always be overridden by the vendor |
273 | * ID in the EEPROM. Just to be safe, we cover all possibilities. |
274 | */ |
275 | const struct pci_matchid bge_devices[] = { |
276 | 	{ PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_BCM5700 }, |
277 | 	{ PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_BCM5701 }, |
278 |  |
279 | 	{ PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1000 }, |
280 | 	{ PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1001 }, |
281 | 	{ PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1003 }, |
282 | 	{ PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC9100 }, |
283 |  |
284 | 	{ PCI_VENDOR_APPLE, PCI_PRODUCT_APPLE_BCM5701 }, |
285 |  |
286 | 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5700 }, |
287 | 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5701 }, |
288 | 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702 }, |
289 | 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702_ALT }, |
290 | 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702X }, |
291 | 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703 }, |
292 | 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703_ALT }, |
293 | 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703X }, |
294 | 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704C }, |
295 | 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704S }, |
296 | 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704S_ALT }, |
297 | 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705 }, |
298 | 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705F }, |
299 | 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705K }, |
300 | 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705M }, |
301 | 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705M_ALT }, |
302 | 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5714 }, |
303 | 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5714S }, |
304 | 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5715 }, |
305 | 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5715S }, |
306 | 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5717 }, |
307 | 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5717C }, |
308 | 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5718 }, |
309 | 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5719 }, |
310 | 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5720 }, |
311 | 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5721 }, |
312 | 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5722 }, |
313 | 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5723 }, |
314 | 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5725 }, |
315 | 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5727 }, |
316 | 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751 }, |
317 | 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751F }, |
318 | 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751M }, |
319 | 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5752 }, |
320 | 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5752M }, |
321 | 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5753 }, |
322 | 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5753F }, |
323 | 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5753M }, |
324 | 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5754 }, |
325 | 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5754M }, |
326 | 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5755 }, |
327 | 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5755M }, |
328 | 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5756 }, |
329 | 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5761 }, |
330 | 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5761E }, |
331 | 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5761S }, |
332 | 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5761SE }, |
333 | 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5762 }, |
334 | 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5764 }, |
335 | 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5780 }, |
336 | 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5780S }, |
337 | 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5781 }, |
338 | 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5782 }, |
339 | 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5784 }, |
340 | 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5785F }, |
341 | 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5785G }, |
342 | 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5786 }, |
343 | 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5787 }, |
344 | 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5787F }, |
345 | 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5787M }, |
346 | 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5788 }, |
347 | 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5789 }, |
348 | 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5901 }, |
349 | 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5901A2 }, |
350 | 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5903M }, |
351 | 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5906 }, |
352 | 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5906M }, |
353 | 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57760 }, |
354 | 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57761 }, |
355 | 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57762 }, |
356 | 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57764 }, |
357 | 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57765 }, |
358 | 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57766 }, |
359 | 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57767 }, |
360 | 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57780 }, |
361 | 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57781 }, |
362 | 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57782 }, |
363 | 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57785 }, |
364 | 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57786 }, |
365 | 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57787 }, |
366 | 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57788 }, |
367 | 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57790 }, |
368 | 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57791 }, |
369 | 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57795 }, |
370 |  |
371 | 	{ PCI_VENDOR_FUJITSU, PCI_PRODUCT_FUJITSU_PW008GE4 }, |
372 | 	{ PCI_VENDOR_FUJITSU, PCI_PRODUCT_FUJITSU_PW008GE5 }, |
373 | 	{ PCI_VENDOR_FUJITSU, PCI_PRODUCT_FUJITSU_PP250_450_LAN }, |
374 |  |
375 | 	{ PCI_VENDOR_SCHNEIDERKOCH, PCI_PRODUCT_SCHNEIDERKOCH_SK9D21 }, |
376 |  |
377 | 	{ PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3C996 } |
378 | }; |
379 | |
380 | #define BGE_IS_JUMBO_CAPABLE(sc)	((sc)->bge_flags & BGE_JUMBO_CAPABLE) |
381 | #define BGE_IS_5700_FAMILY(sc)		((sc)->bge_flags & BGE_5700_FAMILY) |
382 | #define BGE_IS_5705_PLUS(sc)		((sc)->bge_flags & BGE_5705_PLUS) |
383 | #define BGE_IS_5714_FAMILY(sc)		((sc)->bge_flags & BGE_5714_FAMILY) |
384 | #define BGE_IS_575X_PLUS(sc)		((sc)->bge_flags & BGE_575X_PLUS) |
385 | #define BGE_IS_5755_PLUS(sc)		((sc)->bge_flags & BGE_5755_PLUS) |
386 | #define BGE_IS_5717_PLUS(sc)		((sc)->bge_flags & BGE_5717_PLUS) |
387 | #define BGE_IS_57765_PLUS(sc)		((sc)->bge_flags & BGE_57765_PLUS) |
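 | /* |
 |  * These predicates are how chip-specific paths are guarded throughout |
 |  * the driver; for instance, statistics are gathered from registers on |
 |  * newer chips and from the statistics block on older ones: |
 |  * |
 |  *	if (BGE_IS_5705_PLUS(sc)) |
 |  *		bge_stats_update_regs(sc); |
 |  *	else |
 |  *		bge_stats_update(sc); |
 |  */ |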
388 | |
389 | static const struct bge_revision { |
390 | 	u_int32_t br_chipid; |
391 | 	const char *br_name; |
392 | } bge_revisions[] = { |
393 | 	{ BGE_CHIPID_BCM5700_A0, "BCM5700 A0" }, |
394 | 	{ BGE_CHIPID_BCM5700_A1, "BCM5700 A1" }, |
395 | 	{ BGE_CHIPID_BCM5700_B0, "BCM5700 B0" }, |
396 | 	{ BGE_CHIPID_BCM5700_B1, "BCM5700 B1" }, |
397 | 	{ BGE_CHIPID_BCM5700_B2, "BCM5700 B2" }, |
398 | 	{ BGE_CHIPID_BCM5700_B3, "BCM5700 B3" }, |
399 | 	{ BGE_CHIPID_BCM5700_ALTIMA, "BCM5700 Altima" }, |
400 | 	{ BGE_CHIPID_BCM5700_C0, "BCM5700 C0" }, |
401 | 	{ BGE_CHIPID_BCM5701_A0, "BCM5701 A0" }, |
402 | 	{ BGE_CHIPID_BCM5701_B0, "BCM5701 B0" }, |
403 | 	{ BGE_CHIPID_BCM5701_B2, "BCM5701 B2" }, |
404 | 	{ BGE_CHIPID_BCM5701_B5, "BCM5701 B5" }, |
405 | 	/* the 5702 and 5703 share the same ASIC ID */ |
406 | 	{ BGE_CHIPID_BCM5703_A0, "BCM5702/5703 A0" }, |
407 | 	{ BGE_CHIPID_BCM5703_A1, "BCM5702/5703 A1" }, |
408 | 	{ BGE_CHIPID_BCM5703_A2, "BCM5702/5703 A2" }, |
409 | 	{ BGE_CHIPID_BCM5703_A3, "BCM5702/5703 A3" }, |
410 | 	{ BGE_CHIPID_BCM5703_B0, "BCM5702/5703 B0" }, |
411 | 	{ BGE_CHIPID_BCM5704_A0, "BCM5704 A0" }, |
412 | 	{ BGE_CHIPID_BCM5704_A1, "BCM5704 A1" }, |
413 | 	{ BGE_CHIPID_BCM5704_A2, "BCM5704 A2" }, |
414 | 	{ BGE_CHIPID_BCM5704_A3, "BCM5704 A3" }, |
415 | 	{ BGE_CHIPID_BCM5704_B0, "BCM5704 B0" }, |
416 | 	{ BGE_CHIPID_BCM5705_A0, "BCM5705 A0" }, |
417 | 	{ BGE_CHIPID_BCM5705_A1, "BCM5705 A1" }, |
418 | 	{ BGE_CHIPID_BCM5705_A2, "BCM5705 A2" }, |
419 | 	{ BGE_CHIPID_BCM5705_A3, "BCM5705 A3" }, |
420 | 	{ BGE_CHIPID_BCM5750_A0, "BCM5750 A0" }, |
421 | 	{ BGE_CHIPID_BCM5750_A1, "BCM5750 A1" }, |
422 | 	{ BGE_CHIPID_BCM5750_A3, "BCM5750 A3" }, |
423 | 	{ BGE_CHIPID_BCM5750_B0, "BCM5750 B0" }, |
424 | 	{ BGE_CHIPID_BCM5750_B1, "BCM5750 B1" }, |
425 | 	{ BGE_CHIPID_BCM5750_C0, "BCM5750 C0" }, |
426 | 	{ BGE_CHIPID_BCM5750_C1, "BCM5750 C1" }, |
427 | 	{ BGE_CHIPID_BCM5750_C2, "BCM5750 C2" }, |
428 | 	{ BGE_CHIPID_BCM5714_A0, "BCM5714 A0" }, |
429 | 	{ BGE_CHIPID_BCM5752_A0, "BCM5752 A0" }, |
430 | 	{ BGE_CHIPID_BCM5752_A1, "BCM5752 A1" }, |
431 | 	{ BGE_CHIPID_BCM5752_A2, "BCM5752 A2" }, |
432 | 	{ BGE_CHIPID_BCM5714_B0, "BCM5714 B0" }, |
433 | 	{ BGE_CHIPID_BCM5714_B3, "BCM5714 B3" }, |
434 | 	{ BGE_CHIPID_BCM5715_A0, "BCM5715 A0" }, |
435 | 	{ BGE_CHIPID_BCM5715_A1, "BCM5715 A1" }, |
436 | 	{ BGE_CHIPID_BCM5715_A3, "BCM5715 A3" }, |
437 | 	{ BGE_CHIPID_BCM5717_A0, "BCM5717 A0" }, |
438 | 	{ BGE_CHIPID_BCM5717_B0, "BCM5717 B0" }, |
439 | 	{ BGE_CHIPID_BCM5719_A0, "BCM5719 A0" }, |
440 | 	{ BGE_CHIPID_BCM5719_A1, "BCM5719 A1" }, |
441 | 	{ BGE_CHIPID_BCM5720_A0, "BCM5720 A0" }, |
442 | 	{ BGE_CHIPID_BCM5755_A0, "BCM5755 A0" }, |
443 | 	{ BGE_CHIPID_BCM5755_A1, "BCM5755 A1" }, |
444 | 	{ BGE_CHIPID_BCM5755_A2, "BCM5755 A2" }, |
445 | 	{ BGE_CHIPID_BCM5755_C0, "BCM5755 C0" }, |
446 | 	{ BGE_CHIPID_BCM5761_A0, "BCM5761 A0" }, |
447 | 	{ BGE_CHIPID_BCM5761_A1, "BCM5761 A1" }, |
448 | 	{ BGE_CHIPID_BCM5762_A0, "BCM5762 A0" }, |
449 | 	{ BGE_CHIPID_BCM5762_B0, "BCM5762 B0" }, |
450 | 	{ BGE_CHIPID_BCM5784_A0, "BCM5784 A0" }, |
451 | 	{ BGE_CHIPID_BCM5784_A1, "BCM5784 A1" }, |
452 | 	/* the 5754 and 5787 share the same ASIC ID */ |
453 | 	{ BGE_CHIPID_BCM5787_A0, "BCM5754/5787 A0" }, |
454 | 	{ BGE_CHIPID_BCM5787_A1, "BCM5754/5787 A1" }, |
455 | 	{ BGE_CHIPID_BCM5787_A2, "BCM5754/5787 A2" }, |
456 | 	{ BGE_CHIPID_BCM5906_A1, "BCM5906 A1" }, |
457 | 	{ BGE_CHIPID_BCM5906_A2, "BCM5906 A2" }, |
458 | 	{ BGE_CHIPID_BCM57765_A0, "BCM57765 A0" }, |
459 | 	{ BGE_CHIPID_BCM57765_B0, "BCM57765 B0" }, |
460 | 	{ BGE_CHIPID_BCM57766_A0, "BCM57766 A0" }, |
461 | 	{ BGE_CHIPID_BCM57766_A1, "BCM57766 A1" }, |
462 | 	{ BGE_CHIPID_BCM57780_A0, "BCM57780 A0" }, |
463 | 	{ BGE_CHIPID_BCM57780_A1, "BCM57780 A1" }, |
464 |  |
465 | 	{ 0, NULL } |
466 | }; |
467 | |
468 | /* |
469 | * Some defaults for major revisions, so that newer steppings |
470 | * that we don't know about have a shot at working. |
471 | */ |
472 | static const struct bge_revision bge_majorrevs[] = { |
473 | 	{ BGE_ASICREV_BCM5700, "unknown BCM5700" }, |
474 | 	{ BGE_ASICREV_BCM5701, "unknown BCM5701" }, |
475 | 	/* 5702 and 5703 share the same ASIC ID */ |
476 | 	{ BGE_ASICREV_BCM5703, "unknown BCM5703" }, |
477 | 	{ BGE_ASICREV_BCM5704, "unknown BCM5704" }, |
478 | 	{ BGE_ASICREV_BCM5705, "unknown BCM5705" }, |
479 | 	{ BGE_ASICREV_BCM5750, "unknown BCM5750" }, |
480 | 	{ BGE_ASICREV_BCM5714, "unknown BCM5714" }, |
481 | 	{ BGE_ASICREV_BCM5714_A0, "unknown BCM5714" }, |
482 | 	{ BGE_ASICREV_BCM5752, "unknown BCM5752" }, |
483 | 	{ BGE_ASICREV_BCM5780, "unknown BCM5780" }, |
484 | 	{ BGE_ASICREV_BCM5755, "unknown BCM5755" }, |
485 | 	{ BGE_ASICREV_BCM5761, "unknown BCM5761" }, |
486 | 	{ BGE_ASICREV_BCM5784, "unknown BCM5784" }, |
487 | 	{ BGE_ASICREV_BCM5785, "unknown BCM5785" }, |
488 | 	/* 5754 and 5787 share the same ASIC ID */ |
489 | 	{ BGE_ASICREV_BCM5787, "unknown BCM5754/5787" }, |
490 | 	{ BGE_ASICREV_BCM5906, "unknown BCM5906" }, |
491 | 	{ BGE_ASICREV_BCM57765, "unknown BCM57765" }, |
492 | 	{ BGE_ASICREV_BCM57766, "unknown BCM57766" }, |
493 | 	{ BGE_ASICREV_BCM57780, "unknown BCM57780" }, |
494 | 	{ BGE_ASICREV_BCM5717, "unknown BCM5717" }, |
495 | 	{ BGE_ASICREV_BCM5719, "unknown BCM5719" }, |
496 | 	{ BGE_ASICREV_BCM5720, "unknown BCM5720" }, |
497 | 	{ BGE_ASICREV_BCM5762, "unknown BCM5762" }, |
498 |  |
499 | 	{ 0, NULL } |
500 | }; |
501 | |
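 | /* |
 |  * Indirect access to NIC-local memory: a register window into the |
 |  * chip's internal address space is selected by writing the target |
 |  * address to BGE_PCI_MEMWIN_BASEADDR in PCI config space, the data is |
 |  * moved through BGE_PCI_MEMWIN_DATA, and the window is then closed |
 |  * again so it is never left pointing at stale state. |
 |  */ |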
502 | u_int32_t |
503 | bge_readmem_ind(struct bge_softc *sc, int off) |
504 | { |
505 | 	struct pci_attach_args *pa = &(sc->bge_pa); |
506 | 	u_int32_t val; |
507 |  |
508 | 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906 && |
509 | 	    off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4) |
510 | 		return (0); |
511 |  |
512 | 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_BASEADDR, off); |
513 | 	val = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_DATA); |
514 | 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_BASEADDR, 0); |
515 | 	return (val); |
516 | } |
517 | |
518 | void |
519 | bge_writemem_ind(struct bge_softc *sc, int off, int val) |
520 | { |
521 | 	struct pci_attach_args *pa = &(sc->bge_pa); |
522 |  |
523 | 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906 && |
524 | 	    off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4) |
525 | 		return; |
526 |  |
527 | 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_BASEADDR, off); |
528 | 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_DATA, val); |
529 | 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_BASEADDR, 0); |
530 | } |
531 | |
532 | void |
533 | bge_writereg_ind(struct bge_softc *sc, int off, int val) |
534 | { |
535 | 	struct pci_attach_args *pa = &(sc->bge_pa); |
536 |  |
537 | 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_BASEADDR, off); |
538 | 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_DATA, val); |
539 | } |
540 | |
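 | /* |
 |  * Mailbox writes: the BCM5906 uses the low-priority mailbox region, |
 |  * so the standard mailbox offsets are rebased onto their low-priority |
 |  * equivalents before the write is issued. |
 |  */ |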
541 | void |
542 | bge_writembx(struct bge_softc *sc, int off, int val) |
543 | { |
544 | 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) |
545 | 		off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI; |
546 |  |
547 | 	CSR_WRITE_4(sc, off, val); |
548 | } |
549 | |
550 | /* |
551 | * Clear all stale locks and select the lock for this driver instance. |
552 | */ |
553 | void |
554 | bge_ape_lock_init(struct bge_softc *sc) |
555 | { |
556 | 	struct pci_attach_args *pa = &(sc->bge_pa); |
557 | 	uint32_t bit, regbase; |
558 | 	int i; |
559 |  |
560 | 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761) |
561 | 		regbase = BGE_APE_LOCK_GRANT; |
562 | 	else |
563 | 		regbase = BGE_APE_PER_LOCK_GRANT; |
564 |  |
565 | 	/* Clear any stale locks. */ |
566 | 	for (i = BGE_APE_LOCK_PHY0; i <= BGE_APE_LOCK_GPIO; i++) { |
567 | 		switch (i) { |
568 | 		case BGE_APE_LOCK_PHY0: |
569 | 		case BGE_APE_LOCK_PHY1: |
570 | 		case BGE_APE_LOCK_PHY2: |
571 | 		case BGE_APE_LOCK_PHY3: |
572 | 			bit = BGE_APE_LOCK_GRANT_DRIVER0; |
573 | 			break; |
574 | 		default: |
575 | 			if (pa->pa_function == 0) |
576 | 				bit = BGE_APE_LOCK_GRANT_DRIVER0; |
577 | 			else |
578 | 				bit = (1 << pa->pa_function); |
579 | 		} |
580 | 		APE_WRITE_4(sc, regbase + 4 * i, bit); |
581 | 	} |
582 |  |
583 | 	/* Select the PHY lock based on the device's function number. */ |
584 | 	switch (pa->pa_function) { |
585 | 	case 0: |
586 | 		sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY0; |
587 | 		break; |
588 | 	case 1: |
589 | 		sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY1; |
590 | 		break; |
591 | 	case 2: |
592 | 		sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY2; |
593 | 		break; |
594 | 	case 3: |
595 | 		sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY3; |
596 | 		break; |
597 | 	default: |
598 | 		printf("%s: PHY lock not supported on function %d\n", |
599 | 		    sc->bge_dev.dv_xname, pa->pa_function); |
600 | 		break; |
601 | 	} |
602 | } |
603 | |
604 | /* |
605 | * Check for APE firmware, set flags, and print version info. |
606 | */ |
607 | void |
608 | bge_ape_read_fw_ver(struct bge_softc *sc) |
609 | { |
610 | 	const char *fwtype; |
611 | 	uint32_t apedata, features; |
612 |  |
613 | 	/* Check for a valid APE signature in shared memory. */ |
614 | 	apedata = APE_READ_4(sc, BGE_APE_SEG_SIG); |
615 | 	if (apedata != BGE_APE_SEG_SIG_MAGIC) { |
616 | 		sc->bge_mfw_flags &= ~BGE_MFW_ON_APE; |
617 | 		return; |
618 | 	} |
619 |  |
620 | 	/* Check if APE firmware is running. */ |
621 | 	apedata = APE_READ_4(sc, BGE_APE_FW_STATUS); |
622 | 	if ((apedata & BGE_APE_FW_STATUS_READY) == 0) { |
623 | 		printf("%s: APE signature found but FW status not ready! " |
624 | 		    "0x%08x\n", sc->bge_dev.dv_xname, apedata); |
625 | 		return; |
626 | 	} |
627 |  |
628 | 	sc->bge_mfw_flags |= BGE_MFW_ON_APE; |
629 |  |
630 | 	/* Fetch the APE firmware type and version. */ |
631 | 	apedata = APE_READ_4(sc, BGE_APE_FW_VERSION); |
632 | 	features = APE_READ_4(sc, BGE_APE_FW_FEATURES); |
633 | 	if ((features & BGE_APE_FW_FEATURE_NCSI) != 0) { |
634 | 		sc->bge_mfw_flags |= BGE_MFW_TYPE_NCSI; |
635 | 		fwtype = "NCSI"; |
636 | 	} else if ((features & BGE_APE_FW_FEATURE_DASH) != 0) { |
637 | 		sc->bge_mfw_flags |= BGE_MFW_TYPE_DASH; |
638 | 		fwtype = "DASH"; |
639 | 	} else |
640 | 		fwtype = "UNKN"; |
641 |  |
642 | 	/* Print the APE firmware version. */ |
643 | 	printf(", APE firmware %s %d.%d.%d.%d", fwtype, |
644 | 	    (apedata & BGE_APE_FW_VERSION_MAJMSK) >> BGE_APE_FW_VERSION_MAJSFT, |
645 | 	    (apedata & BGE_APE_FW_VERSION_MINMSK) >> BGE_APE_FW_VERSION_MINSFT, |
646 | 	    (apedata & BGE_APE_FW_VERSION_REVMSK) >> BGE_APE_FW_VERSION_REVSFT, |
647 | 	    (apedata & BGE_APE_FW_VERSION_BLDMSK)); |
648 | } |
649 | |
650 | int |
651 | bge_ape_lock(struct bge_softc *sc, int locknum) |
652 | { |
653 | 	struct pci_attach_args *pa = &(sc->bge_pa); |
654 | 	uint32_t bit, gnt, req, status; |
655 | 	int i, off; |
656 |  |
657 | 	if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0) |
658 | 		return (0); |
659 |  |
660 | 	/* Lock request/grant registers have different bases. */ |
661 | 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761) { |
662 | 		req = BGE_APE_LOCK_REQ; |
663 | 		gnt = BGE_APE_LOCK_GRANT; |
664 | 	} else { |
665 | 		req = BGE_APE_PER_LOCK_REQ; |
666 | 		gnt = BGE_APE_PER_LOCK_GRANT; |
667 | 	} |
668 |  |
669 | 	off = 4 * locknum; |
670 |  |
671 | 	switch (locknum) { |
672 | 	case BGE_APE_LOCK_GPIO: |
673 | 		/* Lock required when using GPIO. */ |
674 | 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761) |
675 | 			return (0); |
676 | 		if (pa->pa_function == 0) |
677 | 			bit = BGE_APE_LOCK_REQ_DRIVER0; |
678 | 		else |
679 | 			bit = (1 << pa->pa_function); |
680 | 		break; |
681 | 	case BGE_APE_LOCK_GRC: |
682 | 		/* Lock required to reset the device. */ |
683 | 		if (pa->pa_function == 0) |
684 | 			bit = BGE_APE_LOCK_REQ_DRIVER0; |
685 | 		else |
686 | 			bit = (1 << pa->pa_function); |
687 | 		break; |
688 | 	case BGE_APE_LOCK_MEM: |
689 | 		/* Lock required when accessing certain APE memory. */ |
690 | 		if (pa->pa_function == 0) |
691 | 			bit = BGE_APE_LOCK_REQ_DRIVER0; |
692 | 		else |
693 | 			bit = (1 << pa->pa_function); |
694 | 		break; |
695 | 	case BGE_APE_LOCK_PHY0: |
696 | 	case BGE_APE_LOCK_PHY1: |
697 | 	case BGE_APE_LOCK_PHY2: |
698 | 	case BGE_APE_LOCK_PHY3: |
699 | 		/* Lock required when accessing PHYs. */ |
700 | 		bit = BGE_APE_LOCK_REQ_DRIVER0; |
701 | 		break; |
702 | 	default: |
703 | 		return (EINVAL); |
704 | 	} |
705 |  |
706 | 	/* Request a lock. */ |
707 | 	APE_WRITE_4(sc, req + off, bit); |
708 |  |
709 | 	/* Wait up to 1 second to acquire lock. */ |
710 | 	for (i = 0; i < 20000; i++) { |
711 | 		status = APE_READ_4(sc, gnt + off); |
712 | 		if (status == bit) |
713 | 			break; |
714 | 		DELAY(50); |
715 | 	} |
716 |  |
717 | 	/* Handle any errors. */ |
718 | 	if (status != bit) { |
719 | 		printf("%s: APE lock %d request failed! " |
720 | 		    "request = 0x%04x[0x%04x], status = 0x%04x[0x%04x]\n", |
721 | 		    sc->bge_dev.dv_xname, |
722 | 		    locknum, req + off, bit & 0xFFFF, gnt + off, |
723 | 		    status & 0xFFFF); |
724 | 		/* Revoke the lock request. */ |
725 | 		APE_WRITE_4(sc, gnt + off, bit); |
726 | 		return (EBUSY); |
727 | 	} |
728 |  |
729 | 	return (0); |
730 | } |
731 | |
732 | void |
733 | bge_ape_unlock(struct bge_softc *sc, int locknum) |
734 | { |
735 | 	struct pci_attach_args *pa = &(sc->bge_pa); |
736 | 	uint32_t bit, gnt; |
737 | 	int off; |
738 |  |
739 | 	if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0) |
740 | 		return; |
741 |  |
742 | 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761) |
743 | 		gnt = BGE_APE_LOCK_GRANT; |
744 | 	else |
745 | 		gnt = BGE_APE_PER_LOCK_GRANT; |
746 |  |
747 | 	off = 4 * locknum; |
748 |  |
749 | 	switch (locknum) { |
750 | 	case BGE_APE_LOCK_GPIO: |
751 | 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761) |
752 | 			return; |
753 | 		if (pa->pa_function == 0) |
754 | 			bit = BGE_APE_LOCK_GRANT_DRIVER0; |
755 | 		else |
756 | 			bit = (1 << pa->pa_function); |
757 | 		break; |
758 | 	case BGE_APE_LOCK_GRC: |
759 | 		if (pa->pa_function == 0) |
760 | 			bit = BGE_APE_LOCK_GRANT_DRIVER0; |
761 | 		else |
762 | 			bit = (1 << pa->pa_function); |
763 | 		break; |
764 | 	case BGE_APE_LOCK_MEM: |
765 | 		if (pa->pa_function == 0) |
766 | 			bit = BGE_APE_LOCK_GRANT_DRIVER0; |
767 | 		else |
768 | 			bit = (1 << pa->pa_function); |
769 | 		break; |
770 | 	case BGE_APE_LOCK_PHY0: |
771 | 	case BGE_APE_LOCK_PHY1: |
772 | 	case BGE_APE_LOCK_PHY2: |
773 | 	case BGE_APE_LOCK_PHY3: |
774 | 		bit = BGE_APE_LOCK_GRANT_DRIVER0; |
775 | 		break; |
776 | 	default: |
777 | 		return; |
778 | 	} |
779 |  |
780 | 	APE_WRITE_4(sc, gnt + off, bit); |
781 | } |
782 | |
783 | /* |
784 | * Send an event to the APE firmware. |
785 | */ |
786 | void |
787 | bge_ape_send_event(struct bge_softc *sc, uint32_t event) |
788 | { |
789 | 	uint32_t apedata; |
790 | 	int i; |
791 |  |
792 | 	/* NCSI does not support APE events. */ |
793 | 	if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0) |
794 | 		return; |
795 |  |
796 | 	/* Wait up to 1ms for APE to service previous event. */ |
797 | 	for (i = 10; i > 0; i--) { |
798 | 		if (bge_ape_lock(sc, BGE_APE_LOCK_MEM) != 0) |
799 | 			break; |
800 | 		apedata = APE_READ_4(sc, BGE_APE_EVENT_STATUS); |
801 | 		if ((apedata & BGE_APE_EVENT_STATUS_EVENT_PENDING) == 0) { |
802 | 			APE_WRITE_4(sc, BGE_APE_EVENT_STATUS, event | |
803 | 			    BGE_APE_EVENT_STATUS_EVENT_PENDING); |
804 | 			bge_ape_unlock(sc, BGE_APE_LOCK_MEM); |
805 | 			APE_WRITE_4(sc, BGE_APE_EVENT, BGE_APE_EVENT_1); |
806 | 			break; |
807 | 		} |
808 | 		bge_ape_unlock(sc, BGE_APE_LOCK_MEM); |
809 | 		DELAY(100); |
810 | 	} |
811 | 	if (i == 0) { |
812 | 		printf("%s: APE event 0x%08x send timed out\n", |
813 | 		    sc->bge_dev.dv_xname, event); |
814 | 	} |
815 | } |
816 | |
817 | void |
818 | bge_ape_driver_state_change(struct bge_softc *sc, int kind) |
819 | { |
820 | 	uint32_t apedata, event; |
821 |  |
822 | 	if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0) |
823 | 		return; |
824 |  |
825 | 	switch (kind) { |
826 | 	case BGE_RESET_START: |
827 | 		/* If this is the first load, clear the load counter. */ |
828 | 		apedata = APE_READ_4(sc, BGE_APE_HOST_SEG_SIG); |
829 | 		if (apedata != BGE_APE_HOST_SEG_SIG_MAGIC) |
830 | 			APE_WRITE_4(sc, BGE_APE_HOST_INIT_COUNT, 0); |
831 | 		else { |
832 | 			apedata = APE_READ_4(sc, BGE_APE_HOST_INIT_COUNT); |
833 | 			APE_WRITE_4(sc, BGE_APE_HOST_INIT_COUNT, ++apedata); |
834 | 		} |
835 | 		APE_WRITE_4(sc, BGE_APE_HOST_SEG_SIG, |
836 | 		    BGE_APE_HOST_SEG_SIG_MAGIC); |
837 | 		APE_WRITE_4(sc, BGE_APE_HOST_SEG_LEN, |
838 | 		    BGE_APE_HOST_SEG_LEN_MAGIC); |
839 |  |
840 | 		/* Add some version info if bge(4) supports it. */ |
841 | 		APE_WRITE_4(sc, BGE_APE_HOST_DRIVER_ID, |
842 | 		    BGE_APE_HOST_DRIVER_ID_MAGIC(1, 0)); |
843 | 		APE_WRITE_4(sc, BGE_APE_HOST_BEHAVIOR, |
844 | 		    BGE_APE_HOST_BEHAV_NO_PHYLOCK); |
845 | 		APE_WRITE_4(sc, BGE_APE_HOST_HEARTBEAT_INT_MS, |
846 | 		    BGE_APE_HOST_HEARTBEAT_INT_DISABLE); |
847 | 		APE_WRITE_4(sc, BGE_APE_HOST_DRVR_STATE, |
848 | 		    BGE_APE_HOST_DRVR_STATE_START); |
849 | 		event = BGE_APE_EVENT_STATUS_STATE_START; |
850 | 		break; |
851 | 	case BGE_RESET_SHUTDOWN: |
852 | 		APE_WRITE_4(sc, BGE_APE_HOST_DRVR_STATE, |
853 | 		    BGE_APE_HOST_DRVR_STATE_UNLOAD); |
854 | 		event = BGE_APE_EVENT_STATUS_STATE_UNLOAD; |
855 | 		break; |
856 | 	case BGE_RESET_SUSPEND: |
857 | 		event = BGE_APE_EVENT_STATUS_STATE_SUSPEND; |
858 | 		break; |
859 | 	default: |
860 | 		return; |
861 | 	} |
862 |  |
863 | 	bge_ape_send_event(sc, event | BGE_APE_EVENT_STATUS_DRIVER_EVNT | |
864 | 	    BGE_APE_EVENT_STATUS_STATE_CHNGE); |
865 | } |
866 | |
867 | |
868 | u_int8_t |
869 | bge_nvram_getbyte(struct bge_softc *sc, int addr, u_int8_t *dest) |
870 | { |
871 | 	u_int32_t access, byte = 0; |
872 | 	int i; |
873 |  |
874 | 	/* Lock. */ |
875 | 	CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1); |
876 | 	for (i = 0; i < 8000; i++) { |
877 | 		if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1) |
878 | 			break; |
879 | 		DELAY(20); |
880 | 	} |
881 | 	if (i == 8000) |
882 | 		return (1); |
883 |  |
884 | 	/* Enable access. */ |
885 | 	access = CSR_READ_4(sc, BGE_NVRAM_ACCESS); |
886 | 	CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE); |
887 |  |
888 | 	CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc); |
889 | 	CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD); |
890 | 	for (i = 0; i < BGE_TIMEOUT * 10; i++) { |
891 | 		DELAY(10); |
892 | 		if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) { |
893 | 			DELAY(10); |
894 | 			break; |
895 | 		} |
896 | 	} |
897 |  |
898 | 	if (i == BGE_TIMEOUT * 10) { |
899 | 		printf("%s: nvram read timed out\n", sc->bge_dev.dv_xname); |
900 | 		return (1); |
901 | 	} |
902 |  |
903 | 	/* Get result. */ |
904 | 	byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA); |
905 |  |
906 | 	*dest = (swap32(byte) >> ((addr % 4) * 8)) & 0xFF; |
907 |  |
908 | 	/* Disable access. */ |
909 | 	CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access); |
910 |  |
911 | 	/* Unlock. */ |
912 | 	CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1); |
913 | 	CSR_READ_4(sc, BGE_NVRAM_SWARB); |
914 |  |
915 | 	return (0); |
916 | } |
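 |  |
 | /* |
 |  * The NVRAM engine always returns a whole 32-bit word, apparently in |
 |  * big-endian byte order, so the requested byte is isolated above by |
 |  * swapping the word and shifting right by (addr % 4) * 8 bits; e.g. |
 |  * addr 6 reads the word at offset 4 and shifts by 16. |
 |  */ |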
917 | |
918 | /* |
919 | * Read a sequence of bytes from NVRAM. |
920 | */ |
921 | |
922 | int |
923 | bge_read_nvram(struct bge_softc *sc, caddr_t dest, int off, int cnt) |
924 | { |
925 | 	int err = 0, i; |
926 | 	u_int8_t byte = 0; |
927 |  |
928 | 	if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906) |
929 | 		return (1); |
930 |  |
931 | 	for (i = 0; i < cnt; i++) { |
932 | 		err = bge_nvram_getbyte(sc, off + i, &byte); |
933 | 		if (err) |
934 | 			break; |
935 | 		*(dest + i) = byte; |
936 | 	} |
937 |  |
938 | 	return (err ? 1 : 0); |
939 | } |
940 | |
941 | /* |
942 | * Read a byte of data stored in the EEPROM at address 'addr.' The |
943 | * BCM570x supports both the traditional bitbang interface and an |
944 | * auto access interface for reading the EEPROM. We use the auto |
945 | * access method. |
946 | */ |
947 | u_int8_t |
948 | bge_eeprom_getbyte(struct bge_softc *sc, int addr, u_int8_t *dest) |
949 | { |
950 | 	int i; |
951 | 	u_int32_t byte = 0; |
952 |  |
953 | 	/* |
954 | 	 * Enable use of auto EEPROM access so we can avoid |
955 | 	 * having to use the bitbang method. |
956 | 	 */ |
957 | 	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM); |
958 |  |
959 | 	/* Reset the EEPROM, load the clock period. */ |
960 | 	CSR_WRITE_4(sc, BGE_EE_ADDR, |
961 | 	    BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL)); |
962 | 	DELAY(20); |
963 |  |
964 | 	/* Issue the read EEPROM command. */ |
965 | 	CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr); |
966 |  |
967 | 	/* Wait for completion */ |
968 | 	for(i = 0; i < BGE_TIMEOUT * 10; i++) { |
969 | 		DELAY(10); |
970 | 		if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE) |
971 | 			break; |
972 | 	} |
973 |  |
974 | 	if (i == BGE_TIMEOUT * 10) { |
975 | 		printf("%s: eeprom read timed out\n", sc->bge_dev.dv_xname); |
976 | 		return (1); |
977 | 	} |
978 |  |
979 | 	/* Get result. */ |
980 | 	byte = CSR_READ_4(sc, BGE_EE_DATA); |
981 |  |
982 | 	*dest = (byte >> ((addr % 4) * 8)) & 0xFF; |
983 |  |
984 | 	return (0); |
985 | } |
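 |  |
 | /* |
 |  * As in the NVRAM path, a full 32-bit word comes back and the wanted |
 |  * byte is picked out by shifting (addr % 4) * 8 bits, though no |
 |  * swap32() is applied here. |
 |  */ |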
986 | |
987 | /* |
988 | * Read a sequence of bytes from the EEPROM. |
989 | */ |
990 | int |
991 | bge_read_eeprom(struct bge_softc *sc, caddr_t dest, int off, int cnt) |
992 | { |
993 | 	int i, error = 0; |
994 | 	u_int8_t byte = 0; |
995 |  |
996 | 	for (i = 0; i < cnt; i++) { |
997 | 		error = bge_eeprom_getbyte(sc, off + i, &byte); |
998 | 		if (error) |
999 | 			break; |
1000 | 		*(dest + i) = byte; |
1001 | 	} |
1002 |  |
1003 | 	return (error ? 1 : 0); |
1004 | } |
1005 | |
1006 | int |
1007 | bge_miibus_readreg(struct device *dev, int phy, int reg) |
1008 | { |
1009 | 	struct bge_softc *sc = (struct bge_softc *)dev; |
1010 | 	u_int32_t val, autopoll; |
1011 | 	int i; |
1012 |  |
1013 | 	if (bge_ape_lock(sc, sc->bge_phy_ape_lock) != 0) |
1014 | 		return (0); |
1015 |  |
1016 | 	/* Reading with autopolling on may trigger PCI errors */ |
1017 | 	autopoll = CSR_READ_4(sc, BGE_MI_MODE); |
1018 | 	if (autopoll & BGE_MIMODE_AUTOPOLL) { |
1019 | 		BGE_STS_CLRBIT(sc, BGE_STS_AUTOPOLL); |
1020 | 		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL); |
1021 | 		DELAY(80); |
1022 | 	} |
1023 |  |
1024 | 	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY| |
1025 | 	    BGE_MIPHY(phy)|BGE_MIREG(reg)); |
1026 | 	CSR_READ_4(sc, BGE_MI_COMM); /* force write */ |
1027 |  |
1028 | 	for (i = 0; i < 200; i++) { |
1029 | 		delay(1); |
1030 | 		val = CSR_READ_4(sc, BGE_MI_COMM); |
1031 | 		if (!(val & BGE_MICOMM_BUSY)) |
1032 | 			break; |
1033 | 		delay(10); |
1034 | 	} |
1035 |  |
1036 | 	if (i == 200) { |
1037 | 		printf("%s: PHY read timed out\n", sc->bge_dev.dv_xname); |
1038 | 		val = 0; |
1039 | 		goto done; |
1040 | 	} |
1041 |  |
1042 | 	val = CSR_READ_4(sc, BGE_MI_COMM); |
1043 |  |
1044 | done: |
1045 | 	if (autopoll & BGE_MIMODE_AUTOPOLL) { |
1046 | 		BGE_STS_SETBIT(sc, BGE_STS_AUTOPOLL); |
1047 | 		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL); |
1048 | 		DELAY(80); |
1049 | 	} |
1050 |  |
1051 | 	bge_ape_unlock(sc, sc->bge_phy_ape_lock); |
1052 |  |
1053 | 	if (val & BGE_MICOMM_READFAIL) |
1054 | 		return (0); |
1055 |  |
1056 | 	return (val & 0xFFFF); |
1057 | } |
1058 | |
1059 | void |
1060 | bge_miibus_writereg(struct device *dev, int phy, int reg, int val) |
1061 | { |
1062 | 	struct bge_softc *sc = (struct bge_softc *)dev; |
1063 | 	u_int32_t autopoll; |
1064 | 	int i; |
1065 |  |
1066 | 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906 && |
1067 | 	    (reg == MII_100T2CR || reg == BRGPHY_MII_AUXCTL)) |
1068 | 		return; |
1069 |  |
1070 | 	if (bge_ape_lock(sc, sc->bge_phy_ape_lock) != 0) |
1071 | 		return; |
1072 |  |
1073 | 	/* Reading with autopolling on may trigger PCI errors */ |
1074 | 	autopoll = CSR_READ_4(sc, BGE_MI_MODE); |
1075 | 	if (autopoll & BGE_MIMODE_AUTOPOLL) { |
1076 | 		DELAY(40); |
1077 | 		BGE_STS_CLRBIT(sc, BGE_STS_AUTOPOLL); |
1078 | 		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL); |
1079 | 		DELAY(40); /* 40 usec is supposed to be adequate */ |
1080 | 	} |
1081 |  |
1082 | 	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY| |
1083 | 	    BGE_MIPHY(phy)|BGE_MIREG(reg)|val); |
1084 | 	CSR_READ_4(sc, BGE_MI_COMM); /* force write */ |
1085 |  |
1086 | 	for (i = 0; i < 200; i++) { |
1087 | 		delay(1); |
1088 | 		if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) |
1089 | 			break; |
1090 | 		delay(10); |
1091 | 	} |
1092 |  |
1093 | 	if (autopoll & BGE_MIMODE_AUTOPOLL) { |
1094 | 		BGE_STS_SETBIT(sc, BGE_STS_AUTOPOLL); |
1095 | 		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL); |
1096 | 		DELAY(40); |
1097 | 	} |
1098 |  |
1099 | 	bge_ape_unlock(sc, sc->bge_phy_ape_lock); |
1100 |  |
1101 | 	if (i == 200) { |
1102 | 		printf("%s: PHY write timed out\n", sc->bge_dev.dv_xname); |
1103 | 	} |
1104 | } |
1105 | |
1106 | void |
1107 | bge_miibus_statchg(struct device *dev) |
1108 | { |
1109 | 	struct bge_softc *sc = (struct bge_softc *)dev; |
1110 | 	struct mii_data *mii = &sc->bge_mii; |
1111 | 	u_int32_t mac_mode, rx_mode, tx_mode; |
1112 |  |
1113 | 	/* |
1114 | 	 * Get flow control negotiation result. |
1115 | 	 */ |
1116 | 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO && |
1117 | 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->bge_flowflags) |
1118 | 		sc->bge_flowflags = mii->mii_media_active & IFM_ETH_FMASK; |
1119 |  |
1120 | 	if (!BGE_STS_BIT(sc, BGE_STS_LINK) && |
1121 | 	    mii->mii_media_status & IFM_ACTIVE && |
1122 | 	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) |
1123 | 		BGE_STS_SETBIT(sc, BGE_STS_LINK); |
1124 | 	else if (BGE_STS_BIT(sc, BGE_STS_LINK) && |
1125 | 	    (!(mii->mii_media_status & IFM_ACTIVE) || |
1126 | 	    IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) |
1127 | 		BGE_STS_CLRBIT(sc, BGE_STS_LINK); |
1128 |  |
1129 | 	if (!BGE_STS_BIT(sc, BGE_STS_LINK)) |
1130 | 		return; |
1131 |  |
1132 | 	/* Set the port mode (MII/GMII) to match the link speed. */ |
1133 | 	mac_mode = CSR_READ_4(sc, BGE_MAC_MODE) & |
1134 | 	    ~(BGE_MACMODE_PORTMODE | BGE_MACMODE_HALF_DUPLEX); |
1135 | 	tx_mode = CSR_READ_4(sc, BGE_TX_MODE); |
1136 | 	rx_mode = CSR_READ_4(sc, BGE_RX_MODE); |
1137 |  |
1138 | 	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T || |
1139 | 	    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) |
1140 | 		mac_mode |= BGE_PORTMODE_GMII; |
1141 | 	else |
1142 | 		mac_mode |= BGE_PORTMODE_MII; |
1143 |  |
1144 | 	/* Set MAC flow control behavior to match link flow control settings. */ |
1145 | 	tx_mode &= ~BGE_TXMODE_FLOWCTL_ENABLE; |
1146 | 	rx_mode &= ~BGE_RXMODE_FLOWCTL_ENABLE; |
1147 | 	if (mii->mii_media_active & IFM_FDX) { |
1148 | 		if (sc->bge_flowflags & IFM_ETH_TXPAUSE) |
1149 | 			tx_mode |= BGE_TXMODE_FLOWCTL_ENABLE; |
1150 | 		if (sc->bge_flowflags & IFM_ETH_RXPAUSE) |
1151 | 			rx_mode |= BGE_RXMODE_FLOWCTL_ENABLE; |
1152 | 	} else |
1153 | 		mac_mode |= BGE_MACMODE_HALF_DUPLEX; |
1154 |  |
1155 | 	CSR_WRITE_4(sc, BGE_MAC_MODE, mac_mode); |
1156 | 	DELAY(40); |
1157 | 	CSR_WRITE_4(sc, BGE_TX_MODE, tx_mode); |
1158 | 	CSR_WRITE_4(sc, BGE_RX_MODE, rx_mode); |
1159 | } |
1160 | |
1161 | /* |
1162 | * Initialize a standard receive ring descriptor. |
1163 | */ |
1164 | int |
1165 | bge_newbuf(struct bge_softc *sc, int i) |
1166 | { |
1167 | bus_dmamap_t dmap = sc->bge_cdata.bge_rx_std_map[i]; |
1168 | struct bge_rx_bd *r = &sc->bge_rdata->bge_rx_std_ring[i]; |
1169 | struct mbuf *m; |
1170 | int error; |
1171 | |
1172 | m = MCLGETL(NULL, M_DONTWAIT, sc->bge_rx_std_len)m_clget((((void *)0)), (0x0002), (sc->bge_rx_std_len)); |
1173 | if (!m) |
1174 | return (ENOBUFS55); |
1175 | m->m_lenm_hdr.mh_len = m->m_pkthdrM_dat.MH.MH_pkthdr.len = sc->bge_rx_std_len; |
1176 | if (!(sc->bge_flags & BGE_RX_ALIGNBUG0x00000008)) |
1177 | m_adj(m, ETHER_ALIGN2); |
1178 | |
1179 | error = bus_dmamap_load_mbuf(sc->bge_dmatag, dmap, m,(*(sc->bge_dmatag)->_dmamap_load_mbuf)((sc->bge_dmatag ), (dmap), (m), (0x0200|0x0001)) |
1180 | BUS_DMA_READ|BUS_DMA_NOWAIT)(*(sc->bge_dmatag)->_dmamap_load_mbuf)((sc->bge_dmatag ), (dmap), (m), (0x0200|0x0001)); |
1181 | if (error) { |
1182 | m_freem(m); |
1183 | return (ENOBUFS55); |
1184 | } |
1185 | |
1186 | bus_dmamap_sync(sc->bge_dmatag, dmap, 0, dmap->dm_mapsize,(*(sc->bge_dmatag)->_dmamap_sync)((sc->bge_dmatag), ( dmap), (0), (dmap->dm_mapsize), (0x01)) |
1187 | BUS_DMASYNC_PREREAD)(*(sc->bge_dmatag)->_dmamap_sync)((sc->bge_dmatag), ( dmap), (0), (dmap->dm_mapsize), (0x01)); |
1188 | sc->bge_cdata.bge_rx_std_chain[i] = m; |
1189 | |
1190 | bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,(*(sc->bge_dmatag)->_dmamap_sync)((sc->bge_dmatag), ( sc->bge_ring_map), (__builtin_offsetof(struct bge_ring_data , bge_rx_std_ring) + i * sizeof (struct bge_rx_bd)), (sizeof ( struct bge_rx_bd)), (0x08)) |
1191 | offsetof(struct bge_ring_data, bge_rx_std_ring) +(*(sc->bge_dmatag)->_dmamap_sync)((sc->bge_dmatag), ( sc->bge_ring_map), (__builtin_offsetof(struct bge_ring_data , bge_rx_std_ring) + i * sizeof (struct bge_rx_bd)), (sizeof ( struct bge_rx_bd)), (0x08)) |
1192 | i * sizeof (struct bge_rx_bd),(*(sc->bge_dmatag)->_dmamap_sync)((sc->bge_dmatag), ( sc->bge_ring_map), (__builtin_offsetof(struct bge_ring_data , bge_rx_std_ring) + i * sizeof (struct bge_rx_bd)), (sizeof ( struct bge_rx_bd)), (0x08)) |
1193 | sizeof (struct bge_rx_bd),(*(sc->bge_dmatag)->_dmamap_sync)((sc->bge_dmatag), ( sc->bge_ring_map), (__builtin_offsetof(struct bge_ring_data , bge_rx_std_ring) + i * sizeof (struct bge_rx_bd)), (sizeof ( struct bge_rx_bd)), (0x08)) |
1194 | BUS_DMASYNC_POSTWRITE)(*(sc->bge_dmatag)->_dmamap_sync)((sc->bge_dmatag), ( sc->bge_ring_map), (__builtin_offsetof(struct bge_ring_data , bge_rx_std_ring) + i * sizeof (struct bge_rx_bd)), (sizeof ( struct bge_rx_bd)), (0x08)); |
1195 | |
1196 | BGE_HOSTADDR(r->bge_addr, dmap->dm_segs[0].ds_addr);
1197 | r->bge_flags = BGE_RXBDFLAG_END;
1198 | r->bge_len = m->m_len;
1199 | r->bge_idx = i; |
1200 | |
1201 | bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
1202 | offsetof(struct bge_ring_data, bge_rx_std_ring) +
1203 | i * sizeof (struct bge_rx_bd),
1204 | sizeof (struct bge_rx_bd),
1205 | BUS_DMASYNC_PREWRITE);
1206 | |
1207 | return (0); |
1208 | } |
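
[Editor's note] The BGE_HOSTADDR() macro used throughout bge_newbuf() splits a bus address into the 32-bit high and low halves the NIC's descriptor format expects, as its expansion in this listing shows. A minimal standalone sketch of that behavior follows; the function form and name are illustrative, not part of the driver.

#if 0	/* illustrative sketch only, not driver code */
static inline void
example_hostaddr(bge_hostaddr *x, bus_addr_t addr)
{
	/* the low 32 bits of the DMA address go into bge_addr_lo */
	x->bge_addr_lo = (u_int64_t)addr & 0xffffffff;
	/* the high half is only non-zero when bus_addr_t is 64-bit */
	if (sizeof(bus_addr_t) == 8)
		x->bge_addr_hi = (u_int64_t)addr >> 32;
	else
		x->bge_addr_hi = 0;
}
#endif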
1209 | |
1210 | /* |
1211 | * Initialize a Jumbo receive ring descriptor. |
1212 | */ |
1213 | int |
1214 | bge_newbuf_jumbo(struct bge_softc *sc, int i) |
1215 | { |
1216 | bus_dmamap_t dmap = sc->bge_cdata.bge_rx_jumbo_map[i]; |
1217 | struct bge_ext_rx_bd *r = &sc->bge_rdata->bge_rx_jumbo_ring[i]; |
1218 | struct mbuf *m; |
1219 | int error; |
1220 | |
1221 | m = MCLGETL(NULL, M_DONTWAIT, BGE_JLEN);
1222 | if (!m)
1223 | return (ENOBUFS);
1224 | m->m_len = m->m_pkthdr.len = BGE_JUMBO_FRAMELEN;
1225 | if (!(sc->bge_flags & BGE_RX_ALIGNBUG))
1226 | m_adj(m, ETHER_ALIGN);
1227 | |
1228 | error = bus_dmamap_load_mbuf(sc->bge_dmatag, dmap, m,
1229 | BUS_DMA_READ|BUS_DMA_NOWAIT);
1230 | if (error) { |
1231 | m_freem(m); |
1232 | return (ENOBUFS);
1233 | } |
1234 | |
1235 | bus_dmamap_sync(sc->bge_dmatag, dmap, 0, dmap->dm_mapsize,
1236 | BUS_DMASYNC_PREREAD);
1237 | sc->bge_cdata.bge_rx_jumbo_chain[i] = m; |
1238 | |
1239 | bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
1240 | offsetof(struct bge_ring_data, bge_rx_jumbo_ring) +
1241 | i * sizeof (struct bge_ext_rx_bd),
1242 | sizeof (struct bge_ext_rx_bd),
1243 | BUS_DMASYNC_POSTWRITE);
1244 | |
1245 | /* |
1246 | * Fill in the extended RX buffer descriptor. |
1247 | */ |
1248 | r->bge_bd.bge_flags = BGE_RXBDFLAG_JUMBO_RING | BGE_RXBDFLAG_END;
1249 | r->bge_bd.bge_idx = i; |
1250 | r->bge_len3 = r->bge_len2 = r->bge_len1 = 0; |
1251 | switch (dmap->dm_nsegs) { |
1252 | case 4: |
1253 | BGE_HOSTADDR(r->bge_addr3, dmap->dm_segs[3].ds_addr);
1254 | r->bge_len3 = dmap->dm_segs[3].ds_len; |
1255 | /* FALLTHROUGH */ |
1256 | case 3: |
1257 | BGE_HOSTADDR(r->bge_addr2, dmap->dm_segs[2].ds_addr);
1258 | r->bge_len2 = dmap->dm_segs[2].ds_len; |
1259 | /* FALLTHROUGH */ |
1260 | case 2: |
1261 | BGE_HOSTADDR(r->bge_addr1, dmap->dm_segs[1].ds_addr);
1262 | r->bge_len1 = dmap->dm_segs[1].ds_len; |
1263 | /* FALLTHROUGH */ |
1264 | case 1: |
1265 | BGE_HOSTADDR(r->bge_bd.bge_addr, dmap->dm_segs[0].ds_addr);
1266 | r->bge_bd.bge_len = dmap->dm_segs[0].ds_len; |
1267 | break; |
1268 | default: |
1269 | panic("%s: %d segments", __func__, dmap->dm_nsegs); |
1270 | } |
1271 | |
1272 | bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
1273 | offsetof(struct bge_ring_data, bge_rx_jumbo_ring) +
1274 | i * sizeof (struct bge_ext_rx_bd),
1275 | sizeof (struct bge_ext_rx_bd),
1276 | BUS_DMASYNC_PREWRITE);
1277 | |
1278 | return (0); |
1279 | } |
1280 | |
1281 | int |
1282 | bge_init_rx_ring_std(struct bge_softc *sc) |
1283 | { |
1284 | int i; |
1285 | |
1286 | if (ISSET(sc->bge_flags, BGE_RXRING_VALID))
1287 | return (0); |
1288 | |
1289 | for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1290 | if (bus_dmamap_create(sc->bge_dmatag, sc->bge_rx_std_len, 1,
1291 | sc->bge_rx_std_len, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1292 | &sc->bge_cdata.bge_rx_std_map[i]) != 0) {
1293 | printf("%s: unable to create dmamap for slot %d\n", |
1294 | sc->bge_dev.dv_xname, i); |
1295 | goto uncreate; |
1296 | } |
1297 | bzero(&sc->bge_rdata->bge_rx_std_ring[i],
1298 | sizeof(struct bge_rx_bd));
1299 | } |
1300 | |
1301 | sc->bge_std = BGE_STD_RX_RING_CNT - 1;
1302 | |
1303 | /* lwm must be greater than the replenish threshold */ |
1304 | if_rxr_init(&sc->bge_std_ring, 17, BGE_STD_RX_RING_CNT);
1305 | bge_fill_rx_ring_std(sc); |
1306 | |
1307 | SET(sc->bge_flags, BGE_RXRING_VALID);
1308 | |
1309 | return (0); |
1310 | |
1311 | uncreate: |
1312 | while (--i) { |
1313 | bus_dmamap_destroy(sc->bge_dmatag,
1314 | sc->bge_cdata.bge_rx_std_map[i]);
1315 | } |
1316 | return (1); |
1317 | } |
1318 | |
1319 | /* |
1320 | * When the refill timeout for a ring is active, that ring is so empty |
1321 | * that no more packets can be received on it, so the interrupt handler |
1322 | * will not attempt to refill it, meaning we don't need to protect against |
1323 | * interrupts here. |
1324 | */ |
1325 | |
1326 | void |
1327 | bge_rxtick(void *arg) |
1328 | { |
1329 | struct bge_softc *sc = arg; |
1330 | |
1331 | if (ISSET(sc->bge_flags, BGE_RXRING_VALID) &&
1332 | if_rxr_inuse(&sc->bge_std_ring) <= 8)
1333 | bge_fill_rx_ring_std(sc); |
1334 | } |
1335 | |
1336 | void |
1337 | bge_rxtick_jumbo(void *arg) |
1338 | { |
1339 | struct bge_softc *sc = arg; |
1340 | |
1341 | if (ISSET(sc->bge_flags, BGE_JUMBO_RXRING_VALID) &&
1342 | if_rxr_inuse(&sc->bge_jumbo_ring) <= 8)
1343 | bge_fill_rx_ring_jumbo(sc); |
1344 | } |
1345 | |
1346 | void |
1347 | bge_fill_rx_ring_std(struct bge_softc *sc) |
1348 | { |
1349 | int i; |
1350 | int post = 0; |
1351 | u_int slots; |
1352 | |
1353 | i = sc->bge_std; |
1354 | for (slots = if_rxr_get(&sc->bge_std_ring, BGE_STD_RX_RING_CNT);
1355 | slots > 0; slots--) { |
1356 | BGE_INC(i, BGE_STD_RX_RING_CNT);
1357 | |
1358 | if (bge_newbuf(sc, i) != 0) |
1359 | break; |
1360 | |
1361 | sc->bge_std = i; |
1362 | post = 1; |
1363 | } |
1364 | if_rxr_put(&sc->bge_std_ring, slots);
1365 | |
1366 | if (post) |
1367 | bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
1368 | |
1369 | /* |
1370 | * bge always needs more than 8 packets on the ring; if we can't do
1371 | * that now, try again later.
1372 | */ |
1373 | if (if_rxr_inuse(&sc->bge_std_ring) <= 8)
1374 | timeout_add(&sc->bge_rxtimeout, 1); |
1375 | } |
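
[Editor's note] bge_fill_rx_ring_std() leans on the if_rxr_get()/if_rxr_put() accounting visible above: if_rxr_get() reserves up to BGE_STD_RX_RING_CNT slots, each buffer successfully posted by bge_newbuf() consumes one reservation, and whatever is left over is handed back. A minimal sketch of the pattern, with ring, MAX_SLOTS and fill() as illustrative stand-ins:

#if 0	/* illustrative sketch only, not driver code */
u_int slots;

for (slots = if_rxr_get(&ring, MAX_SLOTS); slots > 0; slots--) {
	if (fill() != 0)		/* buffer allocation failed */
		break;
}
if_rxr_put(&ring, slots);		/* return unused reservations */
#endif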
1376 | |
1377 | void |
1378 | bge_free_rx_ring_std(struct bge_softc *sc) |
1379 | { |
1380 | bus_dmamap_t dmap; |
1381 | struct mbuf *m; |
1382 | int i; |
1383 | |
1384 | if (!ISSET(sc->bge_flags, BGE_RXRING_VALID))
1385 | return; |
1386 | |
1387 | for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1388 | dmap = sc->bge_cdata.bge_rx_std_map[i]; |
1389 | m = sc->bge_cdata.bge_rx_std_chain[i]; |
1390 | if (m != NULL) {
1391 | bus_dmamap_sync(sc->bge_dmatag, dmap, 0,
1392 | dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1393 | bus_dmamap_unload(sc->bge_dmatag, dmap);
1394 | m_freem(m);
1395 | sc->bge_cdata.bge_rx_std_chain[i] = NULL;
1396 | }
1397 | bus_dmamap_destroy(sc->bge_dmatag, dmap);
1398 | sc->bge_cdata.bge_rx_std_map[i] = NULL;
1399 | bzero(&sc->bge_rdata->bge_rx_std_ring[i],
1400 | sizeof(struct bge_rx_bd));
1401 | } |
1402 | |
1403 | CLR(sc->bge_flags, BGE_RXRING_VALID);
1404 | } |
1405 | |
1406 | int |
1407 | bge_init_rx_ring_jumbo(struct bge_softc *sc) |
1408 | { |
1409 | volatile struct bge_rcb *rcb; |
1410 | int i; |
1411 | |
1412 | if (ISSET(sc->bge_flags, BGE_JUMBO_RXRING_VALID))
1413 | return (0); |
1414 | |
1415 | for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1416 | if (bus_dmamap_create(sc->bge_dmatag, BGE_JLEN, 4, BGE_JLEN, 0,
1417 | BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1418 | &sc->bge_cdata.bge_rx_jumbo_map[i]) != 0) {
1419 | printf("%s: unable to create dmamap for slot %d\n", |
1420 | sc->bge_dev.dv_xname, i); |
1421 | goto uncreate; |
1422 | } |
1423 | bzero(&sc->bge_rdata->bge_rx_jumbo_ring[i],
1424 | sizeof(struct bge_ext_rx_bd));
1425 | } |
1426 | |
1427 | sc->bge_jumbo = BGE_JUMBO_RX_RING_CNT - 1;
1428 | |
1429 | /* lwm must be greater than the replenish threshold */ |
1430 | if_rxr_init(&sc->bge_jumbo_ring, 17, BGE_JUMBO_RX_RING_CNT);
1431 | bge_fill_rx_ring_jumbo(sc); |
1432 | |
1433 | SET(sc->bge_flags, BGE_JUMBO_RXRING_VALID);
1434 | |
1435 | rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb; |
1436 | rcb->bge_maxlen_flags = |
1437 | BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_USE_EXT_RX_BD);
1438 | CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1439 | |
1440 | return (0); |
1441 | |
1442 | uncreate: |
1443 | while (--i) { |
1444 | bus_dmamap_destroy(sc->bge_dmatag,
1445 | sc->bge_cdata.bge_rx_jumbo_map[i]);
1446 | } |
1447 | return (1); |
1448 | } |
1449 | |
1450 | void |
1451 | bge_fill_rx_ring_jumbo(struct bge_softc *sc) |
1452 | { |
1453 | int i; |
1454 | int post = 0; |
1455 | u_int slots; |
1456 | |
1457 | i = sc->bge_jumbo; |
1458 | for (slots = if_rxr_get(&sc->bge_jumbo_ring, BGE_JUMBO_RX_RING_CNT);
1459 | slots > 0; slots--) { |
1460 | BGE_INC(i, BGE_JUMBO_RX_RING_CNT);
1461 | |
1462 | if (bge_newbuf_jumbo(sc, i) != 0) |
1463 | break; |
1464 | |
1465 | sc->bge_jumbo = i; |
1466 | post = 1; |
1467 | } |
1468 | if_rxr_put(&sc->bge_jumbo_ring, slots);
1469 | |
1470 | if (post) |
1471 | bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
1472 | |
1473 | /* |
1474 | * bge always needs more than 8 packets on the ring; if we can't do
1475 | * that now, try again later.
1476 | */ |
1477 | if (if_rxr_inuse(&sc->bge_jumbo_ring) <= 8)
1478 | timeout_add(&sc->bge_rxtimeout_jumbo, 1); |
1479 | } |
1480 | |
1481 | void |
1482 | bge_free_rx_ring_jumbo(struct bge_softc *sc) |
1483 | { |
1484 | bus_dmamap_t dmap; |
1485 | struct mbuf *m; |
1486 | int i; |
1487 | |
1488 | if (!ISSET(sc->bge_flags, BGE_JUMBO_RXRING_VALID))
1489 | return; |
1490 | |
1491 | for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1492 | dmap = sc->bge_cdata.bge_rx_jumbo_map[i]; |
1493 | m = sc->bge_cdata.bge_rx_jumbo_chain[i]; |
1494 | if (m != NULL) {
1495 | bus_dmamap_sync(sc->bge_dmatag, dmap, 0,
1496 | dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1497 | bus_dmamap_unload(sc->bge_dmatag, dmap);
1498 | m_freem(m);
1499 | sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
1500 | }
1501 | bus_dmamap_destroy(sc->bge_dmatag, dmap);
1502 | sc->bge_cdata.bge_rx_jumbo_map[i] = NULL;
1503 | bzero(&sc->bge_rdata->bge_rx_jumbo_ring[i],
1504 | sizeof(struct bge_ext_rx_bd));
1505 | } |
1506 | |
1507 | CLR(sc->bge_flags, BGE_JUMBO_RXRING_VALID);
1508 | } |
1509 | |
1510 | void |
1511 | bge_free_tx_ring(struct bge_softc *sc) |
1512 | { |
1513 | int i; |
1514 | |
1515 | if (!(sc->bge_flags & BGE_TXRING_VALID))
1516 | return; |
1517 | |
1518 | for (i = 0; i < BGE_TX_RING_CNT; i++) {
1519 | if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
1520 | m_freem(sc->bge_cdata.bge_tx_chain[i]);
1521 | sc->bge_cdata.bge_tx_chain[i] = NULL;
1522 | sc->bge_cdata.bge_tx_map[i] = NULL;
1523 | }
1524 | bzero(&sc->bge_rdata->bge_tx_ring[i],
1525 | sizeof(struct bge_tx_bd));
1526 | 
1527 | bus_dmamap_destroy(sc->bge_dmatag, sc->bge_txdma[i]);
1528 | } |
1529 | |
1530 | sc->bge_flags &= ~BGE_TXRING_VALID;
1531 | } |
1532 | |
1533 | int |
1534 | bge_init_tx_ring(struct bge_softc *sc) |
1535 | { |
1536 | int i; |
1537 | bus_size_t txsegsz, txmaxsegsz; |
1538 | |
1539 | if (sc->bge_flags & BGE_TXRING_VALID)
1540 | return (0); |
1541 | |
1542 | sc->bge_txcnt = 0; |
1543 | sc->bge_tx_saved_considx = 0; |
1544 | |
1545 | /* Initialize transmit producer index for host-memory send ring. */ |
1546 | sc->bge_tx_prodidx = 0; |
1547 | bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
1548 | if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX)
1549 | bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
1550 | |
1551 | /* NIC-memory send ring not used; initialize to zero. */ |
1552 | bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1553 | if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX)
1554 | bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1555 | |
1556 | if (BGE_IS_JUMBO_CAPABLE(sc)) {
1557 | txsegsz = 4096;
1558 | txmaxsegsz = BGE_JLEN;
1559 | } else {
1560 | txsegsz = MCLBYTES;
1561 | txmaxsegsz = MCLBYTES;
1562 | } |
1563 | |
1564 | for (i = 0; i < BGE_TX_RING_CNT; i++) {
1565 | if (bus_dmamap_create(sc->bge_dmatag, txmaxsegsz,
1566 | BGE_NTXSEG, txsegsz, 0, BUS_DMA_NOWAIT, &sc->bge_txdma[i]))
1567 | return (ENOBUFS);
1568 | } |
1569 | |
1570 | sc->bge_flags |= BGE_TXRING_VALID;
1571 | |
1572 | return (0); |
1573 | } |
1574 | |
1575 | void |
1576 | bge_iff(struct bge_softc *sc) |
1577 | { |
1578 | struct arpcom *ac = &sc->arpcom; |
1579 | struct ifnet *ifp = &ac->ac_if; |
1580 | struct ether_multi *enm; |
1581 | struct ether_multistep step; |
1582 | u_int8_t hashes[16]; |
1583 | u_int32_t h, rxmode; |
1584 | |
1585 | /* First, zot all the existing filters. */ |
1586 | rxmode = CSR_READ_4(sc, BGE_RX_MODE) & ~BGE_RXMODE_RX_PROMISC;
1587 | ifp->if_flags &= ~IFF_ALLMULTI;
1588 | memset(hashes, 0x00, sizeof(hashes));
1589 | 
1590 | if (ifp->if_flags & IFF_PROMISC) {
1591 | ifp->if_flags |= IFF_ALLMULTI;
1592 | rxmode |= BGE_RXMODE_RX_PROMISC;
1593 | } else if (ac->ac_multirangecnt > 0) {
1594 | ifp->if_flags |= IFF_ALLMULTI;
1595 | memset(hashes, 0xff, sizeof(hashes));
1596 | } else { |
1597 | ETHER_FIRST_MULTI(step, ac, enm);
1598 | while (enm != NULL) {
1599 | h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);
1600 | 
1601 | setbit(hashes, h & 0x7F);
1602 | 
1603 | ETHER_NEXT_MULTI(step, enm);
1604 | } |
1605 | } |
1606 | |
1607 | bus_space_write_raw_region_4(sc->bge_btag, sc->bge_bhandle, BGE_MAR0,
1608 | hashes, sizeof(hashes));
1609 | CSR_WRITE_4(sc, BGE_RX_MODE, rxmode);
1610 | } |
1611 | |
1612 | void |
1613 | bge_sig_pre_reset(struct bge_softc *sc, int type) |
1614 | { |
1615 | /* no bge_asf_mode. */ |
1616 | |
1617 | if (type == BGE_RESET_START || type == BGE_RESET_SUSPEND)
1618 | bge_ape_driver_state_change(sc, type); |
1619 | } |
1620 | |
1621 | void |
1622 | bge_sig_post_reset(struct bge_softc *sc, int type) |
1623 | { |
1624 | /* no bge_asf_mode. */ |
1625 | |
1626 | if (type == BGE_RESET_SHUTDOWN)
1627 | bge_ape_driver_state_change(sc, type); |
1628 | } |
1629 | |
1630 | void |
1631 | bge_sig_legacy(struct bge_softc *sc, int type) |
1632 | { |
1633 | /* no bge_asf_mode. */ |
1634 | } |
1635 | |
1636 | void |
1637 | bge_stop_fw(struct bge_softc *sc, int type) |
1638 | { |
1639 | /* no bge_asf_mode. */ |
1640 | } |
1641 | |
1642 | u_int32_t |
1643 | bge_dma_swap_options(struct bge_softc *sc) |
1644 | { |
1645 | u_int32_t dma_options = BGE_DMA_SWAP_OPTIONS;
1646 | |
1647 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) {
1648 | dma_options |= BGE_MODECTL_BYTESWAP_B2HRX_DATA |
1649 | BGE_MODECTL_WORDSWAP_B2HRX_DATA | BGE_MODECTL_B2HRX_ENABLE |
1650 | BGE_MODECTL_HTX2B_ENABLE;
1651 | } |
1652 | |
1653 | return (dma_options); |
1654 | } |
1655 | |
1656 | int |
1657 | bge_phy_addr(struct bge_softc *sc) |
1658 | { |
1659 | struct pci_attach_args *pa = &(sc->bge_pa); |
1660 | int phy_addr = 1; |
1661 | |
1662 | switch (BGE_ASICREV(sc->bge_chipid)) {
1663 | case BGE_ASICREV_BCM5717:
1664 | case BGE_ASICREV_BCM5719:
1665 | case BGE_ASICREV_BCM5720:
1666 | phy_addr = pa->pa_function;
1667 | if (sc->bge_chipid != BGE_CHIPID_BCM5717_A0) {
1668 | phy_addr += (CSR_READ_4(sc, BGE_SGDIG_STS) &
1669 | BGE_SGDIGSTS_IS_SERDES) ? 8 : 1;
1670 | } else {
1671 | phy_addr += (CSR_READ_4(sc, BGE_CPMU_PHY_STRAP) &
1672 | BGE_CPMU_PHY_STRAP_IS_SERDES) ? 8 : 1;
1673 | } |
1674 | } |
1675 | |
1676 | return (phy_addr); |
1677 | } |
1678 | |
1679 | /* |
1680 | * Do endian, PCI and DMA initialization. |
1681 | */ |
1682 | void |
1683 | bge_chipinit(struct bge_softc *sc) |
1684 | { |
1685 | struct pci_attach_args *pa = &(sc->bge_pa); |
1686 | u_int32_t dma_rw_ctl, misc_ctl, mode_ctl; |
1687 | int i; |
1688 | |
1689 | /* Set endianness before we access any non-PCI registers. */ |
1690 | misc_ctl = BGE_INIT;
1691 | if (sc->bge_flags & BGE_TAGGED_STATUS)
1692 | misc_ctl |= BGE_PCIMISCCTL_TAGGED_STATUS;
1693 | pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL,
1694 | misc_ctl); |
1695 | |
1696 | /* |
1697 | * Clear the MAC statistics block in the NIC's |
1698 | * internal memory. |
1699 | */ |
1700 | for (i = BGE_STATS_BLOCK;
1701 | i < BGE_STATS_BLOCK_END + 1; i += sizeof(u_int32_t))
1702 | BGE_MEMWIN_WRITE(pa->pa_pc, pa->pa_tag, i, 0);
1703 | |
1704 | for (i = BGE_STATUS_BLOCK;
1705 | i < BGE_STATUS_BLOCK_END + 1; i += sizeof(u_int32_t))
1706 | BGE_MEMWIN_WRITE(pa->pa_pc, pa->pa_tag, i, 0);
1707 | |
1708 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57765 ||
1709 | BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57766) {
1710 | /* |
1711 | * For the 57766 and non Ax versions of 57765, bootcode |
1712 | * needs to setup the PCIE Fast Training Sequence (FTS) |
1713 | * value to prevent transmit hangs. |
1714 | */ |
1715 | if (BGE_CHIPREV(sc->bge_chipid) != BGE_CHIPREV_57765_AX) {
1716 | CSR_WRITE_4(sc, BGE_CPMU_PADRNG_CTL,
1717 | CSR_READ_4(sc, BGE_CPMU_PADRNG_CTL) |
1718 | BGE_CPMU_PADRNG_CTL_RDIV2);
1719 | } |
1720 | } |
1721 | |
1722 | /* |
1723 | * Set up the PCI DMA control register. |
1724 | */ |
1725 | dma_rw_ctl = BGE_PCIDMARWCTL_RD_CMD_SHIFT(6) |
1726 | BGE_PCIDMARWCTL_WR_CMD_SHIFT(7);
1727 | |
1728 | if (sc->bge_flags & BGE_PCIE) {
1729 | if (sc->bge_mps >= 256)
1730 | dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(7);
1731 | else
1732 | dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1733 | } else if (sc->bge_flags & BGE_PCIX) {
1734 | /* PCI-X bus */ |
1735 | if (BGE_IS_5714_FAMILY(sc)) {
1736 | /* 256 bytes for read and write. */
1737 | dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(2) |
1738 | BGE_PCIDMARWCTL_WR_WAT_SHIFT(2);
1739 | |
1740 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5780)
1741 | dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL;
1742 | else
1743 | dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE_LOCAL;
1744 | } else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
1745 | /* 1536 bytes for read, 384 bytes for write. */ |
1746 | dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
1747 | BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1748 | } else { |
1749 | /* 384 bytes for read and write. */ |
1750 | dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(3) |
1751 | BGE_PCIDMARWCTL_WR_WAT_SHIFT(3) |
1752 | (0x0F); |
1753 | } |
1754 | |
1755 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 ||
1756 | BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
1757 | u_int32_t tmp; |
1758 | |
1759 | /* Set ONEDMA_ATONCE for hardware workaround. */ |
1760 | tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f;
1761 | if (tmp == 6 || tmp == 7) |
1762 | dma_rw_ctl |= |
1763 | BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL;
1764 | |
1765 | /* Set PCI-X DMA write workaround. */ |
1766 | dma_rw_ctl |= BGE_PCIDMARWCTL_ASRT_ALL_BE;
1767 | } |
1768 | } else { |
1769 | /* Conventional PCI bus: 256 bytes for read and write. */ |
1770 | dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
1771 | BGE_PCIDMARWCTL_WR_WAT_SHIFT(7);
1772 | |
1773 | if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5705 &&
1774 | BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5750)
1775 | dma_rw_ctl |= 0x0F; |
1776 | } |
1777 | |
1778 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
1779 | BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701)
1780 | dma_rw_ctl |= BGE_PCIDMARWCTL_USE_MRM |
1781 | BGE_PCIDMARWCTL_ASRT_ALL_BE;
1782 | |
1783 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 ||
1784 | BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704)
1785 | dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
1786 | |
1787 | if (BGE_IS_5717_PLUS(sc)) {
1788 | dma_rw_ctl &= ~BGE_PCIDMARWCTL_DIS_CACHE_ALIGNMENT;
1789 | if (sc->bge_chipid == BGE_CHIPID_BCM57765_A0)
1790 | dma_rw_ctl &= ~BGE_PCIDMARWCTL_CRDRDR_RDMA_MRRS_MSK;
1791 | |
1792 | /* |
1793 | * Enable HW workaround for controllers that misinterpret |
1794 | * a status tag update and leave interrupts permanently |
1795 | * disabled. |
1796 | */ |
1797 | if (!BGE_IS_57765_PLUS(sc) &&
1798 | BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5717 &&
1799 | BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5762)
1800 | dma_rw_ctl |= BGE_PCIDMARWCTL_TAGGED_STATUS_WA;
1801 | } |
1802 | |
1803 | pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, dma_rw_ctl);
1804 | |
1805 | /* |
1806 | * Set up general mode register. |
1807 | */ |
1808 | mode_ctl = bge_dma_swap_options(sc); |
1809 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720 ||
1810 | BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762) {
1811 | /* Retain Host-2-BMC settings written by APE firmware. */ |
1812 | mode_ctl |= CSR_READ_4(sc, BGE_MODE_CTL) &
1813 | (BGE_MODECTL_BYTESWAP_B2HRX_DATA |
1814 | BGE_MODECTL_WORDSWAP_B2HRX_DATA |
1815 | BGE_MODECTL_B2HRX_ENABLE | BGE_MODECTL_HTX2B_ENABLE);
1816 | } |
1817 | mode_ctl |= BGE_MODECTL_MAC_ATTN_INTR | BGE_MODECTL_HOST_SEND_BDS |
1818 | BGE_MODECTL_TX_NO_PHDR_CSUM;
1819 | |
1820 | /* |
1821 | * The BCM5701 B5 has a bug causing data corruption when using
1822 | * 64-bit DMA reads, which can be terminated early and then |
1823 | * completed later as 32-bit accesses, in combination with |
1824 | * certain bridges. |
1825 | */ |
1826 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701 &&
1827 | sc->bge_chipid == BGE_CHIPID_BCM5701_B5)
1828 | mode_ctl |= BGE_MODECTL_FORCE_PCI32;
1829 | |
1830 | CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);
1831 | |
1832 | /* |
1833 | * Disable memory write invalidate. Apparently it is not supported |
1834 | * properly by these devices. |
1835 | */ |
1836 | PCI_CLRBIT(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
1837 | PCI_COMMAND_INVALIDATE_ENABLE);
1838 | |
1839 | #ifdef __brokenalpha__ |
1840 | /* |
1841 | * Must ensure that we do not cross an 8K (bytes) boundary |
1842 | * for DMA reads. Our highest limit is 1K bytes. This is a |
1843 | * restriction on some ALPHA platforms with early revision |
1844 | * 21174 PCI chipsets, such as the AlphaPC 164lx |
1845 | */ |
1846 | PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
1847 | BGE_PCI_READ_BNDRY_1024);
1848 | #endif |
1849 | |
1850 | /* Set the timer prescaler (always 66MHz) */ |
1851 | CSR_WRITE_4(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ);
1852 | |
1853 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
1854 | DELAY(40); /* XXX */
1855 | 
1856 | /* Put PHY into ready state */
1857 | BGE_CLRBIT(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ);
1858 | CSR_READ_4(sc, BGE_MISC_CFG); /* Flush */
1859 | DELAY(40);
1860 | } |
1861 | } |
1862 | |
1863 | int |
1864 | bge_blockinit(struct bge_softc *sc) |
1865 | { |
1866 | volatile struct bge_rcb *rcb; |
1867 | vaddr_t rcb_addr; |
1868 | bge_hostaddr taddr; |
1869 | u_int32_t dmactl, rdmareg, mimode, val; |
1870 | int i, limit; |
1871 | |
1872 | /* |
1873 | * Initialize the memory window pointer register so that |
1874 | * we can access the first 32K of internal NIC RAM. This will |
1875 | * allow us to set up the TX send ring RCBs and the RX return |
1876 | * ring RCBs, plus other things which live in NIC memory. |
1877 | */ |
1878 | CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);
1879 | |
1880 | /* Configure mbuf memory pool */ |
1881 | if (!BGE_IS_5705_PLUS(sc)) {
1882 | CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
1883 | BGE_BUFFPOOL_1);
1884 | 
1885 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704)
1886 | CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
1887 | else
1888 | CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1889 | 
1890 | /* Configure DMA resource pool */
1891 | CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
1892 | BGE_DMA_DESCRIPTORS);
1893 | CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
1894 | } |
1895 | |
1896 | /* Configure mbuf pool watermarks */ |
1897 | /* new Broadcom docs strongly recommend these: */ |
1898 | if (BGE_IS_5717_PLUS(sc)) {
1899 | CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1900 | CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x2a);
1901 | CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xa0);
1902 | } else if (BGE_IS_5705_PLUS(sc)) {
1903 | CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1904 | 
1905 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
1906 | CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04);
1907 | CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10);
1908 | } else {
1909 | CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
1910 | CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1911 | }
1912 | } else {
1913 | CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
1914 | CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
1915 | CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1916 | } |
1917 | |
1918 | /* Configure DMA resource watermarks */ |
1919 | CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
1920 | CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
1921 | |
1922 | /* Enable buffer manager */ |
1923 | val = BGE_BMANMODE_ENABLE | BGE_BMANMODE_LOMBUF_ATTN;
1924 | /* |
1925 | * Change the arbitration algorithm of TXMBUF read request to |
1926 | * round-robin instead of priority based for BCM5719. When |
1927 | * TXFIFO is almost empty, RDMA will hold its request until |
1928 | * TXFIFO is not almost empty. |
1929 | */ |
1930 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719)
1931 | val |= BGE_BMANMODE_NO_TX_UNDERRUN;
1932 | CSR_WRITE_4(sc, BGE_BMAN_MODE, val);
1933 | |
1934 | /* Poll for buffer manager start indication */ |
1935 | for (i = 0; i < 2000; i++) { |
1936 | if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
1937 | break;
1938 | DELAY(10);
1939 | } |
1940 | |
1941 | if (i == 2000) { |
1942 | printf("%s: buffer manager failed to start\n", |
1943 | sc->bge_dev.dv_xname); |
1944 | return (ENXIO);
1945 | } |
1946 | |
1947 | /* Enable flow-through queues */ |
1948 | CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
1949 | CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
1950 | |
1951 | /* Wait until queue initialization is complete */ |
1952 | for (i = 0; i < 2000; i++) { |
1953 | if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
1954 | break;
1955 | DELAY(10);
1956 | } |
1957 | |
1958 | if (i == 2000) { |
1959 | printf("%s: flow-through queue init failed\n", |
1960 | sc->bge_dev.dv_xname); |
1961 | return (ENXIO);
1962 | } |
1963 | |
1964 | /* |
1965 | * Summary of rings supported by the controller: |
1966 | * |
1967 | * Standard Receive Producer Ring |
1968 | * - This ring is used to feed receive buffers for "standard" |
1969 | * sized frames (typically 1536 bytes) to the controller. |
1970 | * |
1971 | * Jumbo Receive Producer Ring |
1972 | * - This ring is used to feed receive buffers for jumbo sized |
1973 | * frames (i.e. anything bigger than the "standard" frames) |
1974 | * to the controller. |
1975 | * |
1976 | * Mini Receive Producer Ring |
1977 | * - This ring is used to feed receive buffers for "mini" |
1978 | * sized frames to the controller. |
1979 | * - This feature required external memory for the controller |
1980 | * but was never used in a production system. Should always |
1981 | * be disabled. |
1982 | * |
1983 | * Receive Return Ring |
1984 | * - After the controller has placed an incoming frame into a |
1985 | * receive buffer, that buffer is moved into a receive return
1986 | * ring. The driver is then responsible for passing the
1987 | * buffer up to the stack. Many versions of the controller |
1988 | * support multiple RR rings. |
1989 | * |
1990 | * Send Ring |
1991 | * - This ring is used for outgoing frames. Many versions of |
1992 | * the controller support multiple send rings. |
1993 | */ |
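
[Editor's note] A sketch of the producer/return flow the summary above describes, using only calls that appear in this driver; the sequencing shown is illustrative, not a verbatim excerpt.

#if 0	/* illustrative sketch only, not driver code */
/* the driver posts empty buffers on the standard producer ring ... */
bge_fill_rx_ring_std(sc);
/* ... which advances the producer index via a mailbox write, e.g.
 * bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std); the NIC then
 * fills those buffers and hands them back on the receive return ring,
 * where the driver's interrupt path passes them up the stack. */
#endif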
1994 | |
1995 | /* Initialize the standard RX ring control block */ |
1996 | rcb = &sc->bge_rdata->bge_info.bge_std_rx_rcb; |
1997 | BGE_HOSTADDR(rcb->bge_hostaddr, BGE_RING_DMA_ADDR(sc, bge_rx_std_ring));
1998 | if (BGE_IS_5717_PLUS(sc)) {
1999 | /* |
2000 | * Bits 31-16: Programmable ring size (2048, 1024, 512, .., 32) |
2001 | * Bits 15-2 : Maximum RX frame size |
2002 | * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled
2003 | * Bit 0 : Reserved |
2004 | */ |
2005 | rcb->bge_maxlen_flags = |
2006 | BGE_RCB_MAXLEN_FLAGS(512, ETHER_MAX_DIX_LEN << 2);
2007 | } else if (BGE_IS_5705_PLUS(sc)) {
2008 | /* |
2009 | * Bits 31-16: Programmable ring size (512, 256, 128, 64, 32) |
2010 | * Bits 15-2 : Reserved (should be 0) |
2011 | * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled |
2012 | * Bit 0 : Reserved |
2013 | */ |
2014 | rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
2015 | } else { |
2016 | /* |
2017 | * Ring size is always XXX entries |
2018 | * Bits 31-16: Maximum RX frame size |
2019 | * Bits 15-2 : Reserved (should be 0) |
2020 | * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled |
2021 | * Bit 0 : Reserved |
2022 | */ |
2023 | rcb->bge_maxlen_flags = |
2024 | BGE_RCB_MAXLEN_FLAGS(ETHER_MAX_DIX_LEN, 0);
2025 | } |
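
[Editor's note] A worked example of the BGE_RCB_MAXLEN_FLAGS(n, f) encoding, which this listing shows expanding to ((n) << 16 | (f)): in the 5717-plus branch above, BGE_RCB_MAXLEN_FLAGS(512, ETHER_MAX_DIX_LEN << 2) with ETHER_MAX_DIX_LEN == 1536 gives (512 << 16) | (1536 << 2) = 0x02000000 | 0x00001800 = 0x02001800, i.e. the ring size in bits 31-16 and the maximum RX frame size in bits 15-2.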
2026 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 ||
2027 | BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 ||
2028 | BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720)
2029 | rcb->bge_nicaddr = BGE_STD_RX_RINGS_5717;
2030 | else
2031 | rcb->bge_nicaddr = BGE_STD_RX_RINGS;
2032 | /* Write the standard receive producer ring control block. */ |
2033 | CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
2034 | CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
2035 | CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
2036 | CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
2037 | |
2038 | /* Reset the standard receive producer ring producer index. */ |
2039 | bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0);
2040 | |
2041 | /* |
2042 | * Initialize the Jumbo RX ring control block |
2043 | * We set the 'ring disabled' bit in the flags |
2044 | * field until we're actually ready to start |
2045 | * using this ring (i.e. once we set the MTU |
2046 | * high enough to require it). |
2047 | */ |
2048 | if (sc->bge_flags & BGE_JUMBO_RING) {
2049 | rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
2050 | BGE_HOSTADDR(rcb->bge_hostaddr,
2051 | BGE_RING_DMA_ADDR(sc, bge_rx_jumbo_ring));
2052 | rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
2053 | BGE_RCB_FLAG_USE_EXT_RX_BD | BGE_RCB_FLAG_RING_DISABLED);
2054 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 ||
2055 | BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 ||
2056 | BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720)
2057 | rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS_5717;
2058 | else
2059 | rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
2060 | CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
2061 | rcb->bge_hostaddr.bge_addr_hi);
2062 | CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
2063 | rcb->bge_hostaddr.bge_addr_lo);
2064 | /* Program the jumbo receive producer ring RCB parameters. */
2065 | CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
2066 | rcb->bge_maxlen_flags);
2067 | CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
2068 | /* Reset the jumbo receive producer ring producer index. */
2069 | bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
2070 | } |
2071 | |
2072 | /* Disable the mini receive producer ring RCB. */ |
2073 | if (BGE_IS_5700_FAMILY(sc)) {
2074 | /* Set up dummy disabled mini ring RCB */
2075 | rcb = &sc->bge_rdata->bge_info.bge_mini_rx_rcb;
2076 | rcb->bge_maxlen_flags =
2077 | BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
2078 | CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
2079 | rcb->bge_maxlen_flags);
2080 | /* Reset the mini receive producer ring producer index. */
2081 | bge_writembx(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
2082 | 
2083 | /* XXX why? */
2084 | bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
2085 | offsetof(struct bge_ring_data, bge_info),
2086 | sizeof (struct bge_gib),
2087 | BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2088 | } |
2089 | |
2090 | /* Choose de-pipeline mode for BCM5906 A0, A1 and A2. */ |
2091 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
2092 | if (sc->bge_chipid == BGE_CHIPID_BCM5906_A0 ||
2093 | sc->bge_chipid == BGE_CHIPID_BCM5906_A1 ||
2094 | sc->bge_chipid == BGE_CHIPID_BCM5906_A2)
2095 | CSR_WRITE_4(sc, BGE_ISO_PKT_TX,
2096 | (CSR_READ_4(sc, BGE_ISO_PKT_TX) & ~3) | 2);
2097 | } |
2098 | /* |
2099 | * The BD ring replenish thresholds control how often the |
2100 | * hardware fetches new BD's from the producer rings in host |
2101 | * memory. Setting the value too low on a busy system can |
2102 | * starve the hardware and reduce the throughput.
2103 | * |
2104 | * Set the BD ring replenish thresholds. The recommended |
2105 | * values are 1/8th the number of descriptors allocated to |
2106 | * each ring, but since we try to avoid filling the entire |
2107 | * ring we set these to the minimal value of 8. This needs to |
2108 | * be done on several of the supported chip revisions anyway, |
2109 | * to work around HW bugs. |
2110 | */ |
2111 | CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, 8);
2112 | if (sc->bge_flags & BGE_JUMBO_RING)
2113 | CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, 8);
2114 | 
2115 | if (BGE_IS_5717_PLUS(sc)) {
2116 | CSR_WRITE_4(sc, BGE_STD_REPL_LWM, 4);
2117 | CSR_WRITE_4(sc, BGE_JUMBO_REPL_LWM, 4);
2118 | } |
2119 | |
2120 | /* |
2121 | * Disable all send rings by setting the 'ring disabled' bit |
2122 | * in the flags field of all the TX send ring control blocks, |
2123 | * located in NIC memory. |
2124 | */ |
2125 | if (BGE_IS_5700_FAMILY(sc)) {
2126 | /* 5700 to 5704 had 16 send rings. */
2127 | limit = BGE_TX_RINGS_EXTSSRAM_MAX;
2128 | } else if (BGE_IS_57765_PLUS(sc) ||
2129 | BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762)
2130 | limit = 2;
2131 | else if (BGE_IS_5717_PLUS(sc))
2132 | limit = 4;
2133 | else
2134 | limit = 1;
2135 | rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
2136 | for (i = 0; i < limit; i++) {
2137 | RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
2138 | BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
2139 | RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0);
2140 | rcb_addr += sizeof(struct bge_rcb); |
2141 | } |
2142 | |
2143 | /* Configure send ring RCB 0 (we use only the first ring) */ |
2144 | rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
2145 | BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_tx_ring));
2146 | RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
2147 | RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
2148 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 ||
2149 | BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 ||
2150 | BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720)
2151 | RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, BGE_SEND_RING_5717);
2152 | else
2153 | RCB_WRITE_4(sc, rcb_addr, bge_nicaddr,
2154 | BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
2155 | RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
2156 | BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
2157 | |
2158 | /* |
2159 | * Disable all receive return rings by setting the |
2160 | * 'ring disabled' bit in the flags field of all the receive |
2161 | * return ring control blocks, located in NIC memory. |
2162 | */ |
2163 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 ||
2164 | BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 ||
2165 | BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) {
2166 | /* Should be 17, use 16 until we get an SRAM map. */
2167 | limit = 16;
2168 | } else if (BGE_IS_5700_FAMILY(sc))
2169 | limit = BGE_RX_RINGS_MAX;
2170 | else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5755 ||
2171 | BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762 ||
2172 | BGE_IS_57765_PLUS(sc))
2173 | limit = 4;
2174 | else
2175 | limit = 1;
2176 | /* Disable all receive return rings */
2177 | rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
2178 | for (i = 0; i < limit; i++) {
2179 | RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, 0);
2180 | RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, 0);
2181 | RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
2182 | BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt,
2183 | BGE_RCB_FLAG_RING_DISABLED));
2184 | RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0);
2185 | bge_writembx(sc, BGE_MBX_RX_CONS0_LO +
2186 | (i * (sizeof(u_int64_t))), 0); |
2187 | rcb_addr += sizeof(struct bge_rcb); |
2188 | } |
2189 | |
2190 | /* |
2191 | * Set up receive return ring 0. Note that the NIC address |
2192 | * for RX return rings is 0x0. The return rings live entirely |
2193 | * within the host, so the nicaddr field in the RCB isn't used. |
2194 | */ |
2195 | rcb_addr = BGE_MEMWIN_START0x00008000 + BGE_RX_RETURN_RING_RCB0x00000200; |
2196 | BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_rx_return_ring))do { (taddr).bge_addr_lo = ((u_int64_t) (((sc)->bge_ring_map ->dm_segs[0].ds_addr + __builtin_offsetof(struct bge_ring_data , bge_rx_return_ring))) & 0xffffffff); if (sizeof(bus_addr_t ) == 8) (taddr).bge_addr_hi = ((u_int64_t) (((sc)->bge_ring_map ->dm_segs[0].ds_addr + __builtin_offsetof(struct bge_ring_data , bge_rx_return_ring))) >> 32); else (taddr).bge_addr_hi = 0; } while(0); |
2197 | RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi)((sc->bge_btag)->write_4((sc->bge_bhandle), (rcb_addr + __builtin_offsetof(struct bge_rcb, bge_hostaddr.bge_addr_hi )), (taddr.bge_addr_hi))); |
2198 | RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo)((sc->bge_btag)->write_4((sc->bge_bhandle), (rcb_addr + __builtin_offsetof(struct bge_rcb, bge_hostaddr.bge_addr_lo )), (taddr.bge_addr_lo))); |
2199 | RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0x00000000)((sc->bge_btag)->write_4((sc->bge_bhandle), (rcb_addr + __builtin_offsetof(struct bge_rcb, bge_nicaddr)), (0x00000000 ))); |
2200 | RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,((sc->bge_btag)->write_4((sc->bge_bhandle), (rcb_addr + __builtin_offsetof(struct bge_rcb, bge_maxlen_flags)), ((( sc->bge_return_ring_cnt) << 16 | (0))))) |
2201 | BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0))((sc->bge_btag)->write_4((sc->bge_bhandle), (rcb_addr + __builtin_offsetof(struct bge_rcb, bge_maxlen_flags)), ((( sc->bge_return_ring_cnt) << 16 | (0))))); |
2202 | |
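	/*
	 * The seed is the sum of the six station address bytes, masked
	 * to the chip's 10-bit seed field (BGE_TX_BACKOFF_SEED_MASK is
	 * 0x3FF).  Deriving it from the MAC address gives each station
	 * a different 802.3 collision backoff sequence.
	 */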
	/* Set random backoff seed for TX */
	CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
	    (sc->arpcom.ac_enaddr[0] + sc->arpcom.ac_enaddr[1] +
	    sc->arpcom.ac_enaddr[2] + sc->arpcom.ac_enaddr[3] +
	    sc->arpcom.ac_enaddr[4] + sc->arpcom.ac_enaddr[5]) &
	    BGE_TX_BACKOFF_SEED_MASK);

	/* Set inter-packet gap */
	val = 0x2620;
	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720 ||
	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762)
		val |= CSR_READ_4(sc, BGE_TX_LENGTHS) &
		    (BGE_TXLEN_JMB_FRM_LEN_MSK | BGE_TXLEN_CNT_DN_VAL_MSK);
	CSR_WRITE_4(sc, BGE_TX_LENGTHS, val);

	/*
	 * Specify which ring to use for packets that don't match
	 * any RX rules.
	 */
	CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);

	/*
	 * Configure number of RX lists. One interrupt distribution
	 * list, sixteen active lists, one bad frames class.
	 */
	CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);

	/* Initialize RX list placement stats mask. */
	CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007BFFFF);
	CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);

	/* Disable host coalescing until we get it set up */
	CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);

	/* Poll to make sure it's shut down. */
	for (i = 0; i < 2000; i++) {
		if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
			break;
		DELAY(10);
	}

	if (i == 2000) {
		printf("%s: host coalescing engine failed to idle\n",
		    sc->bge_dev.dv_xname);
		return (ENXIO);
	}

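	/*
	 * The host coalescing engine batches status block updates and
	 * interrupts: an event fires when either the tick timer or the
	 * max BD count threshold is reached, whichever comes first.
	 * The *_INT variants below apparently control coalescing while
	 * an interrupt is already pending; they are simply cleared here.
	 */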
	/* Set up host coalescing defaults */
	CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
	CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
	if (!(BGE_IS_5705_PLUS(sc))) {
		CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
		CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
	}
	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);

	/* Set up address of statistics block */
	if (!(BGE_IS_5705_PLUS(sc))) {
		BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_info.bge_stats));
		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI, taddr.bge_addr_hi);
		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO, taddr.bge_addr_lo);

		CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
		CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
		CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
	}

	/* Set up address of status block */
	BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_status_block));
	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, taddr.bge_addr_hi);
	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO, taddr.bge_addr_lo);

	sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx = 0;
	sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx = 0;

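	/*
	 * Pre-C0 BCM5700 steppings DMA the full status block on every
	 * update, so the whole structure is cleared; later chips can be
	 * told to update only the first 32 bytes, reducing status DMA
	 * traffic (a reading of the code below, not chip documentation).
	 */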
	/* Set up status block size. */
	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 &&
	    sc->bge_chipid != BGE_CHIPID_BCM5700_C0) {
		val = BGE_STATBLKSZ_FULL;
		bzero(&sc->bge_rdata->bge_status_block, BGE_STATUS_BLK_SZ);
	} else {
		val = BGE_STATBLKSZ_32BYTE;
		bzero(&sc->bge_rdata->bge_status_block, 32);
	}

	/* Turn on host coalescing state machine */
	CSR_WRITE_4(sc, BGE_HCC_MODE, val | BGE_HCCMODE_ENABLE);

	/* Turn on RX BD completion state machine and enable attentions */
	CSR_WRITE_4(sc, BGE_RBDC_MODE,
	    BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);

	/* Turn on RX list placement state machine */
	CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);

	/* Turn on RX list selector state machine. */
	if (!(BGE_IS_5705_PLUS(sc)))
		CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);

	val = BGE_MACMODE_TXDMA_ENB | BGE_MACMODE_RXDMA_ENB |
	    BGE_MACMODE_RX_STATS_CLEAR | BGE_MACMODE_TX_STATS_CLEAR |
	    BGE_MACMODE_RX_STATS_ENB | BGE_MACMODE_TX_STATS_ENB |
	    BGE_MACMODE_FRMHDR_DMA_ENB;

	if (sc->bge_flags & BGE_FIBER_TBI)
		val |= BGE_PORTMODE_TBI;
	else if (sc->bge_flags & BGE_FIBER_MII)
		val |= BGE_PORTMODE_GMII;
	else
		val |= BGE_PORTMODE_MII;

	/* Allow APE to send/receive frames. */
	if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) != 0)
		val |= BGE_MACMODE_APE_RX_EN | BGE_MACMODE_APE_TX_EN;

	/* Turn on DMA, clear stats */
	CSR_WRITE_4(sc, BGE_MAC_MODE, val);
	DELAY(40);

	/* Set misc. local control, enable interrupts on attentions */
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);

#ifdef notdef
	/* Assert GPIO pins for PHY reset */
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
	    BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
	    BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
#endif

	/* Turn on DMA completion state machine */
	if (!(BGE_IS_5705_PLUS(sc)))
		CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);

	val = BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS;

	/* Enable host coalescing bug fix. */
	if (BGE_IS_5755_PLUS(sc))
		val |= BGE_WDMAMODE_STATUS_TAG_FIX;

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785)
		val |= BGE_WDMAMODE_BURST_ALL_DATA;

	/* Turn on write DMA state machine */
	CSR_WRITE_4(sc, BGE_WDMA_MODE, val);
	DELAY(40);

	val = BGE_RDMAMODE_ENABLE|BGE_RDMAMODE_ALL_ATTNS;

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717)
		val |= BGE_RDMAMODE_MULT_DMA_RD_DIS;

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 ||
	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785 ||
	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57780)
		val |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN |
		    BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN |
		    BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN;

	if (sc->bge_flags & BGE_PCIE)
		val |= BGE_RDMAMODE_FIFO_LONG_BURST;

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720 ||
	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762) {
		val |= CSR_READ_4(sc, BGE_RDMA_MODE) &
		    BGE_RDMAMODE_H2BNC_VLAN_DET;
		/*
		 * Allow multiple outstanding read requests from
		 * non-LSO read DMA engine.
		 */
		val &= ~BGE_RDMAMODE_MULT_DMA_RD_DIS;
	}

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761 ||
	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 ||
	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785 ||
	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57780 ||
	    BGE_IS_5717_PLUS(sc) || BGE_IS_57765_PLUS(sc)) {
		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762)
			rdmareg = BGE_RDMA_RSRVCTRL_REG2;
		else
			rdmareg = BGE_RDMA_RSRVCTRL;
		dmactl = CSR_READ_4(sc, rdmareg);
		/*
		 * Adjust tx margin to prevent TX data corruption and
		 * fix internal FIFO overflow.
		 */
		if (sc->bge_chipid == BGE_CHIPID_BCM5719_A0 ||
		    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762) {
			dmactl &= ~(BGE_RDMA_RSRVCTRL_FIFO_LWM_MASK |
			    BGE_RDMA_RSRVCTRL_FIFO_HWM_MASK |
			    BGE_RDMA_RSRVCTRL_TXMRGN_MASK);
			dmactl |= BGE_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
			    BGE_RDMA_RSRVCTRL_FIFO_HWM_1_5K |
			    BGE_RDMA_RSRVCTRL_TXMRGN_320B;
		}
		/*
		 * Enable fix for read DMA FIFO overruns.
		 * The fix is to limit the number of RX BDs
		 * the hardware would fetch at a time.
		 */
		CSR_WRITE_4(sc, rdmareg, dmactl |
		    BGE_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
	}

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719) {
		CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL,
		    CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) |
		    BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_4K |
		    BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
	} else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) {
		/*
		 * Allow 4KB burst length reads for non-LSO frames.
		 * Enable 512B burst length reads for buffer descriptors.
		 */
		CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL,
		    CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) |
		    BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_512 |
		    BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
	} else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762) {
		CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL_REG2,
		    CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL_REG2) |
		    BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_4K |
		    BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
	}

	CSR_WRITE_4(sc, BGE_RDMA_MODE, val);
	DELAY(40);

	if (sc->bge_flags & BGE_RDMA_BUG) {
		for (i = 0; i < BGE_NUM_RDMA_CHANNELS / 2; i++) {
			val = CSR_READ_4(sc, BGE_RDMA_LENGTH + i * 4);
			if ((val & 0xFFFF) > ETHER_MAX_LEN)
				break;
			if (((val >> 16) & 0xFFFF) > ETHER_MAX_LEN)
				break;
		}
		if (i != BGE_NUM_RDMA_CHANNELS / 2) {
			val = CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL);
			if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719)
				val |= BGE_RDMA_TX_LENGTH_WA_5719;
			else
				val |= BGE_RDMA_TX_LENGTH_WA_5720;
			CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL, val);
		}
	}

	/* Turn on RX data completion state machine */
	CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);

	/* Turn on RX BD initiator state machine */
	CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);

	/* Turn on RX data and RX BD initiator state machine */
	CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);

	/* Turn on Mbuf cluster free state machine */
	if (!BGE_IS_5705_PLUS(sc))
		CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);

	/* Turn on send BD completion state machine */
	CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);

	/* Turn on send data completion state machine */
	val = BGE_SDCMODE_ENABLE;
	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761)
		val |= BGE_SDCMODE_CDELAY;
	CSR_WRITE_4(sc, BGE_SDC_MODE, val);

	/* Turn on send data initiator state machine */
	CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);

	/* Turn on send BD initiator state machine */
	CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);

	/* Turn on send BD selector state machine */
	CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);

	CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007BFFFF);
	CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
	    BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);

	/* ack/clear link change events */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
	    BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
	    BGE_MACSTAT_LINK_CHANGED);

	/* Enable PHY auto polling (for MII/GMII only) */
	if (sc->bge_flags & BGE_FIBER_TBI) {
		CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
	} else {
		if ((sc->bge_flags & BGE_CPMU_PRESENT) != 0)
			mimode = BGE_MIMODE_500KHZ_CONST;
		else
			mimode = BGE_MIMODE_BASE;
		if (BGE_IS_5700_FAMILY(sc) ||
		    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705) {
			mimode |= BGE_MIMODE_AUTOPOLL;
			BGE_STS_SETBIT(sc, BGE_STS_AUTOPOLL);
		}
		mimode |= BGE_MIMODE_PHYADDR(sc->bge_phy_addr);
		CSR_WRITE_4(sc, BGE_MI_MODE, mimode);
		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700)
			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
			    BGE_EVTENB_MI_INTERRUPT);
	}

	/*
	 * Clear any pending link state attention.
	 * Otherwise some link state change events may be lost until attention
	 * is cleared by bge_intr() -> bge_link_upd() sequence.
	 * It's not necessary on newer BCM chips - perhaps enabling link
	 * state change attentions implies clearing pending attention.
	 */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
	    BGE_MACSTAT_LINK_CHANGED);

	/* Enable link state change attentions. */
	BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);

	return (0);
}

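/*
 * Look up a chip ID in the revision tables: first try an exact match
 * on the full chip ID (a specific stepping), then fall back to the
 * major ASIC revision.
 */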
const struct bge_revision *
bge_lookup_rev(u_int32_t chipid)
{
	const struct bge_revision *br;

	for (br = bge_revisions; br->br_name != NULL; br++) {
		if (br->br_chipid == chipid)
			return (br);
	}

	for (br = bge_majorrevs; br->br_name != NULL; br++) {
		if (br->br_chipid == BGE_ASICREV(chipid))
			return (br);
	}

	return (NULL);
}

int
bge_can_use_msi(struct bge_softc *sc)
{
	int can_use_msi = 0;

	switch (BGE_ASICREV(sc->bge_chipid)) {
	case BGE_ASICREV_BCM5714_A0:
	case BGE_ASICREV_BCM5714:
		/*
		 * Apparently, MSI doesn't work when these chips are
		 * configured in single-port mode.
		 */
		break;
	case BGE_ASICREV_BCM5750:
		if (BGE_CHIPREV(sc->bge_chipid) != BGE_CHIPREV_5750_AX &&
		    BGE_CHIPREV(sc->bge_chipid) != BGE_CHIPREV_5750_BX)
			can_use_msi = 1;
		break;
	default:
		if (BGE_IS_575X_PLUS(sc))
			can_use_msi = 1;
	}

	return (can_use_msi);
}

/*
 * Probe for a Broadcom chip. Check the PCI vendor and device IDs
 * against our list and return its name if we find a match. Note
 * that since the Broadcom controller contains VPD support, we
 * can get the device name string from the controller itself instead
 * of the compiled-in string. This is a little slow, but it guarantees
 * we'll always announce the right product name.
 */
int
bge_probe(struct device *parent, void *match, void *aux)
{
	return (pci_matchbyid(aux, bge_devices, nitems(bge_devices)));
}

void
bge_attach(struct device *parent, struct device *self, void *aux)
{
	struct bge_softc *sc = (struct bge_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	const struct bge_revision *br;
	pcireg_t pm_ctl, memtype, subid, reg;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	int gotenaddr = 0;
	u_int32_t hwcfg = 0;
	u_int32_t mac_addr = 0;
	u_int32_t misccfg;
	struct ifnet *ifp;
	caddr_t kva;
#ifdef __sparc64__
	char name[32];
#endif

	sc->bge_pa = *pa;

	subid = pci_conf_read(pc, pa->pa_tag, PCI_SUBSYS_ID_REG);

	/*
	 * Map control/status registers.
	 */
	DPRINTFN(5, ("Map control/status regs\n"));

	DPRINTFN(5, ("pci_mapreg_map\n"));
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BGE_PCI_BAR0);
	if (pci_mapreg_map(pa, BGE_PCI_BAR0, memtype, 0, &sc->bge_btag,
	    &sc->bge_bhandle, NULL, &sc->bge_bsize, 0)) {
		printf(": can't find mem space\n");
		return;
	}

	/*
	 * Kludge for 5700 Bx bug: a hardware bug (PCIX byte enable?)
	 * can clobber the chip's PCI config-space power control registers,
	 * leaving the card in D3 powersave state.
	 * We do not have memory-mapped registers in this state,
	 * so force device into D0 state before starting initialization.
	 */
	pm_ctl = pci_conf_read(pc, pa->pa_tag, BGE_PCI_PWRMGMT_CMD);
	pm_ctl &= ~(PCI_PWR_D0|PCI_PWR_D1|PCI_PWR_D2|PCI_PWR_D3);
	pm_ctl |= (1 << 8) | PCI_PWR_D0;	/* D0 state */
	pci_conf_write(pc, pa->pa_tag, BGE_PCI_PWRMGMT_CMD, pm_ctl);
	DELAY(1000);	/* 27 usec is allegedly sufficient */

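	/*
	 * The chip ID word encodes the ASIC revision in its upper bits:
	 * BGE_ASICREV() is chipid >> 12 and BGE_CHIPREV() is chipid >> 8,
	 * so e.g. BGE_CHIPID_BCM5719_A0 (0x05719000) yields ASIC revision
	 * 0x5719.
	 */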
	/*
	 * Save ASIC rev.
	 */
	sc->bge_chipid =
	    (pci_conf_read(pc, pa->pa_tag, BGE_PCI_MISC_CTL)
	    >> BGE_PCIMISCCTL_ASICREV_SHIFT);

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_USE_PRODID_REG) {
		switch (PCI_PRODUCT(pa->pa_id)) {
		case PCI_PRODUCT_BROADCOM_BCM5717:
		case PCI_PRODUCT_BROADCOM_BCM5718:
		case PCI_PRODUCT_BROADCOM_BCM5719:
		case PCI_PRODUCT_BROADCOM_BCM5720:
		case PCI_PRODUCT_BROADCOM_BCM5725:
		case PCI_PRODUCT_BROADCOM_BCM5727:
		case PCI_PRODUCT_BROADCOM_BCM5762:
		case PCI_PRODUCT_BROADCOM_BCM57764:
		case PCI_PRODUCT_BROADCOM_BCM57767:
		case PCI_PRODUCT_BROADCOM_BCM57787:
			sc->bge_chipid = pci_conf_read(pc, pa->pa_tag,
			    BGE_PCI_GEN2_PRODID_ASICREV);
			break;
		case PCI_PRODUCT_BROADCOM_BCM57761:
		case PCI_PRODUCT_BROADCOM_BCM57762:
		case PCI_PRODUCT_BROADCOM_BCM57765:
		case PCI_PRODUCT_BROADCOM_BCM57766:
		case PCI_PRODUCT_BROADCOM_BCM57781:
		case PCI_PRODUCT_BROADCOM_BCM57782:
		case PCI_PRODUCT_BROADCOM_BCM57785:
		case PCI_PRODUCT_BROADCOM_BCM57786:
		case PCI_PRODUCT_BROADCOM_BCM57791:
		case PCI_PRODUCT_BROADCOM_BCM57795:
			sc->bge_chipid = pci_conf_read(pc, pa->pa_tag,
			    BGE_PCI_GEN15_PRODID_ASICREV);
			break;
		default:
			sc->bge_chipid = pci_conf_read(pc, pa->pa_tag,
			    BGE_PCI_PRODID_ASICREV);
			break;
		}
	}

	sc->bge_phy_addr = bge_phy_addr(sc);

	printf(", ");
	br = bge_lookup_rev(sc->bge_chipid);
	if (br == NULL)
		printf("unknown ASIC (0x%x)", sc->bge_chipid);
	else
		printf("%s (0x%x)", br->br_name, sc->bge_chipid);

	/*
	 * PCI Express or PCI-X controller check.
	 */
	if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIEXPRESS,
	    &sc->bge_expcap, NULL) != 0) {
		/* Extract supported maximum payload size. */
		reg = pci_conf_read(pa->pa_pc, pa->pa_tag, sc->bge_expcap +
		    PCI_PCIE_DCAP);
		sc->bge_mps = 128 << (reg & 0x7);
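		/*
		 * PCIe encodes Max_Read_Request_Size as 128 << n in bits
		 * 14:12 of the device control register, so fls(2048) - 8
		 * == 4 selects 2KB and fls(4096) - 8 == 5 selects 4KB.
		 */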
		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 ||
		    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720)
			sc->bge_expmrq = (fls(2048) - 8) << 12;
		else
			sc->bge_expmrq = (fls(4096) - 8) << 12;
		/* Disable PCIe Active State Power Management (ASPM). */
		reg = pci_conf_read(pa->pa_pc, pa->pa_tag,
		    sc->bge_expcap + PCI_PCIE_LCSR);
		reg &= ~(PCI_PCIE_LCSR_ASPM_L0S | PCI_PCIE_LCSR_ASPM_L1);
		pci_conf_write(pa->pa_pc, pa->pa_tag,
		    sc->bge_expcap + PCI_PCIE_LCSR, reg);
		sc->bge_flags |= BGE_PCIE;
	} else {
		if ((pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE) &
		    BGE_PCISTATE_PCI_BUSMODE) == 0)
			sc->bge_flags |= BGE_PCIX;
	}

	/*
	 * SEEPROM check.
	 */
#ifdef __sparc64__
	/*
	 * Onboard interfaces on UltraSPARC systems generally don't
	 * have a SEEPROM fitted. These interfaces, and cards that
	 * have FCode, are named "network" by the PROM, whereas cards
	 * without FCode show up as "ethernet". Since we don't really
	 * need the information from the SEEPROM on cards that have
	 * FCode it's fine to pretend they don't have one.
	 */
	if (OF_getprop(PCITAG_NODE(pa->pa_tag), "name", name,
	    sizeof(name)) > 0 && strcmp(name, "network") == 0)
		sc->bge_flags |= BGE_NO_EEPROM;
#endif

	/* Save chipset family. */
	switch (BGE_ASICREV(sc->bge_chipid)) {
	case BGE_ASICREV_BCM5762:
	case BGE_ASICREV_BCM57765:
	case BGE_ASICREV_BCM57766:
		sc->bge_flags |= BGE_57765_PLUS;
		/* FALLTHROUGH */
	case BGE_ASICREV_BCM5717:
	case BGE_ASICREV_BCM5719:
	case BGE_ASICREV_BCM5720:
		sc->bge_flags |= BGE_5717_PLUS | BGE_5755_PLUS | BGE_575X_PLUS |
		    BGE_5705_PLUS | BGE_JUMBO_CAPABLE | BGE_JUMBO_RING |
		    BGE_JUMBO_FRAME;
		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 ||
		    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) {
			/*
			 * Enable workaround for DMA engine miscalculation
			 * of TXMBUF available space.
			 */
			sc->bge_flags |= BGE_RDMA_BUG;

			if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 &&
			    sc->bge_chipid == BGE_CHIPID_BCM5719_A0) {
				/* Jumbo frame on BCM5719 A0 does not work. */
				sc->bge_flags &= ~(BGE_JUMBO_CAPABLE |
				    BGE_JUMBO_RING | BGE_JUMBO_FRAME);
			}
		}
		break;
	case BGE_ASICREV_BCM5755:
	case BGE_ASICREV_BCM5761:
	case BGE_ASICREV_BCM5784:
	case BGE_ASICREV_BCM5785:
	case BGE_ASICREV_BCM5787:
	case BGE_ASICREV_BCM57780:
		sc->bge_flags |= BGE_5755_PLUS | BGE_575X_PLUS | BGE_5705_PLUS;
		break;
	case BGE_ASICREV_BCM5700:
	case BGE_ASICREV_BCM5701:
	case BGE_ASICREV_BCM5703:
	case BGE_ASICREV_BCM5704:
		sc->bge_flags |= BGE_5700_FAMILY | BGE_JUMBO_CAPABLE | BGE_JUMBO_RING;
		break;
	case BGE_ASICREV_BCM5714_A0:
	case BGE_ASICREV_BCM5780:
	case BGE_ASICREV_BCM5714:
		sc->bge_flags |= BGE_5714_FAMILY | BGE_JUMBO_CAPABLE | BGE_JUMBO_STD;
		/* FALLTHROUGH */
	case BGE_ASICREV_BCM5750:
	case BGE_ASICREV_BCM5752:
	case BGE_ASICREV_BCM5906:
		sc->bge_flags |= BGE_575X_PLUS;
		/* FALLTHROUGH */
	case BGE_ASICREV_BCM5705:
		sc->bge_flags |= BGE_5705_PLUS;
		break;
	}

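	/*
	 * BGE_JLEN is the 9022-byte jumbo frame length plus two
	 * alignment bytes, padded out to the next sizeof(u_int64_t)
	 * boundary for the jumbo RX buffers.
	 */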
	if (sc->bge_flags & BGE_JUMBO_STD)
		sc->bge_rx_std_len = BGE_JLEN;
	else
		sc->bge_rx_std_len = MCLBYTES;

	/*
	 * When using the BCM5701 in PCI-X mode, data corruption has
	 * been observed in the first few bytes of some received packets.
	 * Aligning the packet buffer in memory eliminates the corruption.
	 * Unfortunately, this misaligns the packet payloads. On platforms
	 * which do not support unaligned accesses, we will realign the
	 * payloads by copying the received packets.
	 */
	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701 &&
	    sc->bge_flags & BGE_PCIX)
		sc->bge_flags |= BGE_RX_ALIGNBUG;

	if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701) &&
	    PCI_VENDOR(subid) == DELL_VENDORID)
		sc->bge_phy_flags |= BGE_PHY_NO_3LED;

	misccfg = CSR_READ_4(sc, BGE_MISC_CFG);
	misccfg &= BGE_MISCCFG_BOARD_ID_MASK;

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 &&
	    (misccfg == BGE_MISCCFG_BOARD_ID_5788 ||
	    misccfg == BGE_MISCCFG_BOARD_ID_5788M))
		sc->bge_flags |= BGE_IS_5788;

	if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 &&
	    (misccfg == 0x4000 || misccfg == 0x8000)) ||
	    (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 &&
	    PCI_VENDOR(pa->pa_id) == PCI_VENDOR_BROADCOM &&
	    (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5901 ||
	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5901A2 ||
	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5705F)) ||
	    (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_BROADCOM &&
	    (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5751F ||
	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5753F ||
	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5787F)) ||
	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57790 ||
	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57791 ||
	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57795 ||
	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
		sc->bge_phy_flags |= BGE_PHY_10_100_ONLY;

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
	    (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 &&
	    (sc->bge_chipid != BGE_CHIPID_BCM5705_A0 &&
	    sc->bge_chipid != BGE_CHIPID_BCM5705_A1)) ||
	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
		sc->bge_phy_flags |= BGE_PHY_NO_WIRESPEED;

	if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 ||
	    sc->bge_chipid == BGE_CHIPID_BCM5701_B0)
		sc->bge_phy_flags |= BGE_PHY_CRC_BUG;
	if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5703_AX ||
	    BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5704_AX)
		sc->bge_phy_flags |= BGE_PHY_ADC_BUG;
	if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0)
		sc->bge_phy_flags |= BGE_PHY_5704_A0_BUG;

	if ((BGE_IS_5705_PLUS(sc)) &&
	    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906 &&
	    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5785 &&
	    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM57780 &&
	    !BGE_IS_5717_PLUS(sc)) {
		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5755 ||
		    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761 ||
		    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 ||
		    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5787) {
			if (PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5722 &&
			    PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5756)
				sc->bge_phy_flags |= BGE_PHY_JITTER_BUG;
			if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5755M)
				sc->bge_phy_flags |= BGE_PHY_ADJUST_TRIM;
		} else
			sc->bge_phy_flags |= BGE_PHY_BER_BUG;
	}

	/* Identify chips with APE processor. */
	switch (BGE_ASICREV(sc->bge_chipid)) {
	case BGE_ASICREV_BCM5717:
	case BGE_ASICREV_BCM5719:
	case BGE_ASICREV_BCM5720:
	case BGE_ASICREV_BCM5761:
	case BGE_ASICREV_BCM5762:
		sc->bge_flags |= BGE_APE;
		break;
	}

	/* Chips with APE need BAR2 access for APE registers/memory. */
	if ((sc->bge_flags & BGE_APE) != 0) {
		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BGE_PCI_BAR2);
		if (pci_mapreg_map(pa, BGE_PCI_BAR2, memtype, 0,
		    &sc->bge_apetag, &sc->bge_apehandle, NULL,
		    &sc->bge_apesize, 0)) {
			printf(": couldn't map BAR2 memory\n");
			goto fail_1;
		}

		/* Enable APE register/memory access by host driver. */
		reg = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE);
		reg |= BGE_PCISTATE_ALLOW_APE_CTLSPC_WR |
		    BGE_PCISTATE_ALLOW_APE_SHMEM_WR |
		    BGE_PCISTATE_ALLOW_APE_PSPACE_WR;
		pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE, reg);

		bge_ape_lock_init(sc);
		bge_ape_read_fw_ver(sc);
	}

	/* Identify the chips that use a CPMU. */
	if (BGE_IS_5717_PLUS(sc) ||
	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 ||
	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761 ||
	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785 ||
	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57780)
		sc->bge_flags |= BGE_CPMU_PRESENT;

	if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_MSI,
	    &sc->bge_msicap, NULL)) {
		if (bge_can_use_msi(sc) == 0)
			pa->pa_flags &= ~PCI_FLAGS_MSI_ENABLED;
	}

	DPRINTFN(5, ("pci_intr_map\n"));
	if (pci_intr_map_msi(pa, &ih) == 0)
		sc->bge_flags |= BGE_MSI;
	else if (pci_intr_map(pa, &ih)) {
		printf(": couldn't map interrupt\n");
		goto fail_1;
	}

	/*
	 * All controllers except the BCM5700 support tagged status, but
	 * we use it only for the MSI case on BCM5717; otherwise MSI on
	 * BCM5717 does not work.
	 */
	if (BGE_IS_5717_PLUS(sc) && sc->bge_flags & BGE_MSI)
		sc->bge_flags |= BGE_TAGGED_STATUS;

	DPRINTFN(5, ("pci_intr_string\n"));
	intrstr = pci_intr_string(pc, ih);

	/* Try to reset the chip. */
	DPRINTFN(5, ("bge_reset\n"));
	bge_sig_pre_reset(sc, BGE_RESET_SHUTDOWN);
	bge_reset(sc);

	bge_sig_legacy(sc, BGE_RESET_SHUTDOWN);
	bge_sig_post_reset(sc, BGE_RESET_SHUTDOWN);

	bge_chipinit(sc);

#if defined(__sparc64__) || defined(__HAVE_FDT)
	if (!gotenaddr && PCITAG_NODE(pa->pa_tag)) {
		if (OF_getprop(PCITAG_NODE(pa->pa_tag), "local-mac-address",
		    sc->arpcom.ac_enaddr, ETHER_ADDR_LEN) == ETHER_ADDR_LEN)
			gotenaddr = 1;
	}
#endif

	/*
	 * Get station address from the EEPROM.
	 */
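	/*
	 * The first attempt below actually reads NIC SRAM rather than
	 * the EEPROM proper; 0x484b is ASCII "HK", apparently left by
	 * firmware to tag a valid station address.
	 */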
	if (!gotenaddr) {
		mac_addr = bge_readmem_ind(sc, 0x0c14);
		if ((mac_addr >> 16) == 0x484b) {
			sc->arpcom.ac_enaddr[0] = (u_char)(mac_addr >> 8);
			sc->arpcom.ac_enaddr[1] = (u_char)mac_addr;
			mac_addr = bge_readmem_ind(sc, 0x0c18);
			sc->arpcom.ac_enaddr[2] = (u_char)(mac_addr >> 24);
			sc->arpcom.ac_enaddr[3] = (u_char)(mac_addr >> 16);
			sc->arpcom.ac_enaddr[4] = (u_char)(mac_addr >> 8);
			sc->arpcom.ac_enaddr[5] = (u_char)mac_addr;
			gotenaddr = 1;
		}
	}
	if (!gotenaddr) {
		int mac_offset = BGE_EE_MAC_OFFSET;

		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
			mac_offset = BGE_EE_MAC_OFFSET_5906;

		if (bge_read_nvram(sc, (caddr_t)&sc->arpcom.ac_enaddr,
		    mac_offset + 2, ETHER_ADDR_LEN) == 0)
			gotenaddr = 1;
	}
	if (!gotenaddr && (!(sc->bge_flags & BGE_NO_EEPROM))) {
		if (bge_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr,
		    BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN) == 0)
			gotenaddr = 1;
	}

#ifdef __sparc64__
	if (!gotenaddr) {
		extern void myetheraddr(u_char *);

		myetheraddr(sc->arpcom.ac_enaddr);
		gotenaddr = 1;
	}
#endif

	if (!gotenaddr) {
		printf(": failed to read station address\n");
		goto fail_2;
	}

	/* Allocate the general information block and ring buffers. */
	sc->bge_dmatag = pa->pa_dmat;
	DPRINTFN(5, ("bus_dmamem_alloc\n"));
	if (bus_dmamem_alloc(sc->bge_dmatag, sizeof(struct bge_ring_data),
	    PAGE_SIZE, 0, &sc->bge_ring_seg, 1, &sc->bge_ring_nseg,
	    BUS_DMA_NOWAIT)) {
		printf(": can't alloc rx buffers\n");
		goto fail_2;
	}
	DPRINTFN(5, ("bus_dmamem_map\n"));
	if (bus_dmamem_map(sc->bge_dmatag, &sc->bge_ring_seg,
	    sc->bge_ring_nseg, sizeof(struct bge_ring_data), &kva,
	    BUS_DMA_NOWAIT)) {
		printf(": can't map dma buffers (%lu bytes)\n",
		    sizeof(struct bge_ring_data));
		goto fail_3;
	}
	DPRINTFN(5, ("bus_dmamap_create\n"));
	if (bus_dmamap_create(sc->bge_dmatag, sizeof(struct bge_ring_data), 1,
	    sizeof(struct bge_ring_data), 0,
	    BUS_DMA_NOWAIT, &sc->bge_ring_map)) {
		printf(": can't create dma map\n");
		goto fail_4;
	}
	DPRINTFN(5, ("bus_dmamap_load\n"));
	if (bus_dmamap_load(sc->bge_dmatag, sc->bge_ring_map, kva,
	    sizeof(struct bge_ring_data), NULL,
	    BUS_DMA_NOWAIT)) {
		goto fail_5;
	}

	DPRINTFN(5, ("bzero\n"));
	sc->bge_rdata = (struct bge_ring_data *)kva;

	bzero(sc->bge_rdata, sizeof(struct bge_ring_data));

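	/*
	 * Judging by BGE_TICKS_PER_SEC (1000000), a coalescing tick is
	 * 1 usec: the defaults below request an RX interrupt after 150
	 * usec or 64 RX BDs, a TX interrupt after 300 usec or 400 TX
	 * BDs (whichever comes first), and a statistics update once per
	 * second.
	 */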
3037 | /* Set default tuneable values. */ |
3038 | sc->bge_stat_ticks = BGE_TICKS_PER_SEC1000000; |
3039 | sc->bge_rx_coal_ticks = 150; |
3040 | sc->bge_rx_max_coal_bds = 64; |
3041 | sc->bge_tx_coal_ticks = 300; |
3042 | sc->bge_tx_max_coal_bds = 400; |
3043 | |
3044 | /* 5705 limits RX return ring to 512 entries. */ |
3045 | if (BGE_IS_5700_FAMILY(sc)((sc)->bge_flags & 0x00010000) || BGE_IS_5717_PLUS(sc)((sc)->bge_flags & 0x00020000)) |
3046 | sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT1024; |
3047 | else |
3048 | sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705512; |
3049 | |
3050 | mtx_init(&sc->bge_kstat_mtx, IPL_SOFTCLOCK)do { (void)(((void *)0)); (void)(0); __mtx_init((&sc-> bge_kstat_mtx), ((((0x1)) > 0x0 && ((0x1)) < 0x9 ) ? 0x9 : ((0x1)))); } while (0); |
3051 | #if NKSTAT1 > 0 |
3052 | if (BGE_IS_5705_PLUS(sc)((sc)->bge_flags & 0x00001000)) |
3053 | bge_kstat_attach(sc); |
3054 | #endif |
3055 | |
3056 | /* Set up ifnet structure */ |
3057 | ifp = &sc->arpcom.ac_if; |
3058 | ifp->if_softc = sc; |
3059 | ifp->if_flags = IFF_BROADCAST0x2 | IFF_SIMPLEX0x800 | IFF_MULTICAST0x8000; |
3060 | ifp->if_xflags = IFXF_MPSAFE0x1; |
3061 | ifp->if_ioctl = bge_ioctl; |
3062 | ifp->if_qstart = bge_start; |
3063 | ifp->if_watchdog = bge_watchdog; |
3064 | ifq_init_maxlen(&ifp->if_snd, BGE_TX_RING_CNT512 - 1); |
3065 | |
3066 | DPRINTFN(5, ("bcopy\n")); |
3067 | bcopy(sc->bge_dev.dv_xname, ifp->if_xname, IFNAMSIZ16); |
3068 | |
3069 | ifp->if_capabilitiesif_data.ifi_capabilities = IFCAP_VLAN_MTU0x00000010; |
3070 | |
3071 | #if NVLAN1 > 0 |
3072 | ifp->if_capabilitiesif_data.ifi_capabilities |= IFCAP_VLAN_HWTAGGING0x00000020; |
3073 | #endif |
3074 | |
3075 | /* |
3076 | * 5700 B0 chips do not support checksumming correctly due |
3077 | * to hardware bugs. |
3078 | * |
3079 | * It seems all controllers have a bug that can generate UDP |
3080 | * datagrams with a checksum value 0 when TX UDP checksum |
3081 | * offloading is enabled. Generating UDP checksum value 0 is |
3082 | * a violation of RFC 768. |
3083 | */ |
3084 | if (sc->bge_chipid != BGE_CHIPID_BCM5700_B00x7100) |
3085 | ifp->if_capabilitiesif_data.ifi_capabilities |= IFCAP_CSUM_IPv40x00000001 | IFCAP_CSUM_TCPv40x00000002; |
3086 | |
3087 | if (BGE_IS_JUMBO_CAPABLE(sc)((sc)->bge_flags & 0x00000100)) |
3088 | ifp->if_hardmtu = BGE_JUMBO_MTU(9022 - ((6 * 2) + 2) - 4 - 4); |
3089 | |
3090 | /* |
3091 | * Do MII setup. |
3092 | */ |
3093 | DPRINTFN(5, ("mii setup\n")); |
3094 | sc->bge_mii.mii_ifp = ifp; |
3095 | sc->bge_mii.mii_readreg = bge_miibus_readreg; |
3096 | sc->bge_mii.mii_writereg = bge_miibus_writereg; |
3097 | sc->bge_mii.mii_statchg = bge_miibus_statchg; |
3098 | |
3099 | /* |
3100 | * Figure out what sort of media we have by checking the hardware |
3101 | * config word in the first 32K of internal NIC memory, or fall back to |
3102 | * examining the EEPROM if necessary. Note: on some BCM5700 cards, |
3103 | * this value seems to be unset. If that's the case, we have to rely on |
3104 | * identifying the NIC by its PCI subsystem ID, as we do below for the |
3105 | * SysKonnect SK-9D41. |
3106 | */ |
3107 | if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG0x00000B54) == BGE_MAGIC_NUMBER0x4B657654) |
3108 | hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG0x00000B58); |
3109 | else if (!(sc->bge_flags & BGE_NO_EEPROM0x00000080)) { |
3110 | if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET0xC8, |
3111 | sizeof(hwcfg))) { |
3112 | printf(": failed to read media type\n"); |
3113 | goto fail_6; |
3114 | } |
3115 | hwcfg = ntohl(hwcfg)(__uint32_t)(__builtin_constant_p(hwcfg) ? (__uint32_t)(((__uint32_t )(hwcfg) & 0xff) << 24 | ((__uint32_t)(hwcfg) & 0xff00) << 8 | ((__uint32_t)(hwcfg) & 0xff0000) >> 8 | ((__uint32_t)(hwcfg) & 0xff000000) >> 24) : __swap32md (hwcfg)); |
3116 | } |
3117 | |
3118 | /* The SysKonnect SK-9D41 is a 1000baseSX card. */ |
3119 | if (PCI_PRODUCT(subid)(((subid) >> 16) & 0xffff) == SK_SUBSYSID_9D410x4441 || |
3120 | (hwcfg & BGE_HWCFG_MEDIA0x00000030) == BGE_MEDIA_FIBER0x00000020) { |
3121 | if (BGE_IS_5700_FAMILY(sc)((sc)->bge_flags & 0x00010000)) |
3122 | sc->bge_flags |= BGE_FIBER_TBI0x00000200; |
3123 | else |
3124 | sc->bge_flags |= BGE_FIBER_MII0x00000400; |
3125 | } |
3126 | |
3127 | /* Take advantage of single-shot MSI. */ |
3128 | if (BGE_IS_5755_PLUS(sc)((sc)->bge_flags & 0x00004000) && sc->bge_flags & BGE_MSI0x00400000) |
3129 | CSR_WRITE_4(sc, BGE_MSI_MODE, CSR_READ_4(sc, BGE_MSI_MODE) &((sc->bge_btag)->write_4((sc->bge_bhandle), (0x6000) , (((sc->bge_btag)->read_4((sc->bge_bhandle), (0x6000 ))) & ~0x00000020))) |
3130 | ~BGE_MSIMODE_ONE_SHOT_DISABLE)((sc->bge_btag)->write_4((sc->bge_bhandle), (0x6000) , (((sc->bge_btag)->read_4((sc->bge_bhandle), (0x6000 ))) & ~0x00000020))); |
3131 | |
3132 | /* Hookup IRQ last. */ |
3133 | DPRINTFN(5, ("pci_intr_establish\n")); |
3134 | sc->bge_intrhand = pci_intr_establish(pc, ih, IPL_NET0x4 | IPL_MPSAFE0x100, |
3135 | bge_intr, sc, sc->bge_dev.dv_xname); |
3136 | if (sc->bge_intrhand == NULL((void *)0)) { |
3137 | printf(": couldn't establish interrupt"); |
3138 | if (intrstr != NULL((void *)0)) |
3139 | printf(" at %s", intrstr); |
3140 | printf("\n"); |
3141 | goto fail_6; |
3142 | } |
3143 | |
3144 | /* |
3145 | * A Broadcom chip was detected. Inform the world. |
3146 | */ |
3147 | printf(": %s, address %s\n", intrstr, |
3148 | ether_sprintf(sc->arpcom.ac_enaddr)); |
3149 | |
3150 | if (sc->bge_flags & BGE_FIBER_TBI) { |
3151 | ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd, |
3152 | bge_ifmedia_sts); |
3153 | ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL); |
3154 | ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX|IFM_FDX, |
3155 | 0, NULL); |
3156 | ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL); |
3157 | ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO); |
3158 | sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media; |
3159 | } else { |
3160 | int mii_flags; |
3161 | |
3162 | /* |
3163 | * Do transceiver setup. |
3164 | */ |
3165 | ifmedia_init(&sc->bge_mii.mii_media, 0, bge_ifmedia_upd, |
3166 | bge_ifmedia_sts); |
3167 | mii_flags = MIIF_DOPAUSE; |
3168 | if (sc->bge_flags & BGE_FIBER_MII) |
3169 | mii_flags |= MIIF_HAVEFIBER; |
3170 | mii_attach(&sc->bge_dev, &sc->bge_mii, 0xffffffff, |
3171 | sc->bge_phy_addr, MII_OFFSET_ANY, mii_flags); |
3172 | |
3173 | if (LIST_FIRST(&sc->bge_mii.mii_phys) == NULL) { |
3174 | printf("%s: no PHY found!\n", sc->bge_dev.dv_xname); |
3175 | ifmedia_add(&sc->bge_mii.mii_media, |
3176 | IFM_ETHER|IFM_MANUAL, 0, NULL); |
3177 | ifmedia_set(&sc->bge_mii.mii_media, |
3178 | IFM_ETHER|IFM_MANUAL); |
3179 | } else |
3180 | ifmedia_set(&sc->bge_mii.mii_media, |
3181 | IFM_ETHER|IFM_AUTO); |
3182 | } |
3183 | |
3184 | /* |
3185 | * Call MI attach routine. |
3186 | */ |
3187 | if_attach(ifp); |
3188 | ether_ifattach(ifp); |
3189 | |
3190 | timeout_set(&sc->bge_timeout, bge_tick, sc); |
3191 | timeout_set(&sc->bge_rxtimeout, bge_rxtick, sc); |
3192 | timeout_set(&sc->bge_rxtimeout_jumbo, bge_rxtick_jumbo, sc); |
3193 | return; |
3194 | |
3195 | fail_6: |
3196 | bus_dmamap_unload(sc->bge_dmatag, sc->bge_ring_map); |
3197 | |
3198 | fail_5: |
3199 | bus_dmamap_destroy(sc->bge_dmatag, sc->bge_ring_map); |
3200 | |
3201 | fail_4: |
3202 | bus_dmamem_unmap(sc->bge_dmatag, (caddr_t)sc->bge_rdata, |
3203 | sizeof(struct bge_ring_data)); |
3204 | |
3205 | fail_3: |
3206 | bus_dmamem_free(sc->bge_dmatag, &sc->bge_ring_seg, sc->bge_ring_nseg); |
3207 | |
3208 | fail_2: |
3209 | if ((sc->bge_flags & BGE_APE) != 0) |
3210 | bus_space_unmap(sc->bge_apetag, sc->bge_apehandle, |
3211 | sc->bge_apesize); |
3212 | |
3213 | fail_1: |
3214 | bus_space_unmap(sc->bge_btag, sc->bge_bhandle, sc->bge_bsize); |
3215 | } |
3216 | |
3217 | int |
3218 | bge_detach(struct device *self, int flags) |
3219 | { |
3220 | struct bge_softc *sc = (struct bge_softc *)self; |
3221 | struct ifnet *ifp = &sc->arpcom.ac_if; |
3222 | |
3223 | bge_stop(sc, 1); |
3224 | |
3225 | if (sc->bge_intrhand) |
3226 | pci_intr_disestablish(sc->bge_pa.pa_pc, sc->bge_intrhand); |
3227 | |
3228 | /* Detach any PHYs we might have. */ |
3229 | if (LIST_FIRST(&sc->bge_mii.mii_phys) != NULL) |
3230 | mii_detach(&sc->bge_mii, MII_PHY_ANY, MII_OFFSET_ANY); |
3231 | |
3232 | /* Delete any remaining media. */ |
3233 | ifmedia_delete_instance(&sc->bge_mii.mii_media, IFM_INST_ANY); |
3234 | |
3235 | ether_ifdetach(ifp); |
3236 | if_detach(ifp); |
3237 | |
3238 | bus_dmamap_unload(sc->bge_dmatag, sc->bge_ring_map); |
3239 | bus_dmamap_destroy(sc->bge_dmatag, sc->bge_ring_map); |
3240 | bus_dmamem_unmap(sc->bge_dmatag, (caddr_t)sc->bge_rdata, |
3241 | sizeof(struct bge_ring_data)); |
3242 | bus_dmamem_free(sc->bge_dmatag, &sc->bge_ring_seg, sc->bge_ring_nseg); |
3243 | |
3244 | if ((sc->bge_flags & BGE_APE) != 0) |
3245 | bus_space_unmap(sc->bge_apetag, sc->bge_apehandle, |
3246 | sc->bge_apesize); |
3247 | |
3248 | bus_space_unmap(sc->bge_btag, sc->bge_bhandle, sc->bge_bsize); |
3249 | return (0); |
3250 | } |
3251 | |
3252 | int |
3253 | bge_activate(struct device *self, int act) |
3254 | { |
3255 | struct bge_softc *sc = (struct bge_softc *)self; |
3256 | struct ifnet *ifp = &sc->arpcom.ac_if; |
3257 | int rv = 0; |
3258 | |
3259 | switch (act) { |
3260 | case DVACT_SUSPEND: |
3261 | rv = config_activate_children(self, act); |
3262 | if (ifp->if_flags & IFF_RUNNING) |
3263 | bge_stop(sc, 0); |
3264 | break; |
3265 | case DVACT_RESUME: |
3266 | if (ifp->if_flags & IFF_UP) |
3267 | bge_init(sc); |
3268 | break; |
3269 | default: |
3270 | rv = config_activate_children(self, act); |
3271 | break; |
3272 | } |
3273 | return (rv); |
3274 | } |
3275 | |
3276 | void |
3277 | bge_reset(struct bge_softc *sc) |
3278 | { |
3279 | struct pci_attach_args *pa = &sc->bge_pa; |
3280 | pcireg_t cachesize, command, devctl; |
3281 | u_int32_t reset, mac_mode, mac_mode_mask, val; |
3282 | void (*write_op)(struct bge_softc *, int, int); |
3283 | int i; |
3284 | |
3285 | mac_mode_mask = BGE_MACMODE_HALF_DUPLEX | BGE_MACMODE_PORTMODE; |
3286 | if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) != 0) |
3287 | mac_mode_mask |= BGE_MACMODE_APE_RX_EN | BGE_MACMODE_APE_TX_EN; |
3288 | mac_mode = CSR_READ_4(sc, BGE_MAC_MODE) & mac_mode_mask; |
3289 | |
3290 | if (BGE_IS_575X_PLUS(sc) && !BGE_IS_5714_FAMILY(sc) && |
3291 | BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906) { |
3292 | if (sc->bge_flags & BGE_PCIE) |
3293 | write_op = bge_writembx; |
3294 | else |
3295 | write_op = bge_writemem_ind; |
3296 | } else |
3297 | write_op = bge_writereg_ind; |
3298 | |
3299 | if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5700 && |
3300 | BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5701 && |
3301 | !(sc->bge_flags & BGE_NO_EEPROM)) { |
3302 | CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1); |
3303 | for (i = 0; i < 8000; i++) { |
3304 | if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & |
3305 | BGE_NVRAMSWARB_GNT1) |
3306 | break; |
3307 | DELAY(20); |
3308 | } |
3309 | if (i == 8000) |
3310 | printf("%s: nvram lock timed out\n", |
3311 | sc->bge_dev.dv_xname); |
3312 | } |
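/* The loop above is the NVRAM software-arbitration handshake: request |
* ownership with BGE_NVRAMSWARB_SET1, then spin until the matching grant |
* bit BGE_NVRAMSWARB_GNT1 appears (8000 polls of 20us each, so roughly |
* 160ms worst case before the timeout message). */ |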
3313 | /* Take APE lock when performing reset. */ |
3314 | bge_ape_lock(sc, BGE_APE_LOCK_GRC); |
3315 | |
3316 | /* Save some important PCI state. */ |
3317 | cachesize = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CACHESZ); |
3318 | command = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD); |
3319 | |
3320 | pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL, |
3321 | BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR | |
3322 | BGE_PCIMISCCTL_ENDIAN_WORDSWAP | BGE_PCIMISCCTL_PCISTATE_RW); |
3323 | |
3324 | /* Disable fastboot on controllers that support it. */ |
3325 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5752 || |
3326 | BGE_IS_5755_PLUS(sc)) |
3327 | CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0); |
3328 | |
3329 | /* |
3330 | * Write the magic number to SRAM at offset 0xB50. |
3331 | * When the firmware finishes its initialization it will write |
3332 | * the one's complement, ~BGE_MAGIC_NUMBER, to the same location. |
3333 | */ |
3334 | bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER); |
3335 | |
3336 | reset = BGE_MISCCFG_RESET_CORE_CLOCKS | BGE_32BITTIME_66MHZ; |
3337 | |
3338 | if (sc->bge_flags & BGE_PCIE) { |
3339 | if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5785 && |
3340 | !BGE_IS_5717_PLUS(sc)) { |
3341 | if (CSR_READ_4(sc, 0x7e2c) == 0x60) { |
3342 | /* PCI Express 1.0 system */ |
3343 | CSR_WRITE_4(sc, 0x7e2c, 0x20); |
3344 | } |
3345 | } |
3346 | if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) { |
3347 | /* |
3348 | * Prevent PCI Express link training |
3349 | * during global reset. |
3350 | */ |
3351 | CSR_WRITE_4(sc, BGE_MISC_CFG, (1<<29)); |
3352 | reset |= (1<<29); |
3353 | } |
3354 | } |
3355 | |
3356 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) { |
3357 | val = CSR_READ_4(sc, BGE_VCPU_STATUS); |
3358 | CSR_WRITE_4(sc, BGE_VCPU_STATUS, |
3359 | val | BGE_VCPU_STATUS_DRV_RESET); |
3360 | val = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL); |
3361 | CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL, |
3362 | val & ~BGE_VCPU_EXT_CTRL_HALT_CPU); |
3363 | |
3364 | sc->bge_flags |= BGE_NO_EEPROM; |
3365 | } |
3366 | |
3367 | /* |
3368 | * Set GPHY Power Down Override to leave GPHY |
3369 | * powered up in D0 uninitialized. |
3370 | */ |
3371 | if (BGE_IS_5705_PLUS(sc) && |
3372 | (sc->bge_flags & BGE_CPMU_PRESENT) == 0) |
3373 | reset |= BGE_MISCCFG_KEEP_GPHY_POWER; |
3374 | |
3375 | /* Issue global reset */ |
3376 | write_op(sc, BGE_MISC_CFG, reset); |
3377 | |
3378 | if (sc->bge_flags & BGE_PCIE) |
3379 | DELAY(100 * 1000); |
3380 | else |
3381 | DELAY(1000); |
3382 | |
3383 | if (sc->bge_flags & BGE_PCIE) { |
3384 | if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) { |
3385 | pcireg_t v; |
3386 | |
3387 | DELAY(500000); /* wait for link training to complete */ |
3388 | v = pci_conf_read(pa->pa_pc, pa->pa_tag, 0xc4); |
3389 | pci_conf_write(pa->pa_pc, pa->pa_tag, 0xc4, v | (1<<15)); |
3390 | } |
3391 | |
3392 | devctl = pci_conf_read(pa->pa_pc, pa->pa_tag, sc->bge_expcap + |
3393 | PCI_PCIE_DCSR); |
3394 | /* Clear enable no snoop and disable relaxed ordering. */ |
3395 | devctl &= ~(PCI_PCIE_DCSR_ERO | PCI_PCIE_DCSR_ENS); |
3396 | /* Set PCI Express max payload size. */ |
3397 | devctl = (devctl & ~PCI_PCIE_DCSR_MPS) | sc->bge_expmrq; |
3398 | /* Clear error status. */ |
3399 | devctl |= PCI_PCIE_DCSR_CEE | PCI_PCIE_DCSR_NFE | |
3400 | PCI_PCIE_DCSR_FEE | PCI_PCIE_DCSR_URE; |
3401 | pci_conf_write(pa->pa_pc, pa->pa_tag, sc->bge_expcap + |
3402 | PCI_PCIE_DCSR, devctl); |
3403 | } |
3404 | |
3405 | /* Reset some of the PCI state that got zapped by reset */ |
3406 | pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL, |
3407 | BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR | |
3408 | BGE_PCIMISCCTL_ENDIAN_WORDSWAP | BGE_PCIMISCCTL_PCISTATE_RW); |
3409 | val = BGE_PCISTATE_ROM_ENABLE | BGE_PCISTATE_ROM_RETRY_ENABLE; |
3410 | if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0 && |
3411 | (sc->bge_flags & BGE_PCIX) != 0) |
3412 | val |= BGE_PCISTATE_RETRY_SAME_DMA; |
3413 | if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) != 0) |
3414 | val |= BGE_PCISTATE_ALLOW_APE_CTLSPC_WR | |
3415 | BGE_PCISTATE_ALLOW_APE_SHMEM_WR | |
3416 | BGE_PCISTATE_ALLOW_APE_PSPACE_WR; |
3417 | pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE, val); |
3418 | pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_CACHESZ, cachesize); |
3419 | pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD, command); |
3420 | |
3421 | /* Re-enable MSI, if necessary, and enable memory arbiter. */ |
3422 | if (BGE_IS_5714_FAMILY(sc)) { |
3423 | /* This chip disables MSI on reset. */ |
3424 | if (sc->bge_flags & BGE_MSI) { |
3425 | val = pci_conf_read(pa->pa_pc, pa->pa_tag, |
3426 | sc->bge_msicap + PCI_MSI_MC); |
3427 | pci_conf_write(pa->pa_pc, pa->pa_tag, |
3428 | sc->bge_msicap + PCI_MSI_MC, |
3429 | val | PCI_MSI_MC_MSIE); |
3430 | val = CSR_READ_4(sc, BGE_MSI_MODE); |
3431 | CSR_WRITE_4(sc, BGE_MSI_MODE, |
3432 | val | BGE_MSIMODE_ENABLE); |
3433 | } |
3434 | val = CSR_READ_4(sc, BGE_MARB_MODE); |
3435 | CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val); |
3436 | } else |
3437 | CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE); |
3438 | |
3439 | /* Fix up byte swapping */ |
3440 | CSR_WRITE_4(sc, BGE_MODE_CTL, bge_dma_swap_options(sc)); |
3441 | |
3442 | val = CSR_READ_4(sc, BGE_MAC_MODE); |
3443 | val = (val & ~mac_mode_mask) | mac_mode; |
3444 | CSR_WRITE_4(sc, BGE_MAC_MODE, val); |
3445 | DELAY(40); |
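/* The port-mode and APE RX/TX enable bits captured at the top of this |
* function are restored here; presumably this keeps an APE-managed |
* management link working across the global reset. */ |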
3446 | |
3447 | bge_ape_unlock(sc, BGE_APE_LOCK_GRC); |
3448 | |
3449 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) { |
3450 | for (i = 0; i < BGE_TIMEOUT; i++) { |
3451 | val = CSR_READ_4(sc, BGE_VCPU_STATUS); |
3452 | if (val & BGE_VCPU_STATUS_INIT_DONE) |
3453 | break; |
3454 | DELAY(100); |
3455 | } |
3456 | |
3457 | if (i >= BGE_TIMEOUT) |
3458 | printf("%s: reset timed out\n", sc->bge_dev.dv_xname); |
3459 | } else { |
3460 | /* |
3461 | * Poll until we see the 1's complement of the magic number. |
3462 | * This indicates that the firmware initialization |
3463 | * is complete. We expect this to fail if no SEEPROM |
3464 | * is fitted. |
3465 | */ |
3466 | for (i = 0; i < BGE_TIMEOUT * 10; i++) { |
3467 | val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM); |
3468 | if (val == ~BGE_MAGIC_NUMBER) |
3469 | break; |
3470 | DELAY(10); |
3471 | } |
3472 | |
3473 | if ((i >= BGE_TIMEOUT * 10) && |
3474 | (!(sc->bge_flags & BGE_NO_EEPROM))) |
3475 | printf("%s: firmware handshake timed out\n", |
3476 | sc->bge_dev.dv_xname); |
3477 | /* BCM57765 A0 needs additional time before accessing. */ |
3478 | if (sc->bge_chipid == BGE_CHIPID_BCM57765_A0) |
3479 | DELAY(10 * 1000); /* XXX */ |
3480 | } |
3481 | |
3482 | /* |
3483 | * The 5704 in TBI mode apparently needs some special |
3484 | * adjustment to ensure the SERDES drive level is set |
3485 | * to 1.2V. |
3486 | */ |
3487 | if (sc->bge_flags & BGE_FIBER_TBI && |
3488 | BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) { |
3489 | val = CSR_READ_4(sc, BGE_SERDES_CFG); |
3490 | val = (val & ~0xFFF) | 0x880; |
3491 | CSR_WRITE_4(sc, BGE_SERDES_CFG, val); |
3492 | } |
3493 | |
3494 | if (sc->bge_flags & BGE_PCIE && |
3495 | !BGE_IS_5717_PLUS(sc) && |
3496 | sc->bge_chipid != BGE_CHIPID_BCM5750_A0 && |
3497 | BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5785) { |
3498 | /* Enable Data FIFO protection. */ |
3499 | val = CSR_READ_4(sc, 0x7c00); |
3500 | CSR_WRITE_4(sc, 0x7c00, val | (1<<25)); |
3501 | } |
3502 | |
3503 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) |
3504 | BGE_CLRBIT(sc, BGE_CPMU_CLCK_ORIDE, |
3505 | CPMU_CLCK_ORIDE_MAC_ORIDE_EN); |
3506 | } |
3507 | |
3508 | /* |
3509 | * Frame reception handling. This is called if there's a frame |
3510 | * on the receive return list. |
3511 | * |
3512 | * Note: we have to be able to handle two possibilities here: |
3513 | * 1) the frame is from the jumbo receive ring |
3514 | * 2) the frame is from the standard receive ring |
3515 | */ |
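/* The two cases are told apart per descriptor: the chip sets |
* BGE_RXBDFLAG_JUMBO_RING in the return descriptor when the buffer came |
* from the jumbo ring, as tested in the receive loop below. */ |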
3516 | |
3517 | void |
3518 | bge_rxeof(struct bge_softc *sc) |
3519 | { |
3520 | struct mbuf_list ml = MBUF_LIST_INITIALIZER(); |
3521 | struct ifnet *ifp; |
3522 | uint16_t rx_prod, rx_cons; |
3523 | int stdcnt = 0, jumbocnt = 0; |
3524 | bus_dmamap_t dmamap; |
3525 | bus_addr_t offset, toff; |
3526 | bus_size_t tlen; |
3527 | int tosync; |
3528 | int livelocked; |
3529 | |
3530 | rx_cons = sc->bge_rx_saved_considx; |
3531 | rx_prod = sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx; |
3532 | |
3533 | /* Nothing to do */ |
3534 | if (rx_cons == rx_prod) |
3535 | return; |
3536 | |
3537 | ifp = &sc->arpcom.ac_if; |
3538 | |
3539 | bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, |
3540 | offsetof(struct bge_ring_data, bge_status_block), |
3541 | sizeof (struct bge_status_block), |
3542 | BUS_DMASYNC_POSTREAD); |
3543 | |
3544 | offset = offsetof(struct bge_ring_data, bge_rx_return_ring); |
3545 | tosync = rx_prod - rx_cons; |
3546 | |
3547 | toff = offset + (rx_cons * sizeof (struct bge_rx_bd)); |
3548 | |
3549 | if (tosync < 0) { |
3550 | tlen = (sc->bge_return_ring_cnt - rx_cons) * |
3551 | sizeof (struct bge_rx_bd); |
3552 | bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, |
3553 | toff, tlen, BUS_DMASYNC_POSTREAD); |
3554 | tosync = -tosync; |
3555 | } |
3556 | |
3557 | bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, |
3558 | offset, tosync * sizeof (struct bge_rx_bd), |
3559 | BUS_DMASYNC_POSTREAD); |
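/* The return ring is circular, so when rx_prod has wrapped behind |
* rx_cons the dirty region is synced in two pieces: the tail of the |
* ring in the branch above, then from the ring base here. */ |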
3560 | |
3561 | while (rx_cons != rx_prod) { |
3562 | struct bge_rx_bd *cur_rx; |
3563 | u_int32_t rxidx; |
3564 | struct mbuf *m = NULL; |
3565 | |
3566 | cur_rx = &sc->bge_rdata->bge_rx_return_ring[rx_cons]; |
3567 | |
3568 | rxidx = cur_rx->bge_idx; |
3569 | BGE_INC(rx_cons, sc->bge_return_ring_cnt); |
3570 | |
3571 | if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) { |
3572 | m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx]; |
3573 | sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL; |
3574 | |
3575 | jumbocnt++; |
3576 | |
3577 | dmamap = sc->bge_cdata.bge_rx_jumbo_map[rxidx]; |
3578 | bus_dmamap_sync(sc->bge_dmatag, dmamap, 0, |
3579 | dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD); |
3580 | bus_dmamap_unload(sc->bge_dmatag, dmamap); |
3581 | |
3582 | if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { |
3583 | m_freem(m); |
3584 | continue; |
3585 | } |
3586 | } else { |
3587 | m = sc->bge_cdata.bge_rx_std_chain[rxidx]; |
3588 | sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL; |
3589 | |
3590 | stdcnt++; |
3591 | |
3592 | dmamap = sc->bge_cdata.bge_rx_std_map[rxidx]; |
3593 | bus_dmamap_sync(sc->bge_dmatag, dmamap, 0, |
3594 | dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD); |
3595 | bus_dmamap_unload(sc->bge_dmatag, dmamap); |
3596 | |
3597 | if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { |
3598 | m_freem(m); |
3599 | continue; |
3600 | } |
3601 | } |
3602 | |
3603 | #ifdef __STRICT_ALIGNMENT |
3604 | /* |
3605 | * The i386 allows unaligned accesses, but for other |
3606 | * platforms we must make sure the payload is aligned. |
3607 | */ |
3608 | if (sc->bge_flags & BGE_RX_ALIGNBUG) { |
3609 | bcopy(m->m_data, m->m_data + ETHER_ALIGN, |
3610 | cur_rx->bge_len); |
3611 | m->m_data += ETHER_ALIGN; |
3612 | } |
3613 | #endif |
3614 | m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN; |
3615 | |
3616 | bge_rxcsum(sc, cur_rx, m); |
3617 | |
3618 | #if NVLAN > 0 |
3619 | if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING && |
3620 | cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) { |
3621 | m->m_pkthdr.ether_vtag = cur_rx->bge_vlan_tag; |
3622 | m->m_flags |= M_VLANTAG; |
3623 | } |
3624 | #endif |
3625 | |
3626 | ml_enqueue(&ml, m); |
3627 | } |
3628 | |
3629 | sc->bge_rx_saved_considx = rx_cons; |
3630 | bge_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx); |
3631 | |
3632 | livelocked = ifiq_input(&ifp->if_rcv, &ml); |
3633 | if (stdcnt) { |
3634 | if_rxr_put(&sc->bge_std_ring, stdcnt); |
3635 | if (livelocked) |
3636 | if_rxr_livelocked(&sc->bge_std_ring); |
3637 | bge_fill_rx_ring_std(sc); |
3638 | } |
3639 | if (jumbocnt) { |
3640 | if_rxr_put(&sc->bge_jumbo_ring, jumbocnt); |
3641 | if (livelocked) |
3642 | if_rxr_livelocked(&sc->bge_jumbo_ring); |
3643 | bge_fill_rx_ring_jumbo(sc); |
3644 | } |
3645 | } |
3646 | |
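/* bge_rxcsum() below translates the hardware checksum results carried |
* in the RX descriptor into mbuf csum_flags: 5717+ chips report status |
* bits (and only for IPv4), while older chips report a good checksum as |
* the full 16-bit value 0xFFFF. */ |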
3647 | void |
3648 | bge_rxcsum(struct bge_softc *sc, struct bge_rx_bd *cur_rx, struct mbuf *m) |
3649 | { |
3650 | if (sc->bge_chipid == BGE_CHIPID_BCM5700_B0) { |
3651 | /* |
3652 | * 5700 B0 chips do not support checksumming correctly due |
3653 | * to hardware bugs. |
3654 | */ |
3655 | return; |
3656 | } else if (BGE_IS_5717_PLUS(sc)) { |
3657 | if ((cur_rx->bge_flags & BGE_RXBDFLAG_IPV6) == 0) { |
3658 | if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM && |
3659 | (cur_rx->bge_error_flag & |
3660 | BGE_RXERRFLAG_IP_CSUM_NOK) == 0) |
3661 | m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK; |
3662 | |
3663 | if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) { |
3664 | m->m_pkthdr.csum_flags |= |
3665 | M_TCP_CSUM_IN_OK|M_UDP_CSUM_IN_OK; |
3666 | } |
3667 | } |
3668 | } else { |
3669 | if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM && |
3670 | cur_rx->bge_ip_csum == 0xFFFF) |
3671 | m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK; |
3672 | |
3673 | if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM && |
3674 | m->m_pkthdr.len >= ETHER_MIN_NOPAD && |
3675 | cur_rx->bge_tcp_udp_csum == 0xFFFF) { |
3676 | m->m_pkthdr.csum_flags |= |
3677 | M_TCP_CSUM_IN_OK|M_UDP_CSUM_IN_OK; |
3678 | } |
3679 | } |
3680 | } |
3681 | |
3682 | void |
3683 | bge_txeof(struct bge_softc *sc) |
3684 | { |
3685 | struct bge_tx_bd *cur_tx = NULL; |
3686 | struct ifnet *ifp; |
3687 | bus_dmamap_t dmamap; |
3688 | bus_addr_t offset, toff; |
3689 | bus_size_t tlen; |
3690 | int tosync, freed, txcnt; |
3691 | u_int32_t cons, newcons; |
3692 | struct mbuf *m; |
3693 | |
3694 | /* Nothing to do */ |
3695 | cons = sc->bge_tx_saved_considx; |
3696 | newcons = sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx; |
3697 | if (cons == newcons) |
3698 | return; |
3699 | |
3700 | ifp = &sc->arpcom.ac_if; |
3701 | |
3702 | bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, |
3703 | offsetof(struct bge_ring_data, bge_status_block), |
3704 | sizeof (struct bge_status_block), |
3705 | BUS_DMASYNC_POSTREAD); |
3706 | |
3707 | offset = offsetof(struct bge_ring_data, bge_tx_ring); |
3708 | tosync = newcons - cons; |
3709 | |
3710 | toff = offset + (cons * sizeof (struct bge_tx_bd)); |
3711 | |
3712 | if (tosync < 0) { |
3713 | tlen = (BGE_TX_RING_CNT - cons) * sizeof (struct bge_tx_bd); |
3714 | bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, |
3715 | toff, tlen, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); |
3716 | tosync = -tosync; |
3717 | } |
3718 | |
3719 | bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, |
3720 | offset, tosync * sizeof (struct bge_tx_bd), |
3721 | BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); |
3722 | |
3723 | /* |
3724 | * Go through our tx ring and free mbufs for those |
3725 | * frames that have been sent. |
3726 | */ |
3727 | freed = 0; |
3728 | while (cons != newcons) { |
3729 | cur_tx = &sc->bge_rdata->bge_tx_ring[cons]; |
Value stored to 'cur_tx' is never read | |
3730 | m = sc->bge_cdata.bge_tx_chain[cons]; |
3731 | if (m != NULL) { |
3732 | dmamap = sc->bge_cdata.bge_tx_map[cons]; |
3733 | |
3734 | sc->bge_cdata.bge_tx_chain[cons] = NULL; |
3735 | sc->bge_cdata.bge_tx_map[cons] = NULL; |
3736 | bus_dmamap_sync(sc->bge_dmatag, dmamap, 0, |
3737 | dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE); |
3738 | bus_dmamap_unload(sc->bge_dmatag, dmamap); |
3739 | |
3740 | m_freem(m); |
3741 | } |
3742 | freed++; |
3743 | BGE_INC(cons, BGE_TX_RING_CNT); |
3744 | } |
3745 | |
3746 | txcnt = atomic_sub_int_nv(&sc->bge_txcnt, freed); |
3747 | |
3748 | sc->bge_tx_saved_considx = cons; |
3749 | |
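/* Restart the send queue if it was paused for lack of descriptors; once |
* the ring has drained completely, disarm the watchdog timer that |
* bge_start() set when it queued the work. */ |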
3750 | if (ifq_is_oactive(&ifp->if_snd)) |
3751 | ifq_restart(&ifp->if_snd); |
3752 | else if (txcnt == 0) |
3753 | ifp->if_timer = 0; |
3754 | } |
3755 | |
3756 | int |
3757 | bge_intr(void *xsc) |
3758 | { |
3759 | struct bge_softc *sc; |
3760 | struct ifnet *ifp; |
3761 | u_int32_t statusword, statustag; |
3762 | |
3763 | sc = xsc; |
3764 | ifp = &sc->arpcom.ac_if; |
3765 | |
3766 | /* read status word from status block */ |
3767 | bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, |
3768 | offsetof(struct bge_ring_data, bge_status_block), |
3769 | sizeof (struct bge_status_block), |
3770 | BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); |
3771 | |
3772 | statusword = sc->bge_rdata->bge_status_block.bge_status; |
3773 | statustag = sc->bge_rdata->bge_status_block.bge_status_tag << 24; |
3774 | |
3775 | if (sc->bge_flags & BGE_TAGGED_STATUS) { |
3776 | if (sc->bge_lasttag == statustag && |
3777 | (CSR_READ_4(sc, BGE_PCI_PCISTATE) & |
3778 | BGE_PCISTATE_INTR_NOT_ACTIVE)) |
3779 | return (0); |
3780 | sc->bge_lasttag = statustag; |
3781 | } else { |
3782 | if (!(statusword & BGE_STATFLAG_UPDATED) && |
3783 | (CSR_READ_4(sc, BGE_PCI_PCISTATE) & |
3784 | BGE_PCISTATE_INTR_NOT_ACTIVE)) |
3785 | return (0); |
3786 | /* Ack interrupt and stop others from occurring. */ |
3787 | bge_writembx(sc, BGE_MBX_IRQ0_LO, 1); |
3788 | statustag = 0; |
3789 | } |
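/* With tagged status, the write of the last-seen tag to BGE_MBX_IRQ0_LO |
* further below acknowledges the status block and re-enables the |
* interrupt; in the untagged case the write of 1 above masks interrupts |
* and the later write of 0 unmasks them. */ |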
3790 | |
3791 | /* clear status word */ |
3792 | sc->bge_rdata->bge_status_block.bge_status = 0; |
3793 | |
3794 | bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, |
3795 | offsetof(struct bge_ring_data, bge_status_block), |
3796 | sizeof (struct bge_status_block), |
3797 | BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); |
3798 | |
3799 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 || |
3800 | statusword & BGE_STATFLAG_LINKSTATE_CHANGED || |
3801 | BGE_STS_BIT(sc, BGE_STS_LINK_EVT)) { |
3802 | KERNEL_LOCK(); |
3803 | bge_link_upd(sc); |
3804 | KERNEL_UNLOCK(); |
3805 | } |
3806 | |
3807 | /* Re-enable interrupts. */ |
3808 | bge_writembx(sc, BGE_MBX_IRQ0_LO, statustag); |
3809 | |
3810 | if (ifp->if_flags & IFF_RUNNING) { |
3811 | /* Check RX return ring producer/consumer */ |
3812 | bge_rxeof(sc); |
3813 | |
3814 | /* Check TX ring producer/consumer */ |
3815 | bge_txeof(sc); |
3816 | } |
3817 | |
3818 | return (1); |
3819 | } |
3820 | |
3821 | void |
3822 | bge_tick(void *xsc) |
3823 | { |
3824 | struct bge_softc *sc = xsc; |
3825 | struct mii_data *mii = &sc->bge_mii; |
3826 | int s; |
3827 | |
3828 | s = splnet(); |
3829 | |
3830 | if (BGE_IS_5705_PLUS(sc)) { |
3831 | mtx_enter(&sc->bge_kstat_mtx); |
3832 | bge_stats_update_regs(sc); |
3833 | mtx_leave(&sc->bge_kstat_mtx); |
3834 | } else |
3835 | bge_stats_update(sc); |
3836 | |
3837 | if (sc->bge_flags & BGE_FIBER_TBI) { |
3838 | /* |
3839 | * Since auto-polling can't be used in TBI mode, we have |
3840 | * to poll link status manually. Here we register a |
3841 | * pending link event and trigger an interrupt. |
3842 | */ |
3843 | BGE_STS_SETBIT(sc, BGE_STS_LINK_EVT); |
3844 | BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET); |
3845 | } else { |
3846 | /* |
3847 | * Do not touch PHY if we have link up. This could break |
3848 | * IPMI/ASF mode or produce extra input errors. |
3849 | * (Extra input errors were reported for bcm5701 & bcm5704.) |
3850 | */ |
3851 | if (!BGE_STS_BIT(sc, BGE_STS_LINK)) |
3852 | mii_tick(mii); |
3853 | } |
3854 | |
3855 | timeout_add_sec(&sc->bge_timeout, 1); |
3856 | |
3857 | splx(s); |
3858 | } |
3859 | |
3860 | void |
3861 | bge_stats_update_regs(struct bge_softc *sc) |
3862 | { |
3863 | struct ifnet *ifp = &sc->arpcom.ac_if; |
3864 | uint32_t collisions, discards, inerrors; |
3865 | uint32_t ucast, mcast, bcast; |
3866 | u_int32_t val; |
3867 | #if NKSTAT > 0 |
3868 | struct kstat_kv *kvs = sc->bge_kstat->ks_data; |
3869 | #endif |
3870 | |
3871 | collisions = CSR_READ_4(sc, BGE_MAC_STATS + |
3872 | offsetof(struct bge_mac_stats_regs, etherStatsCollisions)); |
3873 | |
3874 | /* |
3875 | * XXX |
3876 | * Unlike other controllers, the BGE_RXLP_LOCSTAT_IFIN_DROPS counter |
3877 | * of the BCM5717, BCM5718, BCM5762, BCM5719 A0 and BCM5720 A0 |
3878 | * controllers includes the number of unwanted multicast frames. |
3879 | * This comes from a silicon bug, and the known workaround for |
3880 | * getting a rough (not exact) counter is to enable interrupts on |
3881 | * MBUF low watermark attention. That can be done by setting the |
3882 | * BGE_HCCMODE_ATTN bit of BGE_HDD_MODE, the BGE_BMANMODE_LOMBUF_ATTN |
3883 | * bit of BGE_BMAN_MODE and the BGE_MODECTL_FLOWCTL_ATTN_INTR bit of |
3884 | * BGE_MODE_CTL. However, that change would generate more |
3885 | * interrupts, and multiple frames could still be lost during |
3886 | * BGE_MODECTL_FLOWCTL_ATTN_INTR interrupt handling. Given that |
3887 | * the workaround still would not yield a correct counter, it does |
3888 | * not seem worth implementing, so the counter is simply not read |
3889 | * on controllers that have the silicon bug. |
3890 | */ |
3891 | if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5717 && |
3892 | BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5762 && |
3893 | sc->bge_chipid != BGE_CHIPID_BCM5719_A0 && |
3894 | sc->bge_chipid != BGE_CHIPID_BCM5720_A0) |
3895 | discards = CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS); |
3896 | else |
3897 | discards = 0; |
3898 | |
3899 | inerrors = CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS); |
3900 | |
3901 | ifp->if_collisions += collisions; |
3902 | ifp->if_ierrors += discards + inerrors; |
3903 | |
3904 | ucast = CSR_READ_4(sc, BGE_MAC_STATS + |
3905 | offsetof(struct bge_mac_stats_regs, ifHCOutUcastPkts)); |
3906 | mcast = CSR_READ_4(sc, BGE_MAC_STATS + |
3907 | offsetof(struct bge_mac_stats_regs, ifHCOutMulticastPkts)); |
3908 | bcast = CSR_READ_4(sc, BGE_MAC_STATS + |
3909 | offsetof(struct bge_mac_stats_regs, ifHCOutBroadcastPkts)); |
3910 | if (sc->bge_flags & BGE_RDMA_BUG) { |
3911 | /* |
3912 | * If the controller transmitted more than |
3913 | * BGE_NUM_RDMA_CHANNELS frames, it is safe to disable the |
3914 | * workaround for the DMA engine's miscalculation of TXMBUF |
3915 | * space. |
3916 | */ |
3917 | if (ucast + mcast + bcast > BGE_NUM_RDMA_CHANNELS) { |
3918 | val = CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL); |
3919 | if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719) |
3920 | val &= ~BGE_RDMA_TX_LENGTH_WA_5719; |
3921 | else |
3922 | val &= ~BGE_RDMA_TX_LENGTH_WA_5720; |
3923 | CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL, val); |
3924 | sc->bge_flags &= ~BGE_RDMA_BUG; |
3925 | } |
3926 | |
3927 | #if NKSTAT > 0 |
3928 | kstat_kv_u32(&kvs[bge_stat_out_ucast_pkt]) += ucast; |
3929 | kstat_kv_u32(&kvs[bge_stat_out_mcast_pkt]) += mcast; |
3930 | kstat_kv_u32(&kvs[bge_stat_out_bcast_pkt]) += bcast; |
3931 | kstat_kv_u32(&kvs[bge_stat_collisions]) += collisions; |
3932 | kstat_kv_u32(&kvs[bge_stat_if_in_drops]) += discards; |
3933 | kstat_kv_u32(&kvs[bge_stat_if_in_errors]) += inerrors; |
3934 | #endif |
3935 | } |
3936 | |
3937 | void |
3938 | bge_stats_update(struct bge_softc *sc) |
3939 | { |
3940 | struct ifnet *ifp = &sc->arpcom.ac_if; |
3941 | bus_size_t stats = BGE_MEMWIN_START + BGE_STATS_BLOCK; |
3942 | u_int32_t cnt; |
3943 | |
3944 | #define READ_STAT(sc, stats, stat) \ |
3945 | CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat)) |
3946 | |
3947 | cnt = READ_STAT(sc, stats, txstats.etherStatsCollisions.bge_addr_lo); |
3948 | ifp->if_collisions += (u_int32_t)(cnt - sc->bge_tx_collisions); |
3949 | sc->bge_tx_collisions = cnt; |
3950 | |
3951 | cnt = READ_STAT(sc, stats, nicNoMoreRxBDs.bge_addr_lo); |
3952 | sc->bge_rx_overruns = cnt; |
3953 | cnt = READ_STAT(sc, stats, ifInErrors.bge_addr_lo); |
3954 | ifp->if_ierrors += (uint32_t)(cnt - sc->bge_rx_inerrors); |
3955 | sc->bge_rx_inerrors = cnt; |
3956 | cnt = READ_STAT(sc, stats, ifInDiscards.bge_addr_lo); |
3957 | ifp->if_ierrors += (u_int32_t)(cnt - sc->bge_rx_discards); |
3958 | sc->bge_rx_discards = cnt; |
3959 | |
3960 | cnt = READ_STAT(sc, stats, txstats.ifOutDiscards.bge_addr_lo); |
3961 | ifp->if_oerrors += (u_int32_t)(cnt - sc->bge_tx_discards); |
3962 | sc->bge_tx_discards = cnt; |
3963 | |
3964 | #undef READ_STAT |
3965 | } |
3966 | |
3967 | /* |
3968 | * Compact outbound packets to avoid bug with DMA segments less than 8 bytes. |
3969 | */ |
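/* Sketch of the strategy below: for each sub-8-byte mbuf, first try to |
* fold it into the preceding mbuf, then try to pull enough bytes up from |
* the next one; as a last resort allocate a fresh mbuf that combines the |
* runt with the tail of its predecessor. */ |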
3970 | int |
3971 | bge_compact_dma_runt(struct mbuf *pkt) |
3972 | { |
3973 | struct mbuf *m, *prev, *n = NULL; |
3974 | int totlen, newprevlen; |
3975 | |
3976 | prev = NULL; |
3977 | totlen = 0; |
3978 | |
3979 | for (m = pkt; m != NULL; prev = m, m = m->m_next) { |
3980 | int mlen = m->m_len; |
3981 | int shortfall = 8 - mlen; |
3982 | |
3983 | totlen += mlen; |
3984 | if (mlen == 0) |
3985 | continue; |
3986 | if (mlen >= 8) |
3987 | continue; |
3988 | |
3989 | /* If we get here, the mbuf data is too small for the DMA engine. |
3990 | * Try to fix it by shuffling data to prev or next in the chain. |
3991 | * If that fails, do a compacting deep-copy of the whole chain. |
3992 | */ |
3993 | |
3994 | /* Internal frag. If fits in prev, copy it there. */ |
3995 | if (prev && m_trailingspace(prev) >= m->m_len) { |
3996 | bcopy(m->m_data, prev->m_data + prev->m_len, mlen); |
3997 | prev->m_len += mlen; |
3998 | m->m_len = 0; |
3999 | /* XXX stitch chain */ |
4000 | prev->m_next = m_free(m); |
4001 | m = prev; |
4002 | continue; |
4003 | } else if (m->m_next != NULL && |
4004 | m_trailingspace(m) >= shortfall && |
4005 | m->m_next->m_len >= (8 + shortfall)) { |
4006 | /* m is writable and has enough data in next; pull up. */ |
4007 | |
4008 | bcopy(m->m_next->m_data, m->m_data + m->m_len, shortfall); |
4009 | m->m_len += shortfall; |
4010 | m->m_next->m_len -= shortfall; |
4011 | m->m_next->m_data += shortfall; |
4012 | } else if (m->m_next == NULL || 1) { |
4013 | /* Got a runt at the very end of the packet. |
4014 | * Borrow data from the tail of the preceding mbuf and |
4015 | * update its length in-place. (The original data is still |
4016 | * valid, so we can do this even if prev is not writable.) |
4017 | */ |
4018 | |
4019 | /* if we'd make prev a runt, just move all of its data. */ |
4020 | #ifdef DEBUG |
4021 | KASSERT(prev != NULL /*, ("runt but null PREV")*/); |
4022 | KASSERT(prev->m_len >= 8 /*, ("runt prev")*/); |
4023 | #endif |
4024 | if ((prev->m_len - shortfall) < 8) |
4025 | shortfall = prev->m_len; |
4026 | |
4027 | newprevlen = prev->m_len - shortfall; |
4028 | |
4029 | MGET(n, M_NOWAIT, MT_DATA); |
4030 | if (n == NULL) |
4031 | return (ENOBUFS); |
4032 | KASSERT(m->m_len + shortfall < MLEN |
4033 | /*, |
4034 | ("runt %d +prev %d too big\n", m->m_len, shortfall)*/); |
4035 | |
4036 | /* first copy the data we're stealing from prev */ |
4037 | bcopy(prev->m_data + newprevlen, n->m_data, shortfall); |
4038 | |
4039 | /* update prev->m_len accordingly */ |
4040 | prev->m_len -= shortfall; |
4041 | |
4042 | /* copy data from runt m */ |
4043 | bcopy(m->m_data, n->m_data + shortfall, m->m_len); |
4044 | |
4045 | /* n holds what we stole from prev, plus m */ |
4046 | n->m_len = shortfall + m->m_len; |
4047 | |
4048 | /* stitch n into chain and free m */ |
4049 | n->m_next = m->m_next; |
4050 | prev->m_next = n; |
4051 | /* KASSERT(m->m_next == NULL); */ |
4052 | m->m_next = NULL; |
4053 | m_free(m); |
4054 | m = n; /* for continuing loop */ |
4055 | } |
4056 | } |
4057 | return (0); |
4058 | } |
4059 | |
4060 | /* |
4061 | * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason. |
4062 | * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD, |
4063 | * but when such padded frames employ the bge IP/TCP checksum offload, |
4064 | * the hardware checksum assist gives incorrect results (possibly |
4065 | * from incorporating its own padding into the UDP/TCP checksum; who knows). |
4066 | * If we pad such runts with zeros, the onboard checksum comes out correct. |
4067 | */ |
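/* bge_encap() invokes this for frames shorter than ETHER_MIN_NOPAD |
* whenever TCP/UDP checksum offload is requested, so the zero padding |
* is already in place before any TX descriptor is built. */ |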
4068 | int |
4069 | bge_cksum_pad(struct mbuf *m) |
4070 | { |
4071 | int padlen = ETHER_MIN_NOPAD - m->m_pkthdr.len; |
4072 | struct mbuf *last; |
4073 | |
4074 | /* If there's only the packet-header and we can pad there, use it. */ |
4075 | if (m->m_pkthdr.len == m->m_len && m_trailingspace(m) >= padlen) { |
4076 | last = m; |
4077 | } else { |
4078 | /* |
4079 | * Walk packet chain to find last mbuf. We will either |
4080 | * pad there, or append a new mbuf and pad it. |
4081 | */ |
4082 | for (last = m; last->m_next != NULL; last = last->m_next); |
4083 | if (m_trailingspace(last) < padlen) { |
4084 | /* Allocate new empty mbuf, pad it. Compact later. */ |
4085 | struct mbuf *n; |
4086 | |
4087 | MGET(n, M_DONTWAIT, MT_DATA); |
4088 | if (n == NULL) |
4089 | return (ENOBUFS); |
4090 | n->m_len = 0; |
4091 | last->m_next = n; |
4092 | last = n; |
4093 | } |
4094 | } |
4095 | |
4096 | /* Now zero the pad area, to avoid the bge cksum-assist bug. */ |
4097 | memset(mtod(last, caddr_t) + last->m_len, 0, padlen); |
4098 | last->m_len += padlen; |
4099 | m->m_pkthdr.len += padlen; |
4100 | |
4101 | return (0); |
4102 | } |
4103 | |
4104 | /* |
4105 | * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data |
4106 | * pointers to descriptors. |
4107 | */ |
4108 | int |
4109 | bge_encap(struct bge_softc *sc, struct mbuf *m, int *txinc) |
4110 | { |
4111 | struct bge_tx_bd *f = NULL; |
4112 | u_int32_t frag, cur; |
4113 | u_int16_t csum_flags = 0; |
4114 | bus_dmamap_t dmamap; |
4115 | int i = 0; |
4116 | |
4117 | cur = frag = (sc->bge_tx_prodidx + *txinc) % BGE_TX_RING_CNT; |
4118 | |
4119 | if (m->m_pkthdr.csum_flags) { |
4120 | if (m->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT) |
4121 | csum_flags |= BGE_TXBDFLAG_IP_CSUM; |
4122 | if (m->m_pkthdr.csum_flags & |
4123 | (M_TCP_CSUM_OUT | M_UDP_CSUM_OUT)) { |
4124 | csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM; |
4125 | if (m->m_pkthdr.len < ETHER_MIN_NOPAD && |
4126 | bge_cksum_pad(m) != 0) |
4127 | return (ENOBUFS); |
4128 | } |
4129 | } |
4130 | |
4131 | if (sc->bge_flags & BGE_JUMBO_FRAME && |
4132 | m->m_pkthdr.len > ETHER_MAX_LEN) |
4133 | csum_flags |= BGE_TXBDFLAG_JUMBO_FRAME; |
4134 | |
4135 | if (!(BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX)) |
4136 | goto doit; |
4137 | |
4138 | /* |
4139 | * bcm5700 Revision B silicon cannot handle DMA descriptors with |
4140 | * fewer than eight bytes. If we encounter a teeny mbuf |
4141 | * at the end of a chain, we can pad. Otherwise, copy. |
4142 | */ |
4143 | if (bge_compact_dma_runt(m) != 0) |
4144 | return (ENOBUFS); |
4145 | |
4146 | doit: |
4147 | dmamap = sc->bge_txdma[cur]; |
4148 | |
4149 | /* |
4150 | * Start packing the mbufs in this chain into |
4151 | * the fragment pointers. Stop when we run out |
4152 | * of fragments or hit the end of the mbuf chain. |
4153 | */ |
4154 | switch (bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m, |
4155 | BUS_DMA_NOWAIT)) { |
4156 | case 0: |
4157 | break; |
4158 | case EFBIG: |
4159 | if (m_defrag(m, M_DONTWAIT) == 0 && |
4160 | bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m, |
4161 | BUS_DMA_NOWAIT) == 0) |
4162 | break; |
4163 | |
4164 | /* FALLTHROUGH */ |
4165 | default: |
4166 | return (ENOBUFS); |
4167 | } |
4168 | |
4169 | for (i = 0; i < dmamap->dm_nsegs; i++) { |
4170 | f = &sc->bge_rdata->bge_tx_ring[frag]; |
4171 | if (sc->bge_cdata.bge_tx_chain[frag] != NULL) |
4172 | break; |
4173 | BGE_HOSTADDR(f->bge_addr, dmamap->dm_segs[i].ds_addr); |
4174 | f->bge_len = dmamap->dm_segs[i].ds_len; |
4175 | f->bge_flags = csum_flags; |
4176 | f->bge_vlan_tag = 0; |
4177 | #if NVLAN > 0 |
4178 | if (m->m_flags & M_VLANTAG) { |
4179 | f->bge_flags |= BGE_TXBDFLAG_VLAN_TAG; |
4180 | f->bge_vlan_tag = m->m_pkthdr.ether_vtag; |
4181 | } |
4182 | #endif |
4183 | cur = frag; |
4184 | BGE_INC(frag, BGE_TX_RING_CNT); |
4185 | } |
4186 | |
4187 | if (i < dmamap->dm_nsegs) |
4188 | goto fail_unload; |
4189 | |
4190 | if (frag == sc->bge_tx_saved_considx) |
4191 | goto fail_unload; |
4192 | |
4193 | bus_dmamap_sync(sc->bge_dmatag, dmamap, 0, dmamap->dm_mapsize, |
4194 | BUS_DMASYNC_PREWRITE); |
4195 | |
4196 | sc->bge_rdata->bge_tx_ring[cur].bge_flags |= BGE_TXBDFLAG_END; |
4197 | sc->bge_cdata.bge_tx_chain[cur] = m; |
4198 | sc->bge_cdata.bge_tx_map[cur] = dmamap; |
4199 | |
4200 | *txinc += dmamap->dm_nsegs; |
4201 | |
4202 | return (0); |
4203 | |
4204 | fail_unload: |
4205 | bus_dmamap_unload(sc->bge_dmatag, dmamap); |
4206 | |
4207 | return (ENOBUFS); |
4208 | } |
4209 | |
4210 | /* |
4211 | * Main transmit routine. To avoid having to do mbuf copies, we put pointers |
4212 | * to the mbuf data regions directly in the transmit descriptors. |
4213 | */ |
4214 | void |
4215 | bge_start(struct ifqueue *ifq) |
4216 | { |
4217 | struct ifnet *ifp = ifq->ifq_if; |
4218 | struct bge_softc *sc = ifp->if_softc; |
4219 | struct mbuf *m; |
4220 | int txinc; |
4221 | |
4222 | if (!BGE_STS_BIT(sc, BGE_STS_LINK)) { |
4223 | ifq_purge(ifq); |
4224 | return; |
4225 | } |
4226 | |
4227 | txinc = 0; |
4228 | while (1) { |
4229 | /* Check if we have enough free send BDs. */ |
4230 | if (sc->bge_txcnt + txinc + BGE_NTXSEG + 16 >= |
4231 | BGE_TX_RING_CNT) { |
4232 | ifq_set_oactive(ifq); |
4233 | break; |
4234 | } |
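/* The BGE_NTXSEG + 16 headroom reserves room for one maximally |
* fragmented packet plus slack, presumably so a subsequent bge_encap() |
* cannot run the ring completely full. */ |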
4235 | |
4236 | m = ifq_dequeue(ifq); |
4237 | if (m == NULL) |
4238 | break; |
4239 | |
4240 | if (bge_encap(sc, m, &txinc) != 0) { |
4241 | m_freem(m); |
4242 | continue; |
4243 | } |
4244 | |
4245 | #if NBPFILTER > 0 |
4246 | if (ifp->if_bpf) |
4247 | bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT); |
4248 | #endif |
4249 | } |
4250 | |
4251 | if (txinc != 0) { |
4252 | /* Transmit */ |
4253 | sc->bge_tx_prodidx = (sc->bge_tx_prodidx + txinc) % |
4254 | BGE_TX_RING_CNT; |
4255 | bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx); |
4256 | if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX) |
4257 | bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, |
4258 | sc->bge_tx_prodidx); |
4259 | |
4260 | atomic_add_int(&sc->bge_txcnt, txinc); |
4261 | |
4262 | /* |
4263 | * Set a timeout in case the chip goes out to lunch. |
4264 | */ |
4265 | ifp->if_timer = 5; |
4266 | } |
4267 | } |
4268 | |
4269 | void |
4270 | bge_init(void *xsc) |
4271 | { |
4272 | struct bge_softc *sc = xsc; |
4273 | struct ifnet *ifp; |
4274 | u_int16_t *m; |
4275 | u_int32_t mode; |
4276 | int s; |
4277 | |
4278 | s = splnet(); |
4279 | |
4280 | ifp = &sc->arpcom.ac_if; |
4281 | |
4282 | /* Cancel pending I/O and flush buffers. */ |
4283 | bge_stop(sc, 0); |
4284 | bge_sig_pre_reset(sc, BGE_RESET_START); |
4285 | bge_reset(sc); |
4286 | bge_sig_legacy(sc, BGE_RESET_START); |
4287 | bge_sig_post_reset(sc, BGE_RESET_START); |
4288 | |
4289 | bge_chipinit(sc); |
4290 | |
4291 | /* |
4292 | * Init the various state machines, ring |
4293 | * control blocks and firmware. |
4294 | */ |
4295 | if (bge_blockinit(sc)) { |
4296 | printf("%s: initialization failure\n", sc->bge_dev.dv_xname); |
4297 | splx(s); |
4298 | return; |
4299 | } |
4300 | |
4301 | /* Specify MRU. */ |
4302 | if (BGE_IS_JUMBO_CAPABLE(sc)) |
4303 | CSR_WRITE_4(sc, BGE_RX_MTU, |
4304 | BGE_JUMBO_FRAMELEN + ETHER_VLAN_ENCAP_LEN); |
4305 | else |
4306 | CSR_WRITE_4(sc, BGE_RX_MTU, |
4307 | ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN); |
4308 | |
4309 | /* Load our MAC address. */ |
4310 | m = (u_int16_t *)&sc->arpcom.ac_enaddr[0]; |
4311 | CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0])); |
4312 | CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2])); |
4313 | |
4314 | if (!(ifp->if_capabilitiesif_data.ifi_capabilities & IFCAP_VLAN_HWTAGGING0x00000020)) { |
4315 | /* Disable hardware decapsulation of VLAN frames. */ |
4316 | BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG)((sc->bge_btag)->write_4((sc->bge_bhandle), (0x0468) , ((((sc->bge_btag)->read_4((sc->bge_bhandle), (0x0468 ))) | (0x00000400))))); |
4317 | } |
4318 | |
4319 | /* Program promiscuous mode and multicast filters. */ |
4320 | bge_iff(sc); |
4321 | |
4322 | /* Init RX ring. */ |
4323 | bge_init_rx_ring_std(sc); |
4324 | |
4325 | /* |
4326 | * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's |
4327 | * memory to ensure that the chip has in fact read the first |
4328 | * entry of the ring. |
4329 | */ |
4330 | if (sc->bge_chipid == BGE_CHIPID_BCM5705_A00x3000) { |
4331 | u_int32_t v, i; |
4332 | for (i = 0; i < 10; i++) { |
4333 | DELAY(20)(*delay_func)(20); |
4334 | v = bge_readmem_ind(sc, BGE_STD_RX_RINGS0x00006000 + 8); |
4335 | if (v == (MCLBYTES(1 << 11) - ETHER_ALIGN2)) |
4336 | break; |
4337 | } |
4338 | if (i == 10) |
4339 | printf("%s: 5705 A0 chip failed to load RX ring\n", |
4340 | sc->bge_dev.dv_xname); |
4341 | } |
4342 | |
4343 | /* Init Jumbo RX ring. */ |
4344 | if (sc->bge_flags & BGE_JUMBO_RING0x01000000) |
4345 | bge_init_rx_ring_jumbo(sc); |
4346 | |
4347 | /* Init our RX return ring index */ |
4348 | sc->bge_rx_saved_considx = 0; |
4349 | |
4350 | /* Init our RX/TX stat counters. */ |
4351 | sc->bge_tx_collisions = 0; |
4352 | sc->bge_rx_discards = 0; |
4353 | sc->bge_rx_inerrors = 0; |
4354 | sc->bge_rx_overruns = 0; |
4355 | sc->bge_tx_discards = 0; |
4356 | |
4357 | /* Init TX ring. */ |
4358 | bge_init_tx_ring(sc); |
4359 | |
4360 | /* Enable TX MAC state machine lockup fix. */ |
4361 | mode = CSR_READ_4(sc, BGE_TX_MODE)((sc->bge_btag)->read_4((sc->bge_bhandle), (0x045C)) ); |
4362 | if (BGE_IS_5755_PLUS(sc)((sc)->bge_flags & 0x00004000) || |
4363 | BGE_ASICREV(sc->bge_chipid)((sc->bge_chipid) >> 12) == BGE_ASICREV_BCM59060x0c) |
4364 | mode |= BGE_TXMODE_MBUF_LOCKUP_FIX0x00000100; |
4365 | if (BGE_ASICREV(sc->bge_chipid)((sc->bge_chipid) >> 12) == BGE_ASICREV_BCM57200x5720 || |
4366 | BGE_ASICREV(sc->bge_chipid)((sc->bge_chipid) >> 12) == BGE_ASICREV_BCM57620x5762) { |
4367 | mode &= ~(BGE_TXMODE_JMB_FRM_LEN0x00400000 | BGE_TXMODE_CNT_DN_MODE0x00800000); |
4368 | mode |= CSR_READ_4(sc, BGE_TX_MODE)((sc->bge_btag)->read_4((sc->bge_bhandle), (0x045C)) ) & |
4369 | (BGE_TXMODE_JMB_FRM_LEN0x00400000 | BGE_TXMODE_CNT_DN_MODE0x00800000); |
4370 | } |
4371 | |
4372 | /* Turn on transmitter */ |
4373 | CSR_WRITE_4(sc, BGE_TX_MODE, mode | BGE_TXMODE_ENABLE)((sc->bge_btag)->write_4((sc->bge_bhandle), (0x045C) , (mode | 0x00000002))); |
4374 | DELAY(100)(*delay_func)(100); |
4375 | |
4376 | mode = CSR_READ_4(sc, BGE_RX_MODE)((sc->bge_btag)->read_4((sc->bge_bhandle), (0x0468)) ); |
4377 | if (BGE_IS_5755_PLUS(sc)((sc)->bge_flags & 0x00004000)) |
4378 | mode |= BGE_RXMODE_IPV6_ENABLE0x01000000; |
4379 | if (BGE_ASICREV(sc->bge_chipid)((sc->bge_chipid) >> 12) == BGE_ASICREV_BCM57620x5762) |
4380 | mode |= BGE_RXMODE_IPV4_FRAG_FIX0x02000000; |
4381 | |
4382 | /* Turn on receiver */ |
4383 | CSR_WRITE_4(sc, BGE_RX_MODE, mode | BGE_RXMODE_ENABLE)((sc->bge_btag)->write_4((sc->bge_bhandle), (0x0468) , (mode | 0x00000002))); |
4384 | DELAY(10)(*delay_func)(10); |
4385 | |
4386 | /* |
4387 | * Set the number of good frames to receive after RX MBUF |
4388 | * Low Watermark has been reached. After the RX MAC receives |
4389 | * this number of frames, it will drop subsequent incoming |
4390 | * frames until the MBUF High Watermark is reached. |
4391 | */ |
4392 | if (BGE_IS_57765_PLUS(sc)((sc)->bge_flags & 0x00040000)) |
4393 | CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 1)((sc->bge_btag)->write_4((sc->bge_bhandle), (0x0504) , (1))); |
4394 | else |
4395 | CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 2)((sc->bge_btag)->write_4((sc->bge_bhandle), (0x0504) , (2))); |
4396 | |
4397 | /* Tell firmware we're alive. */ |
4398 | BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP)((sc->bge_btag)->write_4((sc->bge_bhandle), (0x6800) , ((((sc->bge_btag)->read_4((sc->bge_bhandle), (0x6800 ))) | (0x00010000))))); |
4399 | |
4400 | /* Enable host interrupts. */ |
4401 | BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA)((sc->bge_btag)->write_4((sc->bge_bhandle), (0x68), ( (((sc->bge_btag)->read_4((sc->bge_bhandle), (0x68))) | (0x00000001))))); |
4402 | BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR)((sc->bge_btag)->write_4((sc->bge_bhandle), (0x68), ( (((sc->bge_btag)->read_4((sc->bge_bhandle), (0x68))) & ~(0x00000002))))); |
4403 | bge_writembx(sc, BGE_MBX_IRQ0_LO0x0204, 0); |
4404 | |
4405 | bge_ifmedia_upd(ifp); |
4406 | |
4407 | ifp->if_flags |= IFF_RUNNING0x40; |
4408 | ifq_clr_oactive(&ifp->if_snd); |
4409 | |
4410 | splx(s)spllower(s); |
4411 | |
4412 | timeout_add_sec(&sc->bge_timeout, 1); |
4413 | } |
4414 | |
/*
 * Set media options.
 */
int
bge_ifmedia_upd(struct ifnet *ifp)
{
	struct bge_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->bge_mii;
	struct ifmedia *ifm = &sc->bge_ifmedia;

	/* If this is a 1000baseX NIC, enable the TBI port. */
	if (sc->bge_flags & BGE_FIBER_TBI) {
		if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
			return (EINVAL);
		switch(IFM_SUBTYPE(ifm->ifm_media)) {
		case IFM_AUTO:
			/*
			 * The BCM5704 ASIC appears to have a special
			 * mechanism for programming the autoneg
			 * advertisement registers in TBI mode.
			 */
			if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
				u_int32_t sgdig;
				sgdig = CSR_READ_4(sc, BGE_SGDIG_STS);
				if (sgdig & BGE_SGDIGSTS_DONE) {
					CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
					sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
					sgdig |= BGE_SGDIGCFG_AUTO |
					    BGE_SGDIGCFG_PAUSE_CAP |
					    BGE_SGDIGCFG_ASYM_PAUSE;
					CSR_WRITE_4(sc, BGE_SGDIG_CFG,
					    sgdig | BGE_SGDIGCFG_SEND);
					DELAY(5);
					CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
				}
			}
			break;
		case IFM_1000_SX:
			if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
				BGE_CLRBIT(sc, BGE_MAC_MODE,
				    BGE_MACMODE_HALF_DUPLEX);
			} else {
				BGE_SETBIT(sc, BGE_MAC_MODE,
				    BGE_MACMODE_HALF_DUPLEX);
			}
			DELAY(40);
			break;
		default:
			return (EINVAL);
		}
		/* XXX 802.3x flow control for 1000BASE-SX */
		return (0);
	}

	BGE_STS_SETBIT(sc, BGE_STS_LINK_EVT);
	if (mii->mii_instance) {
		struct mii_softc *miisc;
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	mii_mediachg(mii);

	/*
	 * Force an interrupt so that we will call bge_link_upd
	 * if needed and clear any pending link state attention.
	 * Without this we are not getting any further interrupts
	 * for link state changes and thus will not UP the link and
	 * not be able to send in bge_start. The only way to get
	 * things working was to receive a packet and get a RX intr.
	 */
	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
	    sc->bge_flags & BGE_IS_5788)
		BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
	else
		BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);

	return (0);
}

/*
 * Report current media status.
 */
void
bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct bge_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->bge_mii;

	if (sc->bge_flags & BGE_FIBER_TBI) {
		ifmr->ifm_status = IFM_AVALID;
		ifmr->ifm_active = IFM_ETHER;
		if (CSR_READ_4(sc, BGE_MAC_STS) &
		    BGE_MACSTAT_TBI_PCS_SYNCHED) {
			ifmr->ifm_status |= IFM_ACTIVE;
		} else {
			ifmr->ifm_active |= IFM_NONE;
			return;
		}
		ifmr->ifm_active |= IFM_1000_SX;
		if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
			ifmr->ifm_active |= IFM_HDX;
		else
			ifmr->ifm_active |= IFM_FDX;
		return;
	}

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = (mii->mii_media_active & ~IFM_ETH_FMASK) |
	    sc->bge_flowflags;
}

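/*
 * Handle ioctl requests: interface up/down, flag changes, media
 * selection and flow control, and RX ring information.  Anything
 * unrecognized is passed on to ether_ioctl().
 */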
int
bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct bge_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	int s, error = 0;
	struct mii_data *mii;

	s = splnet();

	switch(command) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			bge_init(sc);
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				bge_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				bge_stop(sc, 0);
		}
		break;

	case SIOCSIFMEDIA:
		/* XXX Flow control is not supported for 1000BASE-SX */
		if (sc->bge_flags & BGE_FIBER_TBI) {
			ifr->ifr_media &= ~IFM_ETH_FMASK;
			sc->bge_flowflags = 0;
		}

		/* Flow control requires full-duplex mode. */
		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
		    (ifr->ifr_media & IFM_FDX) == 0) {
			ifr->ifr_media &= ~IFM_ETH_FMASK;
		}
		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
				/* We can do both TXPAUSE and RXPAUSE. */
				ifr->ifr_media |=
				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
			}
			sc->bge_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
		}
		/* FALLTHROUGH */
	case SIOCGIFMEDIA:
		if (sc->bge_flags & BGE_FIBER_TBI) {
			error = ifmedia_ioctl(ifp, ifr, &sc->bge_ifmedia,
			    command);
		} else {
			mii = &sc->bge_mii;
			error = ifmedia_ioctl(ifp, ifr, &mii->mii_media,
			    command);
		}
		break;

	case SIOCGIFRXR:
		error = bge_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_data);
		break;

	default:
		error = ether_ioctl(ifp, &sc->arpcom, command, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			bge_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}

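/*
 * Describe the standard and jumbo RX rings for the SIOCGIFRXR
 * ioctl, reporting each ring's buffer size and if_rxr state.
 */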
int
bge_rxrinfo(struct bge_softc *sc, struct if_rxrinfo *ifri)
{
	struct if_rxring_info ifr[2];
	u_int n = 0;

	memset(ifr, 0, sizeof(ifr));

	if (ISSET(sc->bge_flags, BGE_RXRING_VALID)) {
		ifr[n].ifr_size = sc->bge_rx_std_len;
		strlcpy(ifr[n].ifr_name, "std", sizeof(ifr[n].ifr_name));
		ifr[n].ifr_info = sc->bge_std_ring;

		n++;
	}

	if (ISSET(sc->bge_flags, BGE_JUMBO_RXRING_VALID)) {
		ifr[n].ifr_size = BGE_JLEN;
		strlcpy(ifr[n].ifr_name, "jumbo", sizeof(ifr[n].ifr_name));
		ifr[n].ifr_info = sc->bge_jumbo_ring;

		n++;
	}

	return (if_rxr_info_ioctl(ifri, n, ifr));
}

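/*
 * The chip failed to complete a transmission within the timeout set
 * in bge_start(): log it, reinitialize the hardware and count an
 * output error.
 */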
void
bge_watchdog(struct ifnet *ifp)
{
	struct bge_softc *sc;

	sc = ifp->if_softc;

	printf("%s: watchdog timeout -- resetting\n", sc->bge_dev.dv_xname);

	bge_init(sc);

	ifp->if_oerrors++;
}

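/*
 * Disable a state machine block by clearing its enable bit and
 * polling until the hardware acknowledges that the block has
 * stopped.
 */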
void
bge_stop_block(struct bge_softc *sc, bus_size_t reg, u_int32_t bit)
{
	int i;

	BGE_CLRBIT(sc, reg, bit);

	for (i = 0; i < BGE_TIMEOUT; i++) {
		if ((CSR_READ_4(sc, reg) & bit) == 0)
			return;
		delay(100);
	}

	DPRINTFN(5, ("%s: block failed to stop: reg 0x%lx, bit 0x%08x\n",
	    sc->bge_dev.dv_xname, (u_long) reg, bit));
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
void
bge_stop(struct bge_softc *sc, int softonly)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct ifmedia_entry *ifm;
	struct mii_data *mii;
	int mtmp, itmp;

	timeout_del(&sc->bge_timeout);
	timeout_del(&sc->bge_rxtimeout);
	timeout_del(&sc->bge_rxtimeout_jumbo);

	ifp->if_flags &= ~IFF_RUNNING;
	ifp->if_timer = 0;

	if (!softonly) {
		/*
		 * Tell firmware we're shutting down.
		 */
		/* bge_stop_fw(sc); */
		bge_sig_pre_reset(sc, BGE_RESET_SHUTDOWN);

		/*
		 * Disable all of the receiver blocks
		 */
		bge_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
		bge_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
		bge_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
		if (BGE_IS_5700_FAMILY(sc))
			bge_stop_block(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
		bge_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
		bge_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
		bge_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);

		/*
		 * Disable all of the transmit blocks
		 */
		bge_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
		bge_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
		bge_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
		bge_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
		bge_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
		if (BGE_IS_5700_FAMILY(sc))
			bge_stop_block(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
		bge_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);

		/*
		 * Shut down all of the memory managers and related
		 * state machines.
		 */
		bge_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
		bge_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
		if (BGE_IS_5700_FAMILY(sc))
			bge_stop_block(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);

		CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
		CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);

		if (!BGE_IS_5705_PLUS(sc)) {
			bge_stop_block(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
			bge_stop_block(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
		}

		bge_reset(sc);
		bge_sig_legacy(sc, BGE_RESET_SHUTDOWN);
		bge_sig_post_reset(sc, BGE_RESET_SHUTDOWN);

		/*
		 * Tell firmware we're shutting down.
		 */
		BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
	}

	intr_barrier(sc->bge_intrhand);
	ifq_barrier(&ifp->if_snd);

	ifq_clr_oactive(&ifp->if_snd);

	/* Free the RX lists. */
	bge_free_rx_ring_std(sc);

	/* Free jumbo RX list. */
	if (sc->bge_flags & BGE_JUMBO_RING)
		bge_free_rx_ring_jumbo(sc);

	/* Free TX buffers. */
	bge_free_tx_ring(sc);

	/*
	 * Isolate/power down the PHY, but leave the media selection
	 * unchanged so that things will be put back to normal when
	 * we bring the interface back up.
	 */
	if (!(sc->bge_flags & BGE_FIBER_TBI)) {
		mii = &sc->bge_mii;
		itmp = ifp->if_flags;
		ifp->if_flags |= IFF_UP;
		ifm = mii->mii_media.ifm_cur;
		mtmp = ifm->ifm_media;
		ifm->ifm_media = IFM_ETHER|IFM_NONE;
		mii_mediachg(mii);
		ifm->ifm_media = mtmp;
		ifp->if_flags = itmp;
	}

	sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;

	if (!softonly) {
		/* Clear MAC's link state (PHY may still have link UP). */
		BGE_STS_CLRBIT(sc, BGE_STS_LINK);
	}
}

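/*
 * Update the driver's notion of link state.  No single method works
 * on all supported chips: BCM5700 needs MII interrupts, TBI (fiber)
 * cards use the PCS sync status, autopolling chips are checked via
 * the MI status register, and everything else is polled.
 */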
void
bge_link_upd(struct bge_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii = &sc->bge_mii;
	u_int32_t status;
	int link;

	/* Clear 'pending link event' flag */
	BGE_STS_CLRBIT(sc, BGE_STS_LINK_EVT);

	/*
	 * Process link state changes.
	 * Grrr. The link status word in the status block does
	 * not work correctly on the BCM5700 rev AX and BX chips,
	 * according to all available information. Hence, we have
	 * to enable MII interrupts in order to properly obtain
	 * async link changes. Unfortunately, this also means that
	 * we have to read the MAC status register to detect link
	 * changes, thereby adding an additional register access to
	 * the interrupt handler.
	 */
	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700) {
		status = CSR_READ_4(sc, BGE_MAC_STS);
		if (status & BGE_MACSTAT_MI_INTERRUPT) {
			mii_pollstat(mii);

			if (!BGE_STS_BIT(sc, BGE_STS_LINK) &&
			    mii->mii_media_status & IFM_ACTIVE &&
			    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
				BGE_STS_SETBIT(sc, BGE_STS_LINK);
			else if (BGE_STS_BIT(sc, BGE_STS_LINK) &&
			    (!(mii->mii_media_status & IFM_ACTIVE) ||
			    IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE))
				BGE_STS_CLRBIT(sc, BGE_STS_LINK);

			/* Clear the interrupt */
			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
			    BGE_EVTENB_MI_INTERRUPT);
			bge_miibus_readreg(&sc->bge_dev, sc->bge_phy_addr,
			    BRGPHY_MII_ISR);
			bge_miibus_writereg(&sc->bge_dev, sc->bge_phy_addr,
			    BRGPHY_MII_IMR, BRGPHY_INTRS);
		}
		return;
	}

	if (sc->bge_flags & BGE_FIBER_TBI) {
		status = CSR_READ_4(sc, BGE_MAC_STS);
		if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
			if (!BGE_STS_BIT(sc, BGE_STS_LINK)) {
				BGE_STS_SETBIT(sc, BGE_STS_LINK);
				if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704)
					BGE_CLRBIT(sc, BGE_MAC_MODE,
					    BGE_MACMODE_TBI_SEND_CFGS);
				CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
				status = CSR_READ_4(sc, BGE_MAC_MODE);
				link = (status & BGE_MACMODE_HALF_DUPLEX) ?
				    LINK_STATE_HALF_DUPLEX :
				    LINK_STATE_FULL_DUPLEX;
				ifp->if_baudrate = IF_Gbps(1);
				if (ifp->if_link_state != link) {
					ifp->if_link_state = link;
					if_link_state_change(ifp);
				}
			}
		} else if (BGE_STS_BIT(sc, BGE_STS_LINK)) {
			BGE_STS_CLRBIT(sc, BGE_STS_LINK);
			link = LINK_STATE_DOWN;
			ifp->if_baudrate = 0;
			if (ifp->if_link_state != link) {
				ifp->if_link_state = link;
				if_link_state_change(ifp);
			}
		}
	} else if (BGE_STS_BIT(sc, BGE_STS_AUTOPOLL)) {
		/*
		 * Some broken BCM chips have BGE_STATFLAG_LINKSTATE_CHANGED bit
		 * in status word always set. Workaround this bug by reading
		 * PHY link status directly.
		 */
		link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK) ?
		    BGE_STS_LINK : 0;

		if (BGE_STS_BIT(sc, BGE_STS_LINK) != link) {
			mii_pollstat(mii);

			if (!BGE_STS_BIT(sc, BGE_STS_LINK) &&
			    mii->mii_media_status & IFM_ACTIVE &&
			    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
				BGE_STS_SETBIT(sc, BGE_STS_LINK);
			else if (BGE_STS_BIT(sc, BGE_STS_LINK) &&
			    (!(mii->mii_media_status & IFM_ACTIVE) ||
			    IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE))
				BGE_STS_CLRBIT(sc, BGE_STS_LINK);
		}
	} else {
		/*
		 * For controllers that call mii_tick, we have to poll
		 * link status.
		 */
		mii_pollstat(mii);
	}

	/* Clear the attention */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
	    BGE_MACSTAT_LINK_CHANGED);
}

#if NKSTAT > 0

struct bge_stat {
	char name[KSTAT_KV_NAMELEN];
	enum kstat_kv_unit unit;
	bus_size_t reg;
};

#define MACREG(_f) \
	BGE_MAC_STATS + offsetof(struct bge_mac_stats_regs, _f)

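/*
 * Template for the exported statistics.  Entries with a non-zero
 * register offset are accumulated from the hardware counters in
 * bge_kstat_read(); entries with reg == 0 are maintained elsewhere
 * in the driver (see bge_stats_update_regs()).
 */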
static const struct bge_stat bge_kstat_tpl[] = {
	/* MAC stats */
	[bge_stat_out_octets] = { "out octets", KSTAT_KV_U_BYTES,
	    MACREG(ifHCOutOctets) },
	[bge_stat_collisions] = { "collisions", KSTAT_KV_U_NONE, 0 },
	[bge_stat_xon_sent] = { "xon sent", KSTAT_KV_U_NONE,
	    MACREG(outXonSent) },
	[bge_stat_xoff_sent] = { "xoff sent", KSTAT_KV_U_NONE,
	    MACREG(outXoffSent) },
	[bge_stat_xmit_errors] = { "xmit errors", KSTAT_KV_U_NONE,
	    MACREG(dot3StatsInternalMacTransmitErrors) },
	[bge_stat_coll_frames] = { "coll frames", KSTAT_KV_U_PACKETS,
	    MACREG(dot3StatsSingleCollisionFrames) },
	[bge_stat_multicoll_frames] = { "multicoll frames", KSTAT_KV_U_PACKETS,
	    MACREG(dot3StatsMultipleCollisionFrames) },
	[bge_stat_deferred_xmit] = { "deferred xmit", KSTAT_KV_U_NONE,
	    MACREG(dot3StatsDeferredTransmissions) },
	[bge_stat_excess_coll] = { "excess coll", KSTAT_KV_U_NONE,
	    MACREG(dot3StatsExcessiveCollisions) },
	[bge_stat_late_coll] = { "late coll", KSTAT_KV_U_NONE,
	    MACREG(dot3StatsLateCollisions) },
	[bge_stat_out_ucast_pkt] = { "out ucast pkts", KSTAT_KV_U_PACKETS, 0 },
	[bge_stat_out_mcast_pkt] = { "out mcast pkts", KSTAT_KV_U_PACKETS, 0 },
	[bge_stat_out_bcast_pkt] = { "out bcast pkts", KSTAT_KV_U_PACKETS, 0 },
	[bge_stat_in_octets] = { "in octets", KSTAT_KV_U_BYTES,
	    MACREG(ifHCInOctets) },
	[bge_stat_fragments] = { "fragments", KSTAT_KV_U_NONE,
	    MACREG(etherStatsFragments) },
	[bge_stat_in_ucast_pkt] = { "in ucast pkts", KSTAT_KV_U_PACKETS,
	    MACREG(ifHCInUcastPkts) },
	[bge_stat_in_mcast_pkt] = { "in mcast pkts", KSTAT_KV_U_PACKETS,
	    MACREG(ifHCInMulticastPkts) },
	[bge_stat_in_bcast_pkt] = { "in bcast pkts", KSTAT_KV_U_PACKETS,
	    MACREG(ifHCInBroadcastPkts) },
	[bge_stat_fcs_errors] = { "FCS errors", KSTAT_KV_U_NONE,
	    MACREG(dot3StatsFCSErrors) },
	[bge_stat_align_errors] = { "align errors", KSTAT_KV_U_NONE,
	    MACREG(dot3StatsAlignmentErrors) },
	[bge_stat_xon_rcvd] = { "xon rcvd", KSTAT_KV_U_NONE,
	    MACREG(xonPauseFramesReceived) },
	[bge_stat_xoff_rcvd] = { "xoff rcvd", KSTAT_KV_U_NONE,
	    MACREG(xoffPauseFramesReceived) },
	[bge_stat_ctrl_frame_rcvd] = { "ctrlframes rcvd", KSTAT_KV_U_NONE,
	    MACREG(macControlFramesReceived) },
	[bge_stat_xoff_entered] = { "xoff entered", KSTAT_KV_U_NONE,
	    MACREG(xoffStateEntered) },
	[bge_stat_too_long_frames] = { "too long frames", KSTAT_KV_U_NONE,
	    MACREG(dot3StatsFramesTooLong) },
	[bge_stat_jabbers] = { "jabbers", KSTAT_KV_U_NONE,
	    MACREG(etherStatsJabbers) },
	[bge_stat_too_short_pkts] = { "too short pkts", KSTAT_KV_U_NONE,
	    MACREG(etherStatsUndersizePkts) },

	/* Send Data Initiator stats */
	[bge_stat_dma_rq_full] = { "DMA RQ full", KSTAT_KV_U_NONE,
	    BGE_LOCSTATS_DMA_RQ_FULL },
	[bge_stat_dma_hprq_full] = { "DMA HPRQ full", KSTAT_KV_U_NONE,
	    BGE_LOCSTATS_DMA_HIPRIO_RQ_FULL },
	[bge_stat_sdc_queue_full] = { "SDC queue full", KSTAT_KV_U_NONE,
	    BGE_LOCSTATS_SDC_QUEUE_FULL },
	[bge_stat_nic_sendprod_set] = { "sendprod set", KSTAT_KV_U_NONE,
	    BGE_LOCSTATS_NIC_SENDPROD_SET },
	[bge_stat_status_updated] = { "stats updated", KSTAT_KV_U_NONE,
	    BGE_LOCSTATS_STATS_UPDATED },
	[bge_stat_irqs] = { "irqs", KSTAT_KV_U_NONE, BGE_LOCSTATS_IRQS },
	[bge_stat_avoided_irqs] = { "avoided irqs", KSTAT_KV_U_NONE,
	    BGE_LOCSTATS_AVOIDED_IRQS },
	[bge_stat_tx_thresh_hit] = { "tx thresh hit", KSTAT_KV_U_NONE,
	    BGE_LOCSTATS_TX_THRESH_HIT },

	/* Receive List Placement stats */
	[bge_stat_filtdrop] = { "filtdrop", KSTAT_KV_U_NONE,
	    BGE_RXLP_LOCSTAT_FILTDROP },
	[bge_stat_dma_wrq_full] = { "DMA WRQ full", KSTAT_KV_U_NONE,
	    BGE_RXLP_LOCSTAT_DMA_WRQ_FULL },
	[bge_stat_dma_hpwrq_full] = { "DMA HPWRQ full", KSTAT_KV_U_NONE,
	    BGE_RXLP_LOCSTAT_DMA_HPWRQ_FULL },
	[bge_stat_out_of_bds] = { "out of BDs", KSTAT_KV_U_NONE,
	    BGE_RXLP_LOCSTAT_OUT_OF_BDS },
	[bge_stat_if_in_drops] = { "if in drops", KSTAT_KV_U_NONE, 0 },
	[bge_stat_if_in_errors] = { "if in errors", KSTAT_KV_U_NONE, 0 },
	[bge_stat_rx_thresh_hit] = { "rx thresh hit", KSTAT_KV_U_NONE,
	    BGE_RXLP_LOCSTAT_RXTHRESH_HIT },
};

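/*
 * kstat read callback.  The statistic registers are presumed to
 * clear on read, hence the addition to a running total instead of
 * a plain assignment.
 */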
int
bge_kstat_read(struct kstat *ks)
{
	struct bge_softc *sc = ks->ks_softc;
	struct kstat_kv *kvs = ks->ks_data;
	int i;

	bge_stats_update_regs(sc);

	for (i = 0; i < nitems(bge_kstat_tpl); i++) {
		if (bge_kstat_tpl[i].reg != 0)
			kstat_kv_u32(kvs) += CSR_READ_4(sc,
			    bge_kstat_tpl[i].reg);
		kvs++;
	}

	getnanouptime(&ks->ks_updated);
	return 0;
}

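/*
 * Allocate one key/value slot per template entry and install the
 * per-device "bge-stats" kstat.
 */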
void
bge_kstat_attach(struct bge_softc *sc)
{
	struct kstat *ks;
	struct kstat_kv *kvs;
	int i;

	ks = kstat_create(sc->bge_dev.dv_xname, 0, "bge-stats", 0,
	    KSTAT_T_KV1, 0);
	if (ks == NULL)
		return;

	kvs = mallocarray(nitems(bge_kstat_tpl), sizeof(*kvs), M_DEVBUF,
	    M_ZERO | M_WAITOK);
	for (i = 0; i < nitems(bge_kstat_tpl); i++) {
		const struct bge_stat *tpl = &bge_kstat_tpl[i];
		kstat_kv_unit_init(&kvs[i], tpl->name, KSTAT_KV_T_UINT32,
		    tpl->unit);
	}

	kstat_set_mutex(ks, &sc->bge_kstat_mtx);
	ks->ks_softc = sc;
	ks->ks_data = kvs;
	ks->ks_datalen = nitems(bge_kstat_tpl) * sizeof(*kvs);
	ks->ks_read = bge_kstat_read;

	sc->bge_kstat = ks;
	kstat_install(ks);
}
#endif /* NKSTAT > 0 */