Bug Summary

File: dev/pci/if_bce.c
Warning: line 1150, column 2
Value stored to 'val' is never read
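
The deadcode.DeadStores checker reports an assignment whose stored value is never read again before it is overwritten or goes out of scope. Below is a minimal stand-alone sketch of that pattern and of one common way to keep a register read purely for its side effect; the names (read_reg, fake_reg, example) are hypothetical and are not taken from the driver.

#include <stdint.h>

volatile uint32_t fake_reg;	/* hypothetical stand-in for a device register */

static uint32_t
read_reg(void)
{
	return fake_reg;	/* reading the volatile object is the desired side effect */
}

void
example(void)
{
	uint32_t val;

	val = read_reg();	/* flagged: 'val' is overwritten before any use */
	val = read_reg();
	if (val & 0x1)
		fake_reg = 0;

	(void)read_reg();	/* alternative: keep the read, discard the result */
}

Whether the assignment at line 1150 can simply be dropped, or the read must stay as a read-back after the preceding register write, is a driver-level decision that the report by itself does not settle.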

Annotated Source Code


clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name if_bce.c -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/lib/clang/13.0.0 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -D CONFIG_DRM_AMD_DC_DCN3_0 -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant -fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free 
-fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /usr/obj/sys/arch/amd64/compile/GENERIC.MP/scan-build/2022-01-12-131800-47421-1 -x c /usr/src/sys/dev/pci/if_bce.c
1/* $OpenBSD: if_bce.c,v 1.54 2022/01/09 05:42:46 jsg Exp $ */
2/* $NetBSD: if_bce.c,v 1.3 2003/09/29 01:53:02 mrg Exp $ */
3
4/*
5 * Copyright (c) 2003 Clifford Wright. All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. The name of the author may not be used to endorse or promote products
16 * derived from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
23 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
24 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
25 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
26 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 */
30
31/*
32 * Broadcom BCM440x 10/100 ethernet (broadcom.com)
33 * SiliconBackplane is technology from Sonics, Inc.(sonicsinc.com)
34 *
35 * Cliff Wright cliff@snipe444.org
36 */
37
38#include "bpfilter.h"
39
40#include <sys/param.h>
41#include <sys/systm.h>
42#include <sys/timeout.h>
43#include <sys/sockio.h>
44#include <sys/mbuf.h>
45#include <sys/malloc.h>
46#include <sys/kernel.h>
47#include <sys/device.h>
48#include <sys/socket.h>
49
50#include <net/if.h>
51#include <net/if_media.h>
52
53#include <netinet/in.h>
54#include <netinet/if_ether.h>
55#if NBPFILTER > 0
56#include <net/bpf.h>
57#endif
58
59#include <dev/pci/pcireg.h>
60#include <dev/pci/pcivar.h>
61#include <dev/pci/pcidevs.h>
62
63#include <dev/mii/mii.h>
64#include <dev/mii/miivar.h>
65#include <dev/mii/miidevs.h>
66
67#include <dev/pci/if_bcereg.h>
68
69#include <uvm/uvm.h>
70
71/* ring descriptor */
72struct bce_dma_slot {
73 u_int32_t ctrl;
74 u_int32_t addr;
75};
76#define CTRL_BC_MASK 0x1fff /* buffer byte count */
77#define CTRL_EOT 0x10000000 /* end of descriptor table */
78#define CTRL_IOC 0x20000000 /* interrupt on completion */
79#define CTRL_EOF 0x40000000 /* end of frame */
80#define CTRL_SOF 0x80000000 /* start of frame */
81
82#define BCE_RXBUF_LEN (MCLBYTES - 4)
83
84/* Packet status is returned in a pre-packet header */
85struct rx_pph {
86 u_int16_t len;
87 u_int16_t flags;
88 u_int16_t pad[12];
89};
90
91#define BCE_PREPKT_HEADER_SIZE 30
92
93/* packet status flags bits */
94#define RXF_NO 0x8 /* odd number of nibbles */
95#define RXF_RXER 0x4 /* receive symbol error */
96#define RXF_CRC 0x2 /* crc error */
97#define RXF_OV 0x1 /* fifo overflow */
98
99/* number of descriptors used in a ring */
100#define BCE_NRXDESC 64
101#define BCE_NTXDESC 64
102
103#define BCE_TIMEOUT 100 /* # 10us for mii read/write */
104
105struct bce_softc {
106 struct device bce_dev;
107 bus_space_tag_t bce_btag;
108 bus_space_handle_t bce_bhandle;
109 bus_dma_tag_t bce_dmatag;
110 struct arpcom bce_ac; /* interface info */
111 void *bce_intrhand;
112 struct pci_attach_args bce_pa;
113 struct mii_data bce_mii;
114 u_int32_t bce_phy; /* eeprom indicated phy */
115 struct bce_dma_slot *bce_rx_ring; /* receive ring */
116 struct bce_dma_slot *bce_tx_ring; /* transmit ring */
117 caddr_t bce_data;
118 bus_dmamap_t bce_ring_map;
119 bus_dmamap_t bce_rxdata_map;
120 bus_dmamap_t bce_txdata_map;
121 u_int32_t bce_intmask; /* current intr mask */
122 u_int32_t bce_rxin; /* last rx descriptor seen */
123 u_int32_t bce_txin; /* last tx descriptor seen */
124 int bce_txsfree; /* no. tx slots available */
125 int bce_txsnext; /* next available tx slot */
126 struct timeout bce_timeout;
127};
128
129int bce_probe(struct device *, void *, void *);
130void bce_attach(struct device *, struct device *, void *);
131int bce_activate(struct device *, int);
132int bce_ioctl(struct ifnet *, u_long, caddr_t);
133void bce_start(struct ifnet *);
134void bce_watchdog(struct ifnet *);
135int bce_intr(void *);
136void bce_rxintr(struct bce_softc *);
137void bce_txintr(struct bce_softc *);
138int bce_init(struct ifnet *);
139void bce_add_mac(struct bce_softc *, u_int8_t *, unsigned long);
140void bce_add_rxbuf(struct bce_softc *, int);
141void bce_stop(struct ifnet *);
142void bce_reset(struct bce_softc *);
143void bce_iff(struct ifnet *);
144int bce_mii_read(struct device *, int, int);
145void bce_mii_write(struct device *, int, int, int);
146void bce_statchg(struct device *);
147int bce_mediachange(struct ifnet *);
148void bce_mediastatus(struct ifnet *, struct ifmediareq *);
149void bce_tick(void *);
150
151#ifdef BCE_DEBUG
152#define DPRINTF(x) do { \
153 if (bcedebug) \
154 printf x; \
155} while (/* CONSTCOND */ 0)
156#define DPRINTFN(n,x) do { \
157 if (bcedebug >= (n)) \
158 printf x; \
159} while (/* CONSTCOND */ 0)
160int bcedebug = 0;
161#else
162#define DPRINTF(x)
163#define DPRINTFN(n,x)
164#endif
165
166struct cfattach bce_ca = {
167 sizeof(struct bce_softc), bce_probe, bce_attach, NULL, bce_activate
168};
169struct cfdriver bce_cd = {
170 NULL, "bce", DV_IFNET
171};
172
173const struct pci_matchid bce_devices[] = {
174 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM4401 },
175 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM4401B0 },
176 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM4401B1 }
177};
178
179int
180bce_probe(struct device *parent, void *match, void *aux)
181{
182 return (pci_matchbyid((struct pci_attach_args *)aux, bce_devices,
183 nitems(bce_devices)));
184}
185
186void
187bce_attach(struct device *parent, struct device *self, void *aux)
188{
189 struct bce_softc *sc = (struct bce_softc *) self;
190 struct pci_attach_args *pa = aux;
191 pci_chipset_tag_t pc = pa->pa_pc;
192 pci_intr_handle_t ih;
193 const char *intrstr = NULL((void *)0);
194 caddr_t kva;
195 bus_dma_segment_t seg;
196 int rseg;
197 struct ifnet *ifp;
198 pcireg_t memtype;
199 bus_addr_t memaddr;
200 bus_size_t memsize;
201 int pmreg;
202 pcireg_t pmode;
203 int error;
204
205 sc->bce_pa = *pa;
206 sc->bce_dmatag = pa->pa_dmat;
207
208 /*
209 * Map control/status registers.
210 */
211 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BCE_PCI_BAR00x10);
212 if (pci_mapreg_map(pa, BCE_PCI_BAR00x10, memtype, 0, &sc->bce_btag,
213 &sc->bce_bhandle, &memaddr, &memsize, 0)) {
214 printf(": unable to find mem space\n");
215 return;
216 }
217
218 /* Get it out of power save mode if needed. */
219 if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PWRMGMT0x01, &pmreg, 0)) {
220 pmode = pci_conf_read(pc, pa->pa_tag, pmreg + 4) & 0x3;
221 if (pmode == 3) {
222 /*
223 * The card has lost all configuration data in
224 * this state, so punt.
225 */
226 printf(": unable to wake up from power state D3\n");
227 return;
228 }
229 if (pmode != 0) {
230 printf(": waking up from power state D%d\n",
231 pmode);
232 pci_conf_write(pc, pa->pa_tag, pmreg + 4, 0);
233 }
234 }
235
236 if (pci_intr_map(pa, &ih)) {
237 printf(": couldn't map interrupt\n");
238 return;
239 }
240
241 intrstr = pci_intr_string(pc, ih);
242 sc->bce_intrhand = pci_intr_establish(pc, ih, IPL_NET0x7, bce_intr, sc,
243 self->dv_xname);
244 if (sc->bce_intrhand == NULL((void *)0)) {
245 printf(": couldn't establish interrupt");
246 if (intrstr != NULL((void *)0))
247 printf(" at %s", intrstr);
248 printf("\n");
249 return;
250 }
251
252 /* reset the chip */
253 bce_reset(sc);
254
255 /* Create the data DMA region and maps. */
256 if ((sc->bce_data = (caddr_t)uvm_km_kmemalloc_pla(kernel_map,
257 uvm.kernel_object, (BCE_NTXDESC64 + BCE_NRXDESC64) * MCLBYTES(1 << 11), 0,
258 UVM_KMF_NOWAIT0x1, 0, (paddr_t)(0x40000000 - 1), 0, 0, 1)) == NULL((void *)0)) {
259 printf(": unable to alloc space for ring");
260 return;
261 }
262
263 /* create a dma map for the RX ring */
264 if ((error = bus_dmamap_create(sc->bce_dmatag, BCE_NRXDESC * MCLBYTES,(*(sc->bce_dmatag)->_dmamap_create)((sc->bce_dmatag)
, (64 * (1 << 11)), (1), (64 * (1 << 11)), (0), (
0x0001 | 0x0002), (&sc->bce_rxdata_map))
265 1, BCE_NRXDESC * MCLBYTES, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,(*(sc->bce_dmatag)->_dmamap_create)((sc->bce_dmatag)
, (64 * (1 << 11)), (1), (64 * (1 << 11)), (0), (
0x0001 | 0x0002), (&sc->bce_rxdata_map))
266 &sc->bce_rxdata_map)(*(sc->bce_dmatag)->_dmamap_create)((sc->bce_dmatag)
, (64 * (1 << 11)), (1), (64 * (1 << 11)), (0), (
0x0001 | 0x0002), (&sc->bce_rxdata_map))
)) {
267 printf(": unable to create ring DMA map, error = %d\n", error);
268 uvm_km_free(kernel_map, (vaddr_t)sc->bce_data,
269 (BCE_NTXDESC64 + BCE_NRXDESC64) * MCLBYTES(1 << 11));
270 return;
271 }
272
273 /* connect the ring space to the dma map */
274 if (bus_dmamap_load(sc->bce_dmatag, sc->bce_rxdata_map, sc->bce_data,(*(sc->bce_dmatag)->_dmamap_load)((sc->bce_dmatag), (
sc->bce_rxdata_map), (sc->bce_data), (64 * (1 << 11
)), (((void *)0)), (0x0200 | 0x0001))
275 BCE_NRXDESC * MCLBYTES, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT)(*(sc->bce_dmatag)->_dmamap_load)((sc->bce_dmatag), (
sc->bce_rxdata_map), (sc->bce_data), (64 * (1 << 11
)), (((void *)0)), (0x0200 | 0x0001))
) {
276 printf(": unable to load rx ring DMA map\n");
277 uvm_km_free(kernel_map, (vaddr_t)sc->bce_data,
278 (BCE_NTXDESC64 + BCE_NRXDESC64) * MCLBYTES(1 << 11));
279 bus_dmamap_destroy(sc->bce_dmatag, sc->bce_rxdata_map)(*(sc->bce_dmatag)->_dmamap_destroy)((sc->bce_dmatag
), (sc->bce_rxdata_map))
;
280 return;
281 }
282
283 /* create a dma map for the TX ring */
284 if ((error = bus_dmamap_create(sc->bce_dmatag, BCE_NTXDESC * MCLBYTES,(*(sc->bce_dmatag)->_dmamap_create)((sc->bce_dmatag)
, (64 * (1 << 11)), (1), (64 * (1 << 11)), (0), (
0x0001 | 0x0002), (&sc->bce_txdata_map))
285 1, BCE_NTXDESC * MCLBYTES, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,(*(sc->bce_dmatag)->_dmamap_create)((sc->bce_dmatag)
, (64 * (1 << 11)), (1), (64 * (1 << 11)), (0), (
0x0001 | 0x0002), (&sc->bce_txdata_map))
286 &sc->bce_txdata_map)(*(sc->bce_dmatag)->_dmamap_create)((sc->bce_dmatag)
, (64 * (1 << 11)), (1), (64 * (1 << 11)), (0), (
0x0001 | 0x0002), (&sc->bce_txdata_map))
)) {
287 printf(": unable to create ring DMA map, error = %d\n", error);
288 uvm_km_free(kernel_map, (vaddr_t)sc->bce_data,
289 (BCE_NTXDESC64 + BCE_NRXDESC64) * MCLBYTES(1 << 11));
290 bus_dmamap_destroy(sc->bce_dmatag, sc->bce_rxdata_map)(*(sc->bce_dmatag)->_dmamap_destroy)((sc->bce_dmatag
), (sc->bce_rxdata_map))
;
291 return;
292 }
293
294 /* connect the ring space to the dma map */
295 if (bus_dmamap_load(sc->bce_dmatag, sc->bce_txdata_map,(*(sc->bce_dmatag)->_dmamap_load)((sc->bce_dmatag), (
sc->bce_txdata_map), (sc->bce_data + 64 * (1 << 11
)), (64 * (1 << 11)), (((void *)0)), (0x0400 | 0x0001))
296 sc->bce_data + BCE_NRXDESC * MCLBYTES,(*(sc->bce_dmatag)->_dmamap_load)((sc->bce_dmatag), (
sc->bce_txdata_map), (sc->bce_data + 64 * (1 << 11
)), (64 * (1 << 11)), (((void *)0)), (0x0400 | 0x0001))
297 BCE_NTXDESC * MCLBYTES, NULL, BUS_DMA_WRITE | BUS_DMA_NOWAIT)(*(sc->bce_dmatag)->_dmamap_load)((sc->bce_dmatag), (
sc->bce_txdata_map), (sc->bce_data + 64 * (1 << 11
)), (64 * (1 << 11)), (((void *)0)), (0x0400 | 0x0001))
) {
298 printf(": unable to load tx ring DMA map\n");
299 uvm_km_free(kernel_map, (vaddr_t)sc->bce_data,
300 (BCE_NTXDESC64 + BCE_NRXDESC64) * MCLBYTES(1 << 11));
301 bus_dmamap_destroy(sc->bce_dmatag, sc->bce_rxdata_map)(*(sc->bce_dmatag)->_dmamap_destroy)((sc->bce_dmatag
), (sc->bce_rxdata_map))
;
302 bus_dmamap_destroy(sc->bce_dmatag, sc->bce_txdata_map)(*(sc->bce_dmatag)->_dmamap_destroy)((sc->bce_dmatag
), (sc->bce_txdata_map))
;
303 return;
304 }
305
306
307 /*
308 * Allocate DMA-safe memory for ring descriptors.
309 * The receive, and transmit rings can not share the same
310 * 4k space, however both are allocated at once here.
311 */
312 /*
313 * XXX PAGE_SIZE is wasteful; we only need 1KB + 1KB, but
314 * due to the limitation above. ??
315 */
316 if ((error = bus_dmamem_alloc_range(sc->bce_dmatag, 2 * PAGE_SIZE,(*(sc->bce_dmatag)->_dmamem_alloc_range)((sc->bce_dmatag
), (2 * (1 << 12)), ((1 << 12)), (2 * (1 <<
12)), (&seg), (1), (&rseg), (0x0001), ((bus_addr_t)0
), ((bus_addr_t)0x3fffffff))
317 PAGE_SIZE, 2 * PAGE_SIZE, &seg, 1, &rseg, BUS_DMA_NOWAIT,(*(sc->bce_dmatag)->_dmamem_alloc_range)((sc->bce_dmatag
), (2 * (1 << 12)), ((1 << 12)), (2 * (1 <<
12)), (&seg), (1), (&rseg), (0x0001), ((bus_addr_t)0
), ((bus_addr_t)0x3fffffff))
318 (bus_addr_t)0, (bus_addr_t)0x3fffffff)(*(sc->bce_dmatag)->_dmamem_alloc_range)((sc->bce_dmatag
), (2 * (1 << 12)), ((1 << 12)), (2 * (1 <<
12)), (&seg), (1), (&rseg), (0x0001), ((bus_addr_t)0
), ((bus_addr_t)0x3fffffff))
)) {
319 printf(": unable to alloc space for ring descriptors, "
320 "error = %d\n", error);
321 uvm_km_free(kernel_map, (vaddr_t)sc->bce_data,
322 (BCE_NTXDESC64 + BCE_NRXDESC64) * MCLBYTES(1 << 11));
323 bus_dmamap_destroy(sc->bce_dmatag, sc->bce_rxdata_map)(*(sc->bce_dmatag)->_dmamap_destroy)((sc->bce_dmatag
), (sc->bce_rxdata_map))
;
324 bus_dmamap_destroy(sc->bce_dmatag, sc->bce_txdata_map)(*(sc->bce_dmatag)->_dmamap_destroy)((sc->bce_dmatag
), (sc->bce_txdata_map))
;
325 return;
326 }
327
328 /* map ring space to kernel */
329 if ((error = bus_dmamem_map(sc->bce_dmatag, &seg, rseg,(*(sc->bce_dmatag)->_dmamem_map)((sc->bce_dmatag), (
&seg), (rseg), (2 * (1 << 12)), (&kva), (0x0001
))
330 2 * PAGE_SIZE, &kva, BUS_DMA_NOWAIT)(*(sc->bce_dmatag)->_dmamem_map)((sc->bce_dmatag), (
&seg), (rseg), (2 * (1 << 12)), (&kva), (0x0001
))
)) {
331 printf(": unable to map DMA buffers, error = %d\n", error);
332 uvm_km_free(kernel_map, (vaddr_t)sc->bce_data,
333 (BCE_NTXDESC64 + BCE_NRXDESC64) * MCLBYTES(1 << 11));
334 bus_dmamap_destroy(sc->bce_dmatag, sc->bce_rxdata_map)(*(sc->bce_dmatag)->_dmamap_destroy)((sc->bce_dmatag
), (sc->bce_rxdata_map))
;
335 bus_dmamap_destroy(sc->bce_dmatag, sc->bce_txdata_map)(*(sc->bce_dmatag)->_dmamap_destroy)((sc->bce_dmatag
), (sc->bce_txdata_map))
;
336 bus_dmamem_free(sc->bce_dmatag, &seg, rseg)(*(sc->bce_dmatag)->_dmamem_free)((sc->bce_dmatag), (
&seg), (rseg))
;
337 return;
338 }
339
340 /* create a dma map for the ring */
341 if ((error = bus_dmamap_create(sc->bce_dmatag, 2 * PAGE_SIZE, 1,(*(sc->bce_dmatag)->_dmamap_create)((sc->bce_dmatag)
, (2 * (1 << 12)), (1), (2 * (1 << 12)), (0), (0x0001
), (&sc->bce_ring_map))
342 2 * PAGE_SIZE, 0, BUS_DMA_NOWAIT, &sc->bce_ring_map)(*(sc->bce_dmatag)->_dmamap_create)((sc->bce_dmatag)
, (2 * (1 << 12)), (1), (2 * (1 << 12)), (0), (0x0001
), (&sc->bce_ring_map))
)) {
343 printf(": unable to create ring DMA map, error = %d\n", error);
344 uvm_km_free(kernel_map, (vaddr_t)sc->bce_data,
345 (BCE_NTXDESC64 + BCE_NRXDESC64) * MCLBYTES(1 << 11));
346 bus_dmamap_destroy(sc->bce_dmatag, sc->bce_rxdata_map)(*(sc->bce_dmatag)->_dmamap_destroy)((sc->bce_dmatag
), (sc->bce_rxdata_map))
;
347 bus_dmamap_destroy(sc->bce_dmatag, sc->bce_txdata_map)(*(sc->bce_dmatag)->_dmamap_destroy)((sc->bce_dmatag
), (sc->bce_txdata_map))
;
348 bus_dmamem_free(sc->bce_dmatag, &seg, rseg)(*(sc->bce_dmatag)->_dmamem_free)((sc->bce_dmatag), (
&seg), (rseg))
;
349 return;
350 }
351
352 /* connect the ring space to the dma map */
353 if (bus_dmamap_load(sc->bce_dmatag, sc->bce_ring_map, kva,(*(sc->bce_dmatag)->_dmamap_load)((sc->bce_dmatag), (
sc->bce_ring_map), (kva), (2 * (1 << 12)), (((void *
)0)), (0x0001))
354 2 * PAGE_SIZE, NULL, BUS_DMA_NOWAIT)(*(sc->bce_dmatag)->_dmamap_load)((sc->bce_dmatag), (
sc->bce_ring_map), (kva), (2 * (1 << 12)), (((void *
)0)), (0x0001))
) {
355 printf(": unable to load ring DMA map\n");
356 uvm_km_free(kernel_map, (vaddr_t)sc->bce_data,
357 (BCE_NTXDESC64 + BCE_NRXDESC64) * MCLBYTES(1 << 11));
358 bus_dmamap_destroy(sc->bce_dmatag, sc->bce_rxdata_map)(*(sc->bce_dmatag)->_dmamap_destroy)((sc->bce_dmatag
), (sc->bce_rxdata_map))
;
359 bus_dmamap_destroy(sc->bce_dmatag, sc->bce_txdata_map)(*(sc->bce_dmatag)->_dmamap_destroy)((sc->bce_dmatag
), (sc->bce_txdata_map))
;
360 bus_dmamap_destroy(sc->bce_dmatag, sc->bce_ring_map)(*(sc->bce_dmatag)->_dmamap_destroy)((sc->bce_dmatag
), (sc->bce_ring_map))
;
361 bus_dmamem_free(sc->bce_dmatag, &seg, rseg)(*(sc->bce_dmatag)->_dmamem_free)((sc->bce_dmatag), (
&seg), (rseg))
;
362 return;
363 }
364
365 /* save the ring space in softc */
366 sc->bce_rx_ring = (struct bce_dma_slot *)kva;
367 sc->bce_tx_ring = (struct bce_dma_slot *)(kva + PAGE_SIZE(1 << 12));
368
369 /* Set up ifnet structure */
370 ifp = &sc->bce_ac.ac_if;
371 strlcpy(ifp->if_xname, sc->bce_dev.dv_xname, IF_NAMESIZE16);
372 ifp->if_softc = sc;
373 ifp->if_flags = IFF_BROADCAST0x2 | IFF_SIMPLEX0x800 | IFF_MULTICAST0x8000;
374 ifp->if_ioctl = bce_ioctl;
375 ifp->if_start = bce_start;
376 ifp->if_watchdog = bce_watchdog;
377
378 ifp->if_capabilitiesif_data.ifi_capabilities = IFCAP_VLAN_MTU0x00000010;
379
380 /* MAC address */
381 sc->bce_ac.ac_enaddr[0] =
382 bus_space_read_1(sc->bce_btag, sc->bce_bhandle, BCE_ENET0)((sc->bce_btag)->read_1((sc->bce_bhandle), ((0x1000 +
0x4F))))
;
383 sc->bce_ac.ac_enaddr[1] =
384 bus_space_read_1(sc->bce_btag, sc->bce_bhandle, BCE_ENET1)((sc->bce_btag)->read_1((sc->bce_bhandle), ((0x1000 +
0x4E))))
;
385 sc->bce_ac.ac_enaddr[2] =
386 bus_space_read_1(sc->bce_btag, sc->bce_bhandle, BCE_ENET2)((sc->bce_btag)->read_1((sc->bce_bhandle), ((0x1000 +
0x51))))
;
387 sc->bce_ac.ac_enaddr[3] =
388 bus_space_read_1(sc->bce_btag, sc->bce_bhandle, BCE_ENET3)((sc->bce_btag)->read_1((sc->bce_bhandle), ((0x1000 +
0x50))))
;
389 sc->bce_ac.ac_enaddr[4] =
390 bus_space_read_1(sc->bce_btag, sc->bce_bhandle, BCE_ENET4)((sc->bce_btag)->read_1((sc->bce_bhandle), ((0x1000 +
0x53))))
;
391 sc->bce_ac.ac_enaddr[5] =
392 bus_space_read_1(sc->bce_btag, sc->bce_bhandle, BCE_ENET5)((sc->bce_btag)->read_1((sc->bce_bhandle), ((0x1000 +
0x52))))
;
393
394 printf(": %s, address %s\n", intrstr,
395 ether_sprintf(sc->bce_ac.ac_enaddr));
396
397 /* Initialize our media structures and probe the MII. */
398 sc->bce_mii.mii_ifp = ifp;
399 sc->bce_mii.mii_readreg = bce_mii_read;
400 sc->bce_mii.mii_writereg = bce_mii_write;
401 sc->bce_mii.mii_statchg = bce_statchg;
402 ifmedia_init(&sc->bce_mii.mii_media, 0, bce_mediachange,
403 bce_mediastatus);
404 mii_attach(&sc->bce_dev, &sc->bce_mii, 0xffffffff, MII_PHY_ANY-1,
405 MII_OFFSET_ANY-1, 0);
406 if (LIST_FIRST(&sc->bce_mii.mii_phys)((&sc->bce_mii.mii_phys)->lh_first) == NULL((void *)0)) {
407 ifmedia_add(&sc->bce_mii.mii_media, IFM_ETHER0x0000000000000100ULL | IFM_NONE2ULL, 0, NULL((void *)0));
408 ifmedia_set(&sc->bce_mii.mii_media, IFM_ETHER0x0000000000000100ULL | IFM_NONE2ULL);
409 } else
410 ifmedia_set(&sc->bce_mii.mii_media, IFM_ETHER0x0000000000000100ULL | IFM_AUTO0ULL);
411
412 /* get the phy */
413 sc->bce_phy = bus_space_read_1(sc->bce_btag, sc->bce_bhandle,((sc->bce_btag)->read_1((sc->bce_bhandle), ((0x1000 +
0x5A))))
414 BCE_PHY)((sc->bce_btag)->read_1((sc->bce_bhandle), ((0x1000 +
0x5A))))
& 0x1f;
415
416 /*
417 * Enable activity led.
418 * XXX This should be in a phy driver, but not currently.
419 */
420 bce_mii_write((struct device *) sc, 1, 26, /* MAGIC */
421 bce_mii_read((struct device *) sc, 1, 26) & 0x7fff); /* MAGIC */
422
423 /* enable traffic meter led mode */
424 bce_mii_write((struct device *) sc, 1, 27, /* MAGIC */
425 bce_mii_read((struct device *) sc, 1, 27) | (1 << 6)); /* MAGIC */
426
427 /* Attach the interface */
428 if_attach(ifp);
429 ether_ifattach(ifp);
430
431 timeout_set(&sc->bce_timeout, bce_tick, sc);
432}
433
434int
435bce_activate(struct device *self, int act)
436{
437 struct bce_softc *sc = (struct bce_softc *)self;
438 struct ifnet *ifp = &sc->bce_ac.ac_if;
439
440 switch (act) {
441 case DVACT_SUSPEND:
442 if (ifp->if_flags & IFF_RUNNING)
443 bce_stop(ifp);
444 break;
445 case DVACT_RESUME:
446 if (ifp->if_flags & IFF_UP) {
447 bce_init(ifp);
448 bce_start(ifp);
449 }
450 break;
451 }
452
453 return (0);
454}
455
456/* handle media, and ethernet requests */
457int
458bce_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
459{
460 struct bce_softc *sc = ifp->if_softc;
461 struct ifreq *ifr = (struct ifreq *) data;
462 int s, error = 0;
463
464 s = splnet()splraise(0x7);
465
466 switch (cmd) {
467 case SIOCSIFADDR((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((12)))
:
468 ifp->if_flags |= IFF_UP0x1;
469 if (!(ifp->if_flags & IFF_RUNNING0x40))
470 bce_init(ifp);
471 break;
472
473 case SIOCSIFFLAGS((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((16)))
:
474 if (ifp->if_flags & IFF_UP0x1) {
475 if (ifp->if_flags & IFF_RUNNING0x40)
476 error = ENETRESET52;
477 else
478 bce_init(ifp);
479 } else {
480 if (ifp->if_flags & IFF_RUNNING0x40)
481 bce_stop(ifp);
482 }
483 break;
484
485 case SIOCSIFMEDIA(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((55)))
:
486 case SIOCGIFMEDIA(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifmediareq) & 0x1fff) << 16) | ((('i')) <<
8) | ((56)))
:
487 error = ifmedia_ioctl(ifp, ifr, &sc->bce_mii.mii_media, cmd);
488 break;
489
490 default:
491 error = ether_ioctl(ifp, &sc->bce_ac, cmd, data);
492 }
493
494 if (error == ENETRESET52) {
495 if (ifp->if_flags & IFF_RUNNING0x40)
496 bce_iff(ifp);
497 error = 0;
498 }
499
500 splx(s)spllower(s);
501 return error;
502}
503
504/* Start packet transmission on the interface. */
505void
506bce_start(struct ifnet *ifp)
507{
508 struct bce_softc *sc = ifp->if_softc;
509 struct mbuf *m0;
510 u_int32_t ctrl;
511 int txstart;
512 int txsfree;
513 int newpkts = 0;
514
515 /*
516 * do not start another if currently transmitting, and more
517 * descriptors(tx slots) are needed for next packet.
518 */
519 if (!(ifp->if_flags & IFF_RUNNING0x40) || ifq_is_oactive(&ifp->if_snd))
520 return;
521
522 /* determine number of descriptors available */
523 if (sc->bce_txsnext >= sc->bce_txin)
524 txsfree = BCE_NTXDESC64 - 1 + sc->bce_txin - sc->bce_txsnext;
525 else
526 txsfree = sc->bce_txin - sc->bce_txsnext - 1;
527
528 /*
529 * Loop through the send queue, setting up transmit descriptors
530 * until we drain the queue, or use up all available transmit
531 * descriptors.
532 */
533 while (txsfree > 0) {
534
535 /* Grab a packet off the queue. */
536 m0 = ifq_dequeue(&ifp->if_snd);
537 if (m0 == NULL((void *)0))
538 break;
539
540 /*
541 * copy mbuf chain into DMA memory buffer.
542 */
543 m_copydata(m0, 0, m0->m_pkthdrM_dat.MH.MH_pkthdr.len, sc->bce_data +
544 (sc->bce_txsnext + BCE_NRXDESC64) * MCLBYTES(1 << 11));
545 ctrl = m0->m_pkthdrM_dat.MH.MH_pkthdr.len & CTRL_BC_MASK0x1fff;
546 ctrl |= CTRL_SOF0x80000000 | CTRL_EOF0x40000000 | CTRL_IOC0x20000000;
547
548#if NBPFILTER1 > 0
549 /* Pass the packet to any BPF listeners. */
550 if (ifp->if_bpf)
551 bpf_mtap(ifp->if_bpf, m0, BPF_DIRECTION_OUT(1 << 1));
552#endif
553 /* mbuf no longer needed */
554 m_freem(m0);
555
556 /* Sync the data DMA map. */
557 bus_dmamap_sync(sc->bce_dmatag, sc->bce_txdata_map,(*(sc->bce_dmatag)->_dmamap_sync)((sc->bce_dmatag), (
sc->bce_txdata_map), (sc->bce_txsnext * (1 << 11)
), ((1 << 11)), (0x04))
558 sc->bce_txsnext * MCLBYTES, MCLBYTES, BUS_DMASYNC_PREWRITE)(*(sc->bce_dmatag)->_dmamap_sync)((sc->bce_dmatag), (
sc->bce_txdata_map), (sc->bce_txsnext * (1 << 11)
), ((1 << 11)), (0x04))
;
559
560 /* Initialize the transmit descriptor(s). */
561 txstart = sc->bce_txsnext;
562
563 if (sc->bce_txsnext == BCE_NTXDESC64 - 1)
564 ctrl |= CTRL_EOT0x10000000;
565 sc->bce_tx_ring[sc->bce_txsnext].ctrl = htole32(ctrl)((__uint32_t)(ctrl));
566 sc->bce_tx_ring[sc->bce_txsnext].addr =
567 htole32(sc->bce_txdata_map->dm_segs[0].ds_addr +((__uint32_t)(sc->bce_txdata_map->dm_segs[0].ds_addr + sc
->bce_txsnext * (1 << 11) + 0x40000000))
568 sc->bce_txsnext * MCLBYTES + 0x40000000)((__uint32_t)(sc->bce_txdata_map->dm_segs[0].ds_addr + sc
->bce_txsnext * (1 << 11) + 0x40000000))
; /* MAGIC */
569 if (sc->bce_txsnext + 1 > BCE_NTXDESC64 - 1)
570 sc->bce_txsnext = 0;
571 else
572 sc->bce_txsnext++;
573 txsfree--;
574
575 /* sync descriptors being used */
576 bus_dmamap_sync(sc->bce_dmatag, sc->bce_ring_map,(*(sc->bce_dmatag)->_dmamap_sync)((sc->bce_dmatag), (
sc->bce_ring_map), (sizeof(struct bce_dma_slot) * txstart +
(1 << 12)), (sizeof(struct bce_dma_slot)), (0x01 | 0x04
))
577 sizeof(struct bce_dma_slot) * txstart + PAGE_SIZE,(*(sc->bce_dmatag)->_dmamap_sync)((sc->bce_dmatag), (
sc->bce_ring_map), (sizeof(struct bce_dma_slot) * txstart +
(1 << 12)), (sizeof(struct bce_dma_slot)), (0x01 | 0x04
))
578 sizeof(struct bce_dma_slot),(*(sc->bce_dmatag)->_dmamap_sync)((sc->bce_dmatag), (
sc->bce_ring_map), (sizeof(struct bce_dma_slot) * txstart +
(1 << 12)), (sizeof(struct bce_dma_slot)), (0x01 | 0x04
))
579 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE)(*(sc->bce_dmatag)->_dmamap_sync)((sc->bce_dmatag), (
sc->bce_ring_map), (sizeof(struct bce_dma_slot) * txstart +
(1 << 12)), (sizeof(struct bce_dma_slot)), (0x01 | 0x04
))
;
580
581 /* Give the packet to the chip. */
582 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_DPTR,((sc->bce_btag)->write_4((sc->bce_bhandle), (0x0208)
, (sc->bce_txsnext * sizeof(struct bce_dma_slot))))
583 sc->bce_txsnext * sizeof(struct bce_dma_slot))((sc->bce_btag)->write_4((sc->bce_bhandle), (0x0208)
, (sc->bce_txsnext * sizeof(struct bce_dma_slot))))
;
584
585 newpkts++;
586 }
587 if (txsfree == 0) {
588 /* No more slots left; notify upper layer. */
589 ifq_set_oactive(&ifp->if_snd);
590 }
591 if (newpkts) {
592 /* Set a watchdog timer in case the chip flakes out. */
593 ifp->if_timer = 5;
594 }
595}
596
597/* Watchdog timer handler. */
598void
599bce_watchdog(struct ifnet *ifp)
600{
601 struct bce_softc *sc = ifp->if_softc;
602
603 printf("%s: device timeout\n", sc->bce_dev.dv_xname);
604 ifp->if_oerrors++;
605
606 (void) bce_init(ifp);
607
608 /* Try to get more packets going. */
609 bce_start(ifp);
610}
611
612int
613bce_intr(void *xsc)
614{
615 struct bce_softc *sc;
616 struct ifnet *ifp;
617 u_int32_t intstatus;
618 int wantinit;
619 int handled = 0;
620
621 sc = xsc;
622 ifp = &sc->bce_ac.ac_if;
623
624
625 for (wantinit = 0; wantinit == 0;) {
626 intstatus = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,((sc->bce_btag)->read_4((sc->bce_bhandle), (0x0020))
)
627 BCE_INT_STS)((sc->bce_btag)->read_4((sc->bce_bhandle), (0x0020))
)
;
628
629 /* ignore if not ours, or unsolicited interrupts */
630 intstatus &= sc->bce_intmask;
631 if (intstatus == 0)
632 break;
633
634 handled = 1;
635
636 /* Ack interrupt */
637 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_INT_STS,((sc->bce_btag)->write_4((sc->bce_bhandle), (0x0020)
, (intstatus)))
638 intstatus)((sc->bce_btag)->write_4((sc->bce_bhandle), (0x0020)
, (intstatus)))
;
639
640 /* Receive interrupts. */
641 if (intstatus & I_RI0x00010000)
642 bce_rxintr(sc);
643 /* Transmit interrupts. */
644 if (intstatus & I_XI0x01000000)
645 bce_txintr(sc);
646 /* Error interrupts */
647 if (intstatus & ~(I_RI0x00010000 | I_XI0x01000000)) {
648 if (intstatus & I_XU0x00008000)
649 printf("%s: transmit fifo underflow\n",
650 sc->bce_dev.dv_xname);
651 if (intstatus & I_RO0x00004000) {
652 printf("%s: receive fifo overflow\n",
653 sc->bce_dev.dv_xname);
654 ifp->if_ierrorsif_data.ifi_ierrors++;
655 }
656 if (intstatus & I_RU0x00002000)
657 printf("%s: receive descriptor underflow\n",
658 sc->bce_dev.dv_xname);
659 if (intstatus & I_DE0x00001000)
660 printf("%s: descriptor protocol error\n",
661 sc->bce_dev.dv_xname);
662 if (intstatus & I_PD0x00000800)
663 printf("%s: data error\n",
664 sc->bce_dev.dv_xname);
665 if (intstatus & I_PC0x00000400)
666 printf("%s: descriptor error\n",
667 sc->bce_dev.dv_xname);
668 if (intstatus & I_TO0x00000080)
669 printf("%s: general purpose timeout\n",
670 sc->bce_dev.dv_xname);
671 wantinit = 1;
672 }
673 }
674
675 if (handled) {
676 if (wantinit)
677 bce_init(ifp);
678 /* Try to get more packets going. */
679 bce_start(ifp);
680 }
681 return (handled);
682}
683
684/* Receive interrupt handler */
685void
686bce_rxintr(struct bce_softc *sc)
687{
688 struct ifnet *ifp = &sc->bce_ac.ac_if;
689 struct mbuf_list ml = MBUF_LIST_INITIALIZER(){ ((void *)0), ((void *)0), 0 };
690 struct rx_pph *pph;
691 struct mbuf *m;
692 int curr;
693 int len;
694 int i;
695
696 /* get pointer to active receive slot */
697 curr = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_RXSTATUS)((sc->bce_btag)->read_4((sc->bce_bhandle), (0x021C))
)
698 & RS_CD_MASK0x0fff;
699 curr = curr / sizeof(struct bce_dma_slot);
700 if (curr >= BCE_NRXDESC64)
701 curr = BCE_NRXDESC64 - 1;
702
703 /* process packets up to but not current packet being worked on */
704 for (i = sc->bce_rxin; i != curr; i = (i + 1) % BCE_NRXDESC64) {
705 /* complete any post dma memory ops on packet */
706 bus_dmamap_sync(sc->bce_dmatag, sc->bce_rxdata_map,(*(sc->bce_dmatag)->_dmamap_sync)((sc->bce_dmatag), (
sc->bce_rxdata_map), (i * (1 << 11)), ((1 << 11
)), (0x02))
707 i * MCLBYTES, MCLBYTES, BUS_DMASYNC_POSTREAD)(*(sc->bce_dmatag)->_dmamap_sync)((sc->bce_dmatag), (
sc->bce_rxdata_map), (i * (1 << 11)), ((1 << 11
)), (0x02))
;
708
709 /*
710 * If the packet had an error, simply recycle the buffer,
711 * resetting the len, and flags.
712 */
713 pph = (struct rx_pph *)(sc->bce_data + i * MCLBYTES(1 << 11));
714 if (pph->flags & (RXF_NO0x8 | RXF_RXER0x4 | RXF_CRC0x2 | RXF_OV0x1)) {
715 ifp->if_ierrorsif_data.ifi_ierrors++;
716 pph->len = 0;
717 pph->flags = 0;
718 continue;
719 }
720 /* receive the packet */
721 len = pph->len;
722 if (len == 0)
723 continue; /* no packet if empty */
724 pph->len = 0;
725 pph->flags = 0;
726
727 /*
728 * The chip includes the CRC with every packet. Trim
729 * it off here.
730 */
731 len -= ETHER_CRC_LEN4;
732
733 m = m_devget(sc->bce_data + i * MCLBYTES(1 << 11) +
734 BCE_PREPKT_HEADER_SIZE30, len, ETHER_ALIGN2);
735
736 ml_enqueue(&ml, m);
737
738 /* re-check current in case it changed */
739 curr = (bus_space_read_4(sc->bce_btag, sc->bce_bhandle,((sc->bce_btag)->read_4((sc->bce_bhandle), (0x021C))
)
740 BCE_DMA_RXSTATUS)((sc->bce_btag)->read_4((sc->bce_bhandle), (0x021C))
)
& RS_CD_MASK0x0fff) /
741 sizeof(struct bce_dma_slot);
742 if (curr >= BCE_NRXDESC64)
743 curr = BCE_NRXDESC64 - 1;
744 }
745
746 if_input(ifp, &ml);
747
748 sc->bce_rxin = curr;
749}
750
751/* Transmit interrupt handler */
752void
753bce_txintr(struct bce_softc *sc)
754{
755 struct ifnet *ifp = &sc->bce_ac.ac_if;
756 int curr;
757 int i;
758
759 ifq_clr_oactive(&ifp->if_snd);
760
761 /*
762 * Go through the Tx list and free mbufs for those
763 * frames which have been transmitted.
764 */
765 curr = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,((sc->bce_btag)->read_4((sc->bce_bhandle), (0x020C))
)
766 BCE_DMA_TXSTATUS)((sc->bce_btag)->read_4((sc->bce_bhandle), (0x020C))
)
& RS_CD_MASK0x0fff;
767 curr = curr / sizeof(struct bce_dma_slot);
768 if (curr >= BCE_NTXDESC64)
769 curr = BCE_NTXDESC64 - 1;
770 for (i = sc->bce_txin; i != curr; i = (i + 1) % BCE_NTXDESC64) {
771 /* do any post dma memory ops on transmit data */
772 bus_dmamap_sync(sc->bce_dmatag, sc->bce_txdata_map,(*(sc->bce_dmatag)->_dmamap_sync)((sc->bce_dmatag), (
sc->bce_txdata_map), (i * (1 << 11)), ((1 << 11
)), (0x08))
773 i * MCLBYTES, MCLBYTES, BUS_DMASYNC_POSTWRITE)(*(sc->bce_dmatag)->_dmamap_sync)((sc->bce_dmatag), (
sc->bce_txdata_map), (i * (1 << 11)), ((1 << 11
)), (0x08))
;
774 }
775 sc->bce_txin = curr;
776
777 /*
778 * If there are no more pending transmissions, cancel the watchdog
779 * timer
780 */
781 if (sc->bce_txsnext == sc->bce_txin)
782 ifp->if_timer = 0;
783}
784
785/* initialize the interface */
786int
787bce_init(struct ifnet *ifp)
788{
789 struct bce_softc *sc = ifp->if_softc;
790 u_int32_t reg_win;
791 int i;
792
793 /* Cancel any pending I/O. */
794 bce_stop(ifp);
795
796 /* enable pci interrupts, bursts, and prefetch */
797
798 /* remap the pci registers to the Sonics config registers */
799
800 /* save the current map, so it can be restored */
801 reg_win = pci_conf_read(sc->bce_pa.pa_pc, sc->bce_pa.pa_tag,
802 BCE_REG_WIN0x80);
803
804 /* set register window to Sonics registers */
805 pci_conf_write(sc->bce_pa.pa_pc, sc->bce_pa.pa_tag, BCE_REG_WIN0x80,
806 BCE_SONICS_WIN0x18002000);
807
808 /* enable SB to PCI interrupt */
809 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBINTVEC,((sc->bce_btag)->write_4((sc->bce_bhandle), (0x0f94)
, (((sc->bce_btag)->read_4((sc->bce_bhandle), (0x0f94
))) | 0x02)))
810 bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SBINTVEC) |((sc->bce_btag)->write_4((sc->bce_bhandle), (0x0f94)
, (((sc->bce_btag)->read_4((sc->bce_bhandle), (0x0f94
))) | 0x02)))
811 SBIV_ENET0)((sc->bce_btag)->write_4((sc->bce_bhandle), (0x0f94)
, (((sc->bce_btag)->read_4((sc->bce_bhandle), (0x0f94
))) | 0x02)))
;
812
813 /* enable prefetch and bursts for sonics-to-pci translation 2 */
814 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SPCI_TR2,((sc->bce_btag)->write_4((sc->bce_bhandle), (0x0108)
, (((sc->bce_btag)->read_4((sc->bce_bhandle), (0x0108
))) | 0x4 | 0x8)))
815 bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SPCI_TR2) |((sc->bce_btag)->write_4((sc->bce_bhandle), (0x0108)
, (((sc->bce_btag)->read_4((sc->bce_bhandle), (0x0108
))) | 0x4 | 0x8)))
816 SBTOPCI_PREF | SBTOPCI_BURST)((sc->bce_btag)->write_4((sc->bce_bhandle), (0x0108)
, (((sc->bce_btag)->read_4((sc->bce_bhandle), (0x0108
))) | 0x4 | 0x8)))
;
817
818 /* restore to ethernet register space */
819 pci_conf_write(sc->bce_pa.pa_pc, sc->bce_pa.pa_tag, BCE_REG_WIN0x80,
820 reg_win);
821
822 /* Reset the chip to a known state. */
823 bce_reset(sc);
824
825 /* Initialize transmit descriptors */
826 memset(sc->bce_tx_ring, 0, BCE_NTXDESC * sizeof(struct bce_dma_slot))__builtin_memset((sc->bce_tx_ring), (0), (64 * sizeof(struct
bce_dma_slot)))
;
827 sc->bce_txsnext = 0;
828 sc->bce_txin = 0;
829
830 /* enable crc32 generation and set proper LED modes */
831 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MACCTL,((sc->bce_btag)->write_4((sc->bce_bhandle), (0x00A8)
, (((sc->bce_btag)->read_4((sc->bce_bhandle), (0x00A8
))) | 0x00000001 | 0x000000e0)))
832 bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_MACCTL) |((sc->bce_btag)->write_4((sc->bce_bhandle), (0x00A8)
, (((sc->bce_btag)->read_4((sc->bce_bhandle), (0x00A8
))) | 0x00000001 | 0x000000e0)))
833 BCE_EMC_CRC32_ENAB | BCE_EMC_LED)((sc->bce_btag)->write_4((sc->bce_bhandle), (0x00A8)
, (((sc->bce_btag)->read_4((sc->bce_bhandle), (0x00A8
))) | 0x00000001 | 0x000000e0)))
;
834
835 /* reset or clear powerdown control bit */
836 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MACCTL,((sc->bce_btag)->write_4((sc->bce_bhandle), (0x00A8)
, (((sc->bce_btag)->read_4((sc->bce_bhandle), (0x00A8
))) & ~0x00000004)))
837 bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_MACCTL) &((sc->bce_btag)->write_4((sc->bce_bhandle), (0x00A8)
, (((sc->bce_btag)->read_4((sc->bce_bhandle), (0x00A8
))) & ~0x00000004)))
838 ~BCE_EMC_PDOWN)((sc->bce_btag)->write_4((sc->bce_bhandle), (0x00A8)
, (((sc->bce_btag)->read_4((sc->bce_bhandle), (0x00A8
))) & ~0x00000004)))
;
839
840 /* setup DMA interrupt control */
841 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMAI_CTL, 1 << 24)((sc->bce_btag)->write_4((sc->bce_bhandle), (0x0100)
, (1 << 24)))
; /* MAGIC */
842
843 /* program promiscuous mode and multicast filters */
844 bce_iff(ifp);
845
846 /* set max frame length, account for possible VLAN tag */
847 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_RX_MAX,((sc->bce_btag)->write_4((sc->bce_bhandle), (0x0404)
, (1518 + 4)))
848 ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN)((sc->bce_btag)->write_4((sc->bce_bhandle), (0x0404)
, (1518 + 4)))
;
849 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_TX_MAX,((sc->bce_btag)->write_4((sc->bce_bhandle), (0x0408)
, (1518 + 4)))
850 ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN)((sc->bce_btag)->write_4((sc->bce_bhandle), (0x0408)
, (1518 + 4)))
;
851
852 /* set tx watermark */
853 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_TX_WATER, 56)((sc->bce_btag)->write_4((sc->bce_bhandle), (0x0434)
, (56)))
;
854
855 /* enable transmit */
856 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_TXCTL, XC_XE)((sc->bce_btag)->write_4((sc->bce_bhandle), (0x0200)
, (0x1)))
;
857 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_TXADDR,((sc->bce_btag)->write_4((sc->bce_bhandle), (0x0204)
, (sc->bce_ring_map->dm_segs[0].ds_addr + (1 << 12
) + 0x40000000)))
858 sc->bce_ring_map->dm_segs[0].ds_addr + PAGE_SIZE + 0x40000000)((sc->bce_btag)->write_4((sc->bce_bhandle), (0x0204)
, (sc->bce_ring_map->dm_segs[0].ds_addr + (1 << 12
) + 0x40000000)))
; /* MAGIC */
859
860 /*
861 * Give the receive ring to the chip, and
862 * start the receive DMA engine.
863 */
864 sc->bce_rxin = 0;
865
866 /* clear the rx descriptor ring */
867 memset(sc->bce_rx_ring, 0, BCE_NRXDESC * sizeof(struct bce_dma_slot))__builtin_memset((sc->bce_rx_ring), (0), (64 * sizeof(struct
bce_dma_slot)))
;
868 /* enable receive */
869 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_RXCTL,((sc->bce_btag)->write_4((sc->bce_bhandle), (0x0210)
, (30 << 1 | 0x1)))
870 BCE_PREPKT_HEADER_SIZE << 1 | XC_XE)((sc->bce_btag)->write_4((sc->bce_bhandle), (0x0210)
, (30 << 1 | 0x1)))
;
871 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_RXADDR,((sc->bce_btag)->write_4((sc->bce_bhandle), (0x0214)
, (sc->bce_ring_map->dm_segs[0].ds_addr + 0x40000000)))
872 sc->bce_ring_map->dm_segs[0].ds_addr + 0x40000000)((sc->bce_btag)->write_4((sc->bce_bhandle), (0x0214)
, (sc->bce_ring_map->dm_segs[0].ds_addr + 0x40000000)))
; /* MAGIC */
873
874 /* Initialize receive descriptors */
875 for (i = 0; i < BCE_NRXDESC64; i++)
876 bce_add_rxbuf(sc, i);
877
878 /* Enable interrupts */
879 sc->bce_intmask =
880 I_XI0x01000000 | I_RI0x00010000 | I_XU0x00008000 | I_RO0x00004000 | I_RU0x00002000 | I_DE0x00001000 | I_PD0x00000800 | I_PC0x00000400 | I_TO0x00000080;
881 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_INT_MASK,((sc->bce_btag)->write_4((sc->bce_bhandle), (0x0024)
, (sc->bce_intmask)))
882 sc->bce_intmask)((sc->bce_btag)->write_4((sc->bce_bhandle), (0x0024)
, (sc->bce_intmask)))
;
883
884 /* start the receive dma */
885 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_RXDPTR,((sc->bce_btag)->write_4((sc->bce_bhandle), (0x0218)
, (64 * sizeof(struct bce_dma_slot))))
886 BCE_NRXDESC * sizeof(struct bce_dma_slot))((sc->bce_btag)->write_4((sc->bce_bhandle), (0x0218)
, (64 * sizeof(struct bce_dma_slot))))
;
887
888 /* set media */
889 mii_mediachg(&sc->bce_mii);
890
891 /* turn on the ethernet mac */
892 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_ENET_CTL,((sc->bce_btag)->write_4((sc->bce_bhandle), (0x042C)
, (((sc->bce_btag)->read_4((sc->bce_bhandle), (0x042C
))) | 0x00000001)))
893 bus_space_read_4(sc->bce_btag, sc->bce_bhandle,((sc->bce_btag)->write_4((sc->bce_bhandle), (0x042C)
, (((sc->bce_btag)->read_4((sc->bce_bhandle), (0x042C
))) | 0x00000001)))
894 BCE_ENET_CTL) | EC_EE)((sc->bce_btag)->write_4((sc->bce_bhandle), (0x042C)
, (((sc->bce_btag)->read_4((sc->bce_bhandle), (0x042C
))) | 0x00000001)))
;
895
896 /* start timer */
897 timeout_add_sec(&sc->bce_timeout, 1);
898
899 /* mark as running, and no outputs active */
900 ifp->if_flags |= IFF_RUNNING0x40;
901 ifq_clr_oactive(&ifp->if_snd);
902
903 return 0;
904}
905
906/* add a mac address to packet filter */
907void
908bce_add_mac(struct bce_softc *sc, u_int8_t *mac, unsigned long idx)
909{
910 int i;
911 u_int32_t rval;
912
913 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_FILT_LOW,((sc->bce_btag)->write_4((sc->bce_bhandle), (0x0420)
, (mac[2] << 24 | mac[3] << 16 | mac[4] << 8
| mac[5])))
914 mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5])((sc->bce_btag)->write_4((sc->bce_bhandle), (0x0420)
, (mac[2] << 24 | mac[3] << 16 | mac[4] << 8
| mac[5])))
;
915 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_FILT_HI,((sc->bce_btag)->write_4((sc->bce_bhandle), (0x0424)
, (mac[0] << 8 | mac[1] | 0x10000)))
916 mac[0] << 8 | mac[1] | 0x10000)((sc->bce_btag)->write_4((sc->bce_bhandle), (0x0424)
, (mac[0] << 8 | mac[1] | 0x10000)))
; /* MAGIC */
917 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_FILT_CTL,((sc->bce_btag)->write_4((sc->bce_bhandle), (0x0428)
, (idx << 16 | 8)))
918 idx << 16 | 8)((sc->bce_btag)->write_4((sc->bce_bhandle), (0x0428)
, (idx << 16 | 8)))
; /* MAGIC */
919 /* wait for write to complete */
920 for (i = 0; i < 100; i++) {
921 rval = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,((sc->bce_btag)->read_4((sc->bce_bhandle), (0x0428))
)
922 BCE_FILT_CTL)((sc->bce_btag)->read_4((sc->bce_bhandle), (0x0428))
)
;
923 if (!(rval & 0x80000000)) /* MAGIC */
924 break;
925 delay(10)(*delay_func)(10);
926 }
927 if (i == 100) {
928 printf("%s: timed out writing pkt filter ctl\n",
929 sc->bce_dev.dv_xname);
930 }
931}
932
933/* Add a receive buffer to the indicated descriptor. */
934void
935bce_add_rxbuf(struct bce_softc *sc, int idx)
936{
937 struct bce_dma_slot *bced = &sc->bce_rx_ring[idx];
938
939 bus_dmamap_sync(sc->bce_dmatag, sc->bce_rxdata_map, idx * MCLBYTES,(*(sc->bce_dmatag)->_dmamap_sync)((sc->bce_dmatag), (
sc->bce_rxdata_map), (idx * (1 << 11)), ((1 <<
11)), (0x01))
940 MCLBYTES, BUS_DMASYNC_PREREAD)(*(sc->bce_dmatag)->_dmamap_sync)((sc->bce_dmatag), (
sc->bce_rxdata_map), (idx * (1 << 11)), ((1 <<
11)), (0x01))
;
941
942 *(u_int32_t *)(sc->bce_data + idx * MCLBYTES(1 << 11)) = 0;
943 bced->addr = htole32(sc->bce_rxdata_map->dm_segs[0].ds_addr +((__uint32_t)(sc->bce_rxdata_map->dm_segs[0].ds_addr + idx
* (1 << 11) + 0x40000000))
944 idx * MCLBYTES + 0x40000000)((__uint32_t)(sc->bce_rxdata_map->dm_segs[0].ds_addr + idx
* (1 << 11) + 0x40000000))
;
945 if (idx != (BCE_NRXDESC64 - 1))
946 bced->ctrl = htole32(BCE_RXBUF_LEN)((__uint32_t)(((1 << 11) - 4)));
947 else
948 bced->ctrl = htole32(BCE_RXBUF_LEN | CTRL_EOT)((__uint32_t)(((1 << 11) - 4) | 0x10000000));
949
950 bus_dmamap_sync(sc->bce_dmatag, sc->bce_ring_map,(*(sc->bce_dmatag)->_dmamap_sync)((sc->bce_dmatag), (
sc->bce_ring_map), (sizeof(struct bce_dma_slot) * idx), (sizeof
(struct bce_dma_slot)), (0x01|0x04))
951 sizeof(struct bce_dma_slot) * idx,(*(sc->bce_dmatag)->_dmamap_sync)((sc->bce_dmatag), (
sc->bce_ring_map), (sizeof(struct bce_dma_slot) * idx), (sizeof
(struct bce_dma_slot)), (0x01|0x04))
952 sizeof(struct bce_dma_slot),(*(sc->bce_dmatag)->_dmamap_sync)((sc->bce_dmatag), (
sc->bce_ring_map), (sizeof(struct bce_dma_slot) * idx), (sizeof
(struct bce_dma_slot)), (0x01|0x04))
953 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)(*(sc->bce_dmatag)->_dmamap_sync)((sc->bce_dmatag), (
sc->bce_ring_map), (sizeof(struct bce_dma_slot) * idx), (sizeof
(struct bce_dma_slot)), (0x01|0x04))
;
954
955}
956
957/* Stop transmission on the interface */
958void
959bce_stop(struct ifnet *ifp)
960{
961 struct bce_softc *sc = ifp->if_softc;
962 int i;
963 u_int32_t val;
964
965 /* Stop the 1 second timer */
966 timeout_del(&sc->bce_timeout);
967
968 /* Mark the interface down and cancel the watchdog timer. */
969 ifp->if_flags &= ~IFF_RUNNING;
970 ifq_clr_oactive(&ifp->if_snd);
971 ifp->if_timer = 0;
972
973 /* Down the MII. */
974 mii_down(&sc->bce_mii);
975
976 /* Disable interrupts. */
977 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_INT_MASK, 0);
978 sc->bce_intmask = 0;
979 delay(10);
980
981 /* Disable emac */
982 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_ENET_CTL, EC_ED);
983 for (i = 0; i < 200; i++) {
984 val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
985 BCE_ENET_CTL);
986 if (!(val & EC_ED))
987 break;
988 delay(10);
989 }
990
991 /* Stop the DMA */
992 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_RXCTL, 0);
993 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_TXCTL, 0);
994 delay(10);
995}
996
997/* reset the chip */
998void
999bce_reset(struct bce_softc *sc)
1000{
1001 u_int32_t val;
1002 u_int32_t sbval;
1003 int i;
1004
1005 /* if SB core is up */
1006 sbval = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,((sc->bce_btag)->read_4((sc->bce_bhandle), (0x0f98))
)
1007 BCE_SBTMSTATELOW)((sc->bce_btag)->read_4((sc->bce_bhandle), (0x0f98))
)
;
1008 if ((sbval & (SBTML_RESET0x1 | SBTML_REJ0x2 | SBTML_CLK0x10000)) == SBTML_CLK0x10000) {
1009 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMAI_CTL,((sc->bce_btag)->write_4((sc->bce_bhandle), (0x0100)
, (0)))
1010 0)((sc->bce_btag)->write_4((sc->bce_bhandle), (0x0100)
, (0)))
;
1011
1012 /* disable emac */
1013 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_ENET_CTL,((sc->bce_btag)->write_4((sc->bce_bhandle), (0x042C)
, (0x00000002)))
1014 EC_ED)((sc->bce_btag)->write_4((sc->bce_bhandle), (0x042C)
, (0x00000002)))
;
1015 for (i = 0; i < 200; i++) {
1016 val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,((sc->bce_btag)->read_4((sc->bce_bhandle), (0x042C))
)
1017 BCE_ENET_CTL)((sc->bce_btag)->read_4((sc->bce_bhandle), (0x042C))
)
;
1018 if (!(val & EC_ED0x00000002))
1019 break;
1020 delay(10)(*delay_func)(10);
1021 }
1022 if (i == 200)
1023 printf("%s: timed out disabling ethernet mac\n",
1024 sc->bce_dev.dv_xname);
1025
1026 /* reset the dma engines */
1027 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_TXCTL,((sc->bce_btag)->write_4((sc->bce_bhandle), (0x0200)
, (0)))
1028 0)((sc->bce_btag)->write_4((sc->bce_bhandle), (0x0200)
, (0)))
;
1029 val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,((sc->bce_btag)->read_4((sc->bce_bhandle), (0x021C))
)
1030 BCE_DMA_RXSTATUS)((sc->bce_btag)->read_4((sc->bce_bhandle), (0x021C))
)
;
1031 /* if error on receive, wait to go idle */
1032 if (val & RS_ERROR0xf0000) {
1033 for (i = 0; i < 100; i++) {
1034 val = bus_space_read_4(sc->bce_btag,((sc->bce_btag)->read_4((sc->bce_bhandle), (0x021C))
)
1035 sc->bce_bhandle, BCE_DMA_RXSTATUS)((sc->bce_btag)->read_4((sc->bce_bhandle), (0x021C))
)
;
1036 if (val & RS_DMA_IDLE0x2000)
1037 break;
1038 delay(10)(*delay_func)(10);
1039 }
1040 if (i == 100)
1041 printf("%s: receive dma did not go idle after"
1042 " error\n", sc->bce_dev.dv_xname);
1043 }
1044 bus_space_write_4(sc->bce_btag, sc->bce_bhandle,((sc->bce_btag)->write_4((sc->bce_bhandle), (0x021C)
, (0)))
1045 BCE_DMA_RXSTATUS, 0)((sc->bce_btag)->write_4((sc->bce_bhandle), (0x021C)
, (0)))
;
1046
1047 /* reset ethernet mac */
1048 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_ENET_CTL,((sc->bce_btag)->write_4((sc->bce_bhandle), (0x042C)
, (0x00000004)))
1049 EC_ES)((sc->bce_btag)->write_4((sc->bce_bhandle), (0x042C)
, (0x00000004)))
;
1050 for (i = 0; i < 200; i++) {
1051 val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,((sc->bce_btag)->read_4((sc->bce_bhandle), (0x042C))
)
1052 BCE_ENET_CTL)((sc->bce_btag)->read_4((sc->bce_bhandle), (0x042C))
)
;
1053 if (!(val & EC_ES0x00000004))
1054 break;
1055 delay(10)(*delay_func)(10);
1056 }
1057 if (i == 200)
1058 printf("%s: timed out resetting ethernet mac\n",
1059 sc->bce_dev.dv_xname);
1060 } else {
1061 u_int32_t reg_win;
1062
1063 /* remap the pci registers to the Sonics config registers */
1064
1065 /* save the current map, so it can be restored */
1066 reg_win = pci_conf_read(sc->bce_pa.pa_pc, sc->bce_pa.pa_tag,
1067 BCE_REG_WIN0x80);
1068 /* set register window to Sonics registers */
1069 pci_conf_write(sc->bce_pa.pa_pc, sc->bce_pa.pa_tag,
1070 BCE_REG_WIN0x80, BCE_SONICS_WIN0x18002000);
1071
1072 /* enable SB to PCI interrupt */
1073 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBINTVEC,((sc->bce_btag)->write_4((sc->bce_bhandle), (0x0f94)
, (((sc->bce_btag)->read_4((sc->bce_bhandle), (0x0f94
))) | 0x02)))
1074 bus_space_read_4(sc->bce_btag, sc->bce_bhandle,((sc->bce_btag)->write_4((sc->bce_bhandle), (0x0f94)
, (((sc->bce_btag)->read_4((sc->bce_bhandle), (0x0f94
))) | 0x02)))
1075 BCE_SBINTVEC) | SBIV_ENET0)((sc->bce_btag)->write_4((sc->bce_bhandle), (0x0f94)
, (((sc->bce_btag)->read_4((sc->bce_bhandle), (0x0f94
))) | 0x02)))
;
1076
1077 /* enable prefetch and bursts for sonics-to-pci translation 2 */
1078 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SPCI_TR2,((sc->bce_btag)->write_4((sc->bce_bhandle), (0x0108)
, (((sc->bce_btag)->read_4((sc->bce_bhandle), (0x0108
))) | 0x4 | 0x8)))
1079 bus_space_read_4(sc->bce_btag, sc->bce_bhandle,((sc->bce_btag)->write_4((sc->bce_bhandle), (0x0108)
, (((sc->bce_btag)->read_4((sc->bce_bhandle), (0x0108
))) | 0x4 | 0x8)))
1080 BCE_SPCI_TR2) | SBTOPCI_PREF | SBTOPCI_BURST)((sc->bce_btag)->write_4((sc->bce_bhandle), (0x0108)
, (((sc->bce_btag)->read_4((sc->bce_bhandle), (0x0108
))) | 0x4 | 0x8)))
;
1081
1082 /* restore to ethernet register space */
1083 pci_conf_write(sc->bce_pa.pa_pc, sc->bce_pa.pa_tag, BCE_REG_WIN0x80,
1084 reg_win);
1085 }
1086
1087 /* disable SB core if not in reset */
1088 if (!(sbval & SBTML_RESET0x1)) {
1089
1090 /* set the reject bit */
1091 bus_space_write_4(sc->bce_btag, sc->bce_bhandle,((sc->bce_btag)->write_4((sc->bce_bhandle), (0x0f98)
, (0x2 | 0x10000)))
1092 BCE_SBTMSTATELOW, SBTML_REJ | SBTML_CLK)((sc->bce_btag)->write_4((sc->bce_bhandle), (0x0f98)
, (0x2 | 0x10000)))
;
1093 for (i = 0; i < 200; i++) {
1094 val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,((sc->bce_btag)->read_4((sc->bce_bhandle), (0x0f98))
)
1095 BCE_SBTMSTATELOW)((sc->bce_btag)->read_4((sc->bce_bhandle), (0x0f98))
)
;
1096 if (val & SBTML_REJ0x2)
1097 break;
1098 delay(1)(*delay_func)(1);
1099 }
1100 if (i == 200)
1101 printf("%s: while resetting core, reject did not set\n",
1102 sc->bce_dev.dv_xname);
1103 /* wait until busy is clear */
1104 for (i = 0; i < 200; i++) {
1105 val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,((sc->bce_btag)->read_4((sc->bce_bhandle), (0x0f9C))
)
1106 BCE_SBTMSTATEHI)((sc->bce_btag)->read_4((sc->bce_bhandle), (0x0f9C))
)
;
1107 if (!(val & 0x4))
1108 break;
1109 delay(1)(*delay_func)(1);
1110 }
1111 if (i == 200)
1112 printf("%s: while resetting core, busy did not clear\n",
1113 sc->bce_dev.dv_xname);
1114 /* set reset and reject while enabling the clocks */
1115 bus_space_write_4(sc->bce_btag, sc->bce_bhandle,((sc->bce_btag)->write_4((sc->bce_bhandle), (0x0f98)
, (0x20000 | 0x10000 | 0x2 | 0x1)))
1116 BCE_SBTMSTATELOW,((sc->bce_btag)->write_4((sc->bce_bhandle), (0x0f98)
, (0x20000 | 0x10000 | 0x2 | 0x1)))
1117 SBTML_FGC | SBTML_CLK | SBTML_REJ | SBTML_RESET)((sc->bce_btag)->write_4((sc->bce_bhandle), (0x0f98)
, (0x20000 | 0x10000 | 0x2 | 0x1)))
;
1118 val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,((sc->bce_btag)->read_4((sc->bce_bhandle), (0x0f98))
)
1119 BCE_SBTMSTATELOW)((sc->bce_btag)->read_4((sc->bce_bhandle), (0x0f98))
)
;
1120 delay(10)(*delay_func)(10);
1121 bus_space_write_4(sc->bce_btag, sc->bce_bhandle,((sc->bce_btag)->write_4((sc->bce_bhandle), (0x0f98)
, (0x2 | 0x1)))
1122 BCE_SBTMSTATELOW, SBTML_REJ | SBTML_RESET)((sc->bce_btag)->write_4((sc->bce_bhandle), (0x0f98)
, (0x2 | 0x1)))
;
1123 delay(1)(*delay_func)(1);
1124 }
1125 /* enable clock */
1126 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATELOW,((sc->bce_btag)->write_4((sc->bce_bhandle), (0x0f98)
, (0x20000 | 0x10000 | 0x1)))
1127 SBTML_FGC | SBTML_CLK | SBTML_RESET)((sc->bce_btag)->write_4((sc->bce_bhandle), (0x0f98)
, (0x20000 | 0x10000 | 0x1)))
;
1128 val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATELOW)((sc->bce_btag)->read_4((sc->bce_bhandle), (0x0f98))
)
;
1129 delay(1)(*delay_func)(1);
1130
1131 /* clear any error bits that may be on */
1132 val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATEHI)((sc->bce_btag)->read_4((sc->bce_bhandle), (0x0f9C))
)
;
1133 if (val & 1)
1134 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATEHI,((sc->bce_btag)->write_4((sc->bce_bhandle), (0x0f9C)
, (0)))
1135 0)((sc->bce_btag)->write_4((sc->bce_bhandle), (0x0f9C)
, (0)))
;
1136 val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SBIMSTATE)((sc->bce_btag)->read_4((sc->bce_bhandle), (0x0f90))
)
;
1137 if (val & SBIM_ERRORBITS(0x20000|0x40000))
1138 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBIMSTATE,
1139 val & ~SBIM_ERRORBITS);
1140
1141 /* clear reset and allow it to propagate throughout the core */
1142 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATELOW,
1143 SBTML_FGC | SBTML_CLK);
1144 val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATELOW);
1145 delay(1);
1146
1147 /* leave clock enabled */
1148 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATELOW,
1149 SBTML_CLK);
1150 val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATELOW);
Value stored to 'val' is never read
1151 delay(1);
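The read-back flagged above almost certainly exists only to flush the posted write before the delay; the value itself is never consulted, which is exactly what the analyzer reports. A minimal sketch of how the dead store at line 1150 could be silenced, assuming the read is wanted purely for its side effect (illustrative change, not a committed fix):

    /* flush the posted write without keeping the result */
    bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATELOW,
        SBTML_CLK);
    (void)bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATELOW);
    delay(1);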
1152
1153 /* initialize MDC preamble, frequency */
1154 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_CTL, 0x8d); /* MAGIC */
1155
1156 /* enable phy, differs for internal, and external */
1157 val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_DEVCTL);
1158 if (!(val & BCE_DC_IP)) {
1159 /* select external phy */
1160 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_ENET_CTL,
1161 EC_EP);
1162 } else if (val & BCE_DC_ER) { /* internal, clear reset bit if on */
1163 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DEVCTL,
1164 val & ~BCE_DC_ER);
1165 delay(100);
1166 }
1167}
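The function above repeats a write-then-read-back pattern on BCE_SBTMSTATELOW around every delay(). A hypothetical helper (not part of if_bce.c) that makes the idiom explicit and keeps the deliberately discarded read from being flagged as a dead store might look like this:

    /*
     * Hypothetical helper: write a core register, then read it back so the
     * posted write is flushed; the read value is intentionally discarded.
     */
    static inline void
    bce_write_flush(struct bce_softc *sc, bus_size_t off, u_int32_t v)
    {
        bus_space_write_4(sc->bce_btag, sc->bce_bhandle, off, v);
        (void)bus_space_read_4(sc->bce_btag, sc->bce_bhandle, off);
    }

With such a helper the step at lines 1148-1151 would reduce to bce_write_flush(sc, BCE_SBTMSTATELOW, SBTML_CLK); delay(1);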
1168
1169/* Set up the receive filter. */
1170void
1171bce_iff(struct ifnet *ifp)
1172{
1173 struct bce_softc *sc = ifp->if_softc;
1174 struct arpcom *ac = &sc->bce_ac;
1175 u_int32_t rxctl;
1176
1177 rxctl = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_RX_CTL);
1178 rxctl &= ~(ERC_AM | ERC_DB | ERC_PE);
1179 ifp->if_flags |= IFF_ALLMULTI;
1180
1181 /* disable the filter */
1182 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_FILT_CTL, 0);
1183
1184 /* add our own address */
1185 bce_add_mac(sc, ac->ac_enaddr, 0);
1186
1187 if (ifp->if_flags & IFF_PROMISC || ac->ac_multicnt > 0) {
1188 ifp->if_flags |= IFF_ALLMULTI;
1189 if (ifp->if_flags & IFF_PROMISC)
1190 rxctl |= ERC_PE;
1191 else
1192 rxctl |= ERC_AM;
1193 }
1194
1195 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_RX_CTL, rxctl);
1196
1197 /* enable the filter */
1198 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_FILT_CTL,
1199 bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_FILT_CTL) | 1);
1200}
1201
1202/* Read a PHY register on the MII. */
1203int
1204bce_mii_read(struct device *self, int phy, int reg)
1205{
1206 struct bce_softc *sc = (struct bce_softc *) self;
1207 int i;
1208 u_int32_t val;
1209
1210 /* clear mii_int */
1211 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_STS,
1212 BCE_MIINTR);
1213
1214 /* Read the PHY register */
1215 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_COMM,
1216 (MII_COMMAND_READ << 28) | (MII_COMMAND_START << 30) | /* MAGIC */
1217 (MII_COMMAND_ACK << 16) | BCE_MIPHY(phy) | BCE_MIREG(reg)); /* MAGIC */
1218
1219 for (i = 0; i < BCE_TIMEOUT; i++) {
1220 val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
1221 BCE_MI_STS);
1222 if (val & BCE_MIINTR)
1223 break;
1224 delay(10);
1225 }
1226 val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_COMM);
1227 if (i == BCE_TIMEOUT) {
1228 printf("%s: PHY read timed out reading phy %d, reg %d, val = "
1229 "0x%08x\n", sc->bce_dev.dv_xname, phy, reg, val);
1230 return (0);
1231 }
1232 return (val & BCE_MICOMM_DATA);
1233}
1234
1235/* Write a PHY register on the MII */
1236void
1237bce_mii_write(struct device *self, int phy, int reg, int val)
1238{
1239 struct bce_softc *sc = (struct bce_softc *) self;
1240 int i;
1241 u_int32_t rval;
1242
1243 /* clear mii_int */
1244 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_STS,
1245 BCE_MIINTR);
1246
1247 /* Write the PHY register */
1248 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_COMM,
1249 (MII_COMMAND_WRITE << 28) | (MII_COMMAND_START << 30) | /* MAGIC */
1250 (MII_COMMAND_ACK << 16) | (val & BCE_MICOMM_DATA) | /* MAGIC */
1251 BCE_MIPHY(phy) | BCE_MIREG(reg));
1252
1253 /* wait for write to complete */
1254 for (i = 0; i < BCE_TIMEOUT; i++) {
1255 rval = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
1256 BCE_MI_STS);
1257 if (rval & BCE_MIINTR)
1258 break;
1259 delay(10);
1260 }
1261 rval = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_COMM);
1262 if (i == BCE_TIMEOUT) {
1263 printf("%s: PHY timed out writing phy %d, reg %d, val "
1264 "= 0x%08x\n", sc->bce_dev.dv_xname, phy, reg, val);
1265 }
1266}
1267
1268/* sync hardware duplex mode to software state */
1269void
1270bce_statchg(struct device *self)
1271{
1272 struct bce_softc *sc = (struct bce_softc *) self;
1273 u_int32_t reg;
1274
1275 /* if needed, change register to match duplex mode */
1276 reg = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_TX_CTL);
1277 if (sc->bce_mii.mii_media_active & IFM_FDX && !(reg & EXC_FD))
1278 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_TX_CTL,
1279 reg | EXC_FD);
1280 else if (!(sc->bce_mii.mii_media_active & IFM_FDX) && reg & EXC_FD)
1281 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_TX_CTL,
1282 reg & ~EXC_FD);
1283
1284 /*
1285 * Enable activity led.
1286 * XXX This should be in a phy driver, but not currently.
1287 */
1288 bce_mii_write((struct device *) sc, 1, 26, /* MAGIC */
1289 bce_mii_read((struct device *) sc, 1, 26) & 0x7fff); /* MAGIC */
1290 /* enable traffic meter led mode */
1291 bce_mii_write((struct device *) sc, 1, 26, /* MAGIC */
1292 bce_mii_read((struct device *) sc, 1, 27) | (1 << 6)); /* MAGIC */
1293}
1294
1295/* Set hardware to newly-selected media */
1296int
1297bce_mediachange(struct ifnet *ifp)
1298{
1299 struct bce_softc *sc = ifp->if_softc;
1300
1301 if (ifp->if_flags & IFF_UP)
1302 mii_mediachg(&sc->bce_mii);
1303 return (0);
1304}
1305
1306/* Get the current interface media status */
1307void
1308bce_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
1309{
1310 struct bce_softc *sc = ifp->if_softc;
1311
1312 mii_pollstat(&sc->bce_mii);
1313 ifmr->ifm_active = sc->bce_mii.mii_media_active;
1314 ifmr->ifm_status = sc->bce_mii.mii_media_status;
1315}
1316
1317/* One second timer, checks link status */
1318void
1319bce_tick(void *v)
1320{
1321 struct bce_softc *sc = v;
1322 int s;
1323
1324 s = splnet();
1325 mii_tick(&sc->bce_mii);
1326 splx(s);
1327
1328 timeout_add_sec(&sc->bce_timeout, 1);
1329}
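bce_tick() re-arms itself once per second via timeout_add_sec(), so the timeout is presumably bound to the callback once during attach with timeout_set(). A sketch of the assumed setup (illustrative, not quoted from the driver):

    /* assumed one-time setup elsewhere in the driver, e.g. at attach */
    timeout_set(&sc->bce_timeout, bce_tick, sc);    /* bind callback and argument */
    timeout_add_sec(&sc->bce_timeout, 1);           /* schedule the first tick */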