Bug Summary

File: dev/ic/dc.c
Warning: line 816, column 3
3rd function call argument is an uninitialized value
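Before the listing, here is a minimal standalone C model of the pattern the checker is reporting. This is an illustration only, not the driver code; all names in it (softc, miibus_writereg, mii_write, write_reg) are made up. A local is written only when a test of a struct field passes, the struct then escapes by pointer into another call, and the same test is re-evaluated before the local is read back, so the analyzer cannot prove the write and the read are guarded by the same decision.

    /*
     * Hypothetical, simplified model of the report: 'save' is assigned
     * only when the first test of s->type passes, the struct escapes by
     * pointer into another call, and the test is repeated before 'save'
     * is read back, so the analyzer cannot prove both tests agree.
     */
    #include <stdio.h>

    struct softc {
        int type;
        int netcfg;
    };

    /* Stand-in for dc_mii_writereg(); receives the softc by pointer. */
    static void mii_write(struct softc *s) { (void)s; }

    static void write_reg(struct softc *s, int v) { s->netcfg = v; }

    static void
    miibus_writereg(struct softc *s)
    {
        int save;                     /* declared without an initial value */

        if (s->type == 1) {
            save = s->netcfg;         /* assigned on this branch only */
            write_reg(s, save & ~0x4);
        }
        mii_write(s);                 /* s escapes; analyzer invalidates s->type */
        if (s->type == 1)             /* test re-evaluated after the call */
            write_reg(s, save);       /* reported: argument may be uninitialized */
    }

    int
    main(void)
    {
        struct softc s = { 0, 0x40 }; /* type != 1: neither branch is taken */

        miibus_writereg(&s);
        printf("netcfg=0x%x\n", s.netcfg);
        return 0;
    }

At run time the two tests always agree unless s->type changes inside mii_write(), which is why reports like this one are usually resolved by making the save/restore pairing explicit rather than by a behavioural fix.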

Annotated Source Code


clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name dc.c -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/lib/clang/13.0.0 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -D CONFIG_DRM_AMD_DC_DCN3_0 -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant -fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free 
-fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /usr/obj/sys/arch/amd64/compile/GENERIC.MP/scan-build/2022-01-12-131800-47421-1 -x c /usr/src/sys/dev/ic/dc.c
1/* $OpenBSD: dc.c,v 1.154 2020/07/10 13:26:37 patrick Exp $ */
2
3/*
4 * Copyright (c) 1997, 1998, 1999
5 * Bill Paul <wpaul@ee.columbia.edu>. All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by Bill Paul.
18 * 4. Neither the name of the author nor the names of any co-contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32 * THE POSSIBILITY OF SUCH DAMAGE.
33 *
34 * $FreeBSD: src/sys/pci/if_dc.c,v 1.43 2001/01/19 23:55:07 wpaul Exp $
35 */
36
37/*
38 * DEC "tulip" clone ethernet driver. Supports the DEC/Intel 21143
39 * series chips and several workalikes including the following:
40 *
41 * Macronix 98713/98715/98725/98727/98732 PMAC (www.macronix.com)
42 * Macronix/Lite-On 82c115 PNIC II (www.macronix.com)
43 * Lite-On 82c168/82c169 PNIC (www.litecom.com)
44 * ASIX Electronics AX88140A (www.asix.com.tw)
45 * ASIX Electronics AX88141 (www.asix.com.tw)
46 * ADMtek AL981 (www.admtek.com.tw)
47 * ADMtek AN983 (www.admtek.com.tw)
48 * Davicom DM9100, DM9102, DM9102A (www.davicom8.com)
49 * Accton EN1217, EN2242 (www.accton.com)
50 * Xircom X3201 (www.xircom.com)
51 *
52 * Datasheets for the 21143 are available at developer.intel.com.
53 * Datasheets for the clone parts can be found at their respective sites.
54 * (Except for the PNIC; see www.freebsd.org/~wpaul/PNIC/pnic.ps.gz.)
55 * The PNIC II is essentially a Macronix 98715A chip; the only difference
56 * worth noting is that its multicast hash table is only 128 bits wide
57 * instead of 512.
58 *
59 * Written by Bill Paul <wpaul@ee.columbia.edu>
60 * Electrical Engineering Department
61 * Columbia University, New York City
62 */
63
64/*
65 * The Intel 21143 is the successor to the DEC 21140. It is basically
66 * the same as the 21140 but with a few new features. The 21143 supports
67 * three kinds of media attachments:
68 *
69 * o MII port, for 10Mbps and 100Mbps support and NWAY
70 * autonegotiation provided by an external PHY.
71 * o SYM port, for symbol mode 100Mbps support.
72 * o 10baseT port.
73 * o AUI/BNC port.
74 *
75 * The 100Mbps SYM port and 10baseT port can be used together in
76 * combination with the internal NWAY support to create a 10/100
77 * autosensing configuration.
78 *
79 * Note that not all tulip workalikes are handled in this driver: we only
80 * deal with those which are relatively well behaved. The Winbond is
81 * handled separately due to its different register offsets and the
82 * special handling needed for its various bugs. The PNIC is handled
83 * here, but I'm not thrilled about it.
84 *
85 * All of the workalike chips use some form of MII transceiver support
86 * with the exception of the Macronix chips, which also have a SYM port.
87 * The ASIX AX88140A is also documented to have a SYM port, but all
88 * the cards I've seen use an MII transceiver, probably because the
89 * AX88140A doesn't support internal NWAY.
90 */
91
92#include "bpfilter.h"
93
94#include <sys/param.h>
95#include <sys/systm.h>
96#include <sys/mbuf.h>
97#include <sys/protosw.h>
98#include <sys/socket.h>
99#include <sys/ioctl.h>
100#include <sys/errno.h>
101#include <sys/malloc.h>
102#include <sys/kernel.h>
103#include <sys/device.h>
104#include <sys/timeout.h>
105
106#include <net/if.h>
107
108#include <netinet/in.h>
109#include <netinet/if_ether.h>
110
111#include <net/if_media.h>
112
113#if NBPFILTER1 > 0
114#include <net/bpf.h>
115#endif
116
117#include <dev/mii/mii.h>
118#include <dev/mii/miivar.h>
119
120#include <machine/bus.h>
121#include <dev/pci/pcidevs.h>
122
123#include <dev/ic/dcreg.h>
124
125/*
126 * The Davicom DM9102 has a broken DMA engine that reads beyond the
127 * end of the programmed transfer. Architectures with a proper IOMMU
128 * (such as sparc64) will trap on this access. To avoid having to
129 * copy each transmitted mbuf to guarantee enough trailing space,
130 * those architectures should implement BUS_DMA_OVERRUN that takes
131 * appropriate action to tolerate this behaviour.
132 */
133#ifndef BUS_DMA_OVERRUN0
134#define BUS_DMA_OVERRUN0 0
135#endif
136
137int dc_intr(void *);
138struct dc_type *dc_devtype(void *);
139int dc_newbuf(struct dc_softc *, int, struct mbuf *);
140int dc_encap(struct dc_softc *, bus_dmamap_t, struct mbuf *, u_int32_t *);
141
142void dc_pnic_rx_bug_war(struct dc_softc *, int);
143int dc_rx_resync(struct dc_softc *);
144int dc_rxeof(struct dc_softc *);
145void dc_txeof(struct dc_softc *);
146void dc_tick(void *);
147void dc_tx_underrun(struct dc_softc *);
148void dc_start(struct ifnet *);
149int dc_ioctl(struct ifnet *, u_long, caddr_t);
150void dc_watchdog(struct ifnet *);
151int dc_ifmedia_upd(struct ifnet *);
152void dc_ifmedia_sts(struct ifnet *, struct ifmediareq *);
153
154void dc_delay(struct dc_softc *);
155void dc_eeprom_width(struct dc_softc *);
156void dc_eeprom_idle(struct dc_softc *);
157void dc_eeprom_putbyte(struct dc_softc *, int);
158void dc_eeprom_getword(struct dc_softc *, int, u_int16_t *);
159void dc_eeprom_getword_pnic(struct dc_softc *, int, u_int16_t *);
160void dc_eeprom_getword_xircom(struct dc_softc *, int, u_int16_t *);
161void dc_read_eeprom(struct dc_softc *, caddr_t, int, int, int);
162
163void dc_mii_writebit(struct dc_softc *, int);
164int dc_mii_readbit(struct dc_softc *);
165void dc_mii_sync(struct dc_softc *);
166void dc_mii_send(struct dc_softc *, u_int32_t, int);
167int dc_mii_readreg(struct dc_softc *, struct dc_mii_frame *);
168int dc_mii_writereg(struct dc_softc *, struct dc_mii_frame *);
169int dc_miibus_readreg(struct device *, int, int);
170void dc_miibus_writereg(struct device *, int, int, int);
171void dc_miibus_statchg(struct device *);
172
173void dc_setcfg(struct dc_softc *, uint64_t);
174u_int32_t dc_crc_le(struct dc_softc *, caddr_t);
175u_int32_t dc_crc_be(caddr_t)((ether_crc32_be(caddr_t,6) >> 26) & 0x0000003F);
176void dc_setfilt_21143(struct dc_softc *);
177void dc_setfilt_asix(struct dc_softc *);
178void dc_setfilt_admtek(struct dc_softc *);
179void dc_setfilt_xircom(struct dc_softc *);
180
181void dc_setfilt(struct dc_softc *);
182
183void dc_reset(struct dc_softc *);
184int dc_list_rx_init(struct dc_softc *);
185int dc_list_tx_init(struct dc_softc *);
186
187void dc_read_srom(struct dc_softc *, int);
188void dc_parse_21143_srom(struct dc_softc *);
189void dc_decode_leaf_sia(struct dc_softc *,
190 struct dc_eblock_sia *);
191void dc_decode_leaf_mii(struct dc_softc *,
192 struct dc_eblock_mii *);
193void dc_decode_leaf_sym(struct dc_softc *,
194 struct dc_eblock_sym *);
195void dc_apply_fixup(struct dc_softc *, uint64_t);
196
197#define DC_SETBIT(sc, reg, x)((sc->dc_btag)->write_4((sc->dc_bhandle), (reg), (((
sc->dc_btag)->read_4((sc->dc_bhandle), (reg))) | (x)
)))
\
198 CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | (x))((sc->dc_btag)->write_4((sc->dc_bhandle), (reg), (((
sc->dc_btag)->read_4((sc->dc_bhandle), (reg))) | (x)
)))
199
200#define DC_CLRBIT(sc, reg, x)((sc->dc_btag)->write_4((sc->dc_bhandle), (reg), (((
sc->dc_btag)->read_4((sc->dc_bhandle), (reg))) &
~(x))))
\
201 CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~(x))((sc->dc_btag)->write_4((sc->dc_bhandle), (reg), (((
sc->dc_btag)->read_4((sc->dc_bhandle), (reg))) &
~(x))))
202
203#define SIO_SET(x)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x48), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x48))) | (
(x)))))
DC_SETBIT(sc, DC_SIO, (x))((sc->dc_btag)->write_4((sc->dc_bhandle), (0x48), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x48))) | (
(x)))))
204#define SIO_CLR(x)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x48), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x48))) &
~((x)))))
DC_CLRBIT(sc, DC_SIO, (x))((sc->dc_btag)->write_4((sc->dc_bhandle), (0x48), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x48))) &
~((x)))))
205
206void
207dc_delay(struct dc_softc *sc)
208{
209 int idx;
210
211 for (idx = (300 / 33) + 1; idx > 0; idx--)
212 CSR_READ_4(sc, DC_BUSCTL)((sc->dc_btag)->read_4((sc->dc_bhandle), (0x00)));
213}
214
215void
216dc_eeprom_width(struct dc_softc *sc)
217{
218 int i;
219
220 /* Force EEPROM to idle state. */
221 dc_eeprom_idle(sc);
222
223 /* Enter EEPROM access mode. */
224 CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x48), (0x00000800
)))
;
225 dc_delay(sc);
226 DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x48), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x48))) | (
0x00004000))))
;
227 dc_delay(sc);
228 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x48), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x48))) &
~(0x00000002))))
;
229 dc_delay(sc);
230 DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x48), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x48))) | (
0x00000001))))
;
231 dc_delay(sc);
232
233 for (i = 3; i--;) {
234 if (6 & (1 << i))
235 DC_SETBIT(sc, DC_SIO, DC_SIO_EE_DATAIN)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x48), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x48))) | (
0x00000004))))
;
236 else
237 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_DATAIN)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x48), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x48))) &
~(0x00000004))))
;
238 dc_delay(sc);
239 DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x48), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x48))) | (
0x00000002))))
;
240 dc_delay(sc);
241 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x48), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x48))) &
~(0x00000002))))
;
242 dc_delay(sc);
243 }
244
245 for (i = 1; i <= 12; i++) {
246 DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x48), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x48))) | (
0x00000002))))
;
247 dc_delay(sc);
248 if (!(CSR_READ_4(sc, DC_SIO)((sc->dc_btag)->read_4((sc->dc_bhandle), (0x48))) & DC_SIO_EE_DATAOUT0x00000008)) {
249 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x48), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x48))) &
~(0x00000002))))
;
250 dc_delay(sc);
251 break;
252 }
253 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x48), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x48))) &
~(0x00000002))))
;
254 dc_delay(sc);
255 }
256
257 /* Turn off EEPROM access mode. */
258 dc_eeprom_idle(sc);
259
260 if (i < 4 || i > 12)
261 sc->dc_romwidth = 6;
262 else
263 sc->dc_romwidth = i;
264
265 /* Enter EEPROM access mode. */
266 CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x48), (0x00000800
)))
;
267 dc_delay(sc);
268 DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x48), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x48))) | (
0x00004000))))
;
269 dc_delay(sc);
270 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x48), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x48))) &
~(0x00000002))))
;
271 dc_delay(sc);
272 DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x48), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x48))) | (
0x00000001))))
;
273 dc_delay(sc);
274
275 /* Turn off EEPROM access mode. */
276 dc_eeprom_idle(sc);
277}
278
279void
280dc_eeprom_idle(struct dc_softc *sc)
281{
282 int i;
283
284 CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x48), (0x00000800
)))
;
285 dc_delay(sc);
286 DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x48), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x48))) | (
0x00004000))))
;
287 dc_delay(sc);
288 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x48), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x48))) &
~(0x00000002))))
;
289 dc_delay(sc);
290 DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x48), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x48))) | (
0x00000001))))
;
291 dc_delay(sc);
292
293 for (i = 0; i < 25; i++) {
294 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x48), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x48))) &
~(0x00000002))))
;
295 dc_delay(sc);
296 DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x48), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x48))) | (
0x00000002))))
;
297 dc_delay(sc);
298 }
299
300 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x48), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x48))) &
~(0x00000002))))
;
301 dc_delay(sc);
302 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CS)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x48), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x48))) &
~(0x00000001))))
;
303 dc_delay(sc);
304 CSR_WRITE_4(sc, DC_SIO, 0x00000000)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x48), (0x00000000
)))
;
305}
306
307/*
308 * Send a read command and address to the EEPROM, check for ACK.
309 */
310void
311dc_eeprom_putbyte(struct dc_softc *sc, int addr)
312{
313 int d, i;
314
315 d = DC_EECMD_READ0x180 >> 6;
316
317 for (i = 3; i--; ) {
318 if (d & (1 << i))
319 DC_SETBIT(sc, DC_SIO, DC_SIO_EE_DATAIN)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x48), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x48))) | (
0x00000004))))
;
320 else
321 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_DATAIN)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x48), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x48))) &
~(0x00000004))))
;
322 dc_delay(sc);
323 DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x48), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x48))) | (
0x00000002))))
;
324 dc_delay(sc);
325 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x48), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x48))) &
~(0x00000002))))
;
326 dc_delay(sc);
327 }
328
329 /*
330 * Feed in each bit and strobe the clock.
331 */
332 for (i = sc->dc_romwidth; i--;) {
333 if (addr & (1 << i)) {
334 SIO_SET(DC_SIO_EE_DATAIN)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x48), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x48))) | (
(0x00000004)))))
;
335 } else {
336 SIO_CLR(DC_SIO_EE_DATAIN)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x48), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x48))) &
~((0x00000004)))))
;
337 }
338 dc_delay(sc);
339 SIO_SET(DC_SIO_EE_CLK)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x48), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x48))) | (
(0x00000002)))))
;
340 dc_delay(sc);
341 SIO_CLR(DC_SIO_EE_CLK)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x48), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x48))) &
~((0x00000002)))))
;
342 dc_delay(sc);
343 }
344}
345
346/*
347 * Read a word of data stored in the EEPROM at address 'addr.'
348 * The PNIC 82c168/82c169 has its own non-standard way to read
349 * the EEPROM.
350 */
351void
352dc_eeprom_getword_pnic(struct dc_softc *sc, int addr, u_int16_t *dest)
353{
354 int i;
355 u_int32_t r;
356
357 CSR_WRITE_4(sc, DC_PN_SIOCTL, DC_PN_EEOPCODE_READ|addr)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x98), (0x00000600
|addr)))
;
358
359 for (i = 0; i < DC_TIMEOUT1000; i++) {
360 DELAY(1)(*delay_func)(1);
361 r = CSR_READ_4(sc, DC_SIO)((sc->dc_btag)->read_4((sc->dc_bhandle), (0x48)));
362 if (!(r & DC_PN_SIOCTL_BUSY0x80000000)) {
363 *dest = (u_int16_t)(r & 0xFFFF);
364 return;
365 }
366 }
367}
368
369/*
370 * Read a word of data stored in the EEPROM at address 'addr.'
371 * The Xircom X3201 has its own non-standard way to read
372 * the EEPROM, too.
373 */
374void
375dc_eeprom_getword_xircom(struct dc_softc *sc, int addr, u_int16_t *dest)
376{
377 SIO_SET(DC_SIO_ROMSEL | DC_SIO_ROMCTL_READ)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x48), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x48))) | (
(0x00001000 | 0x00004000)))))
;
378
379 addr *= 2;
380 CSR_WRITE_4(sc, DC_ROM, addr | 0x160)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x50), (addr
| 0x160)))
;
381 *dest = (u_int16_t)CSR_READ_4(sc, DC_SIO)((sc->dc_btag)->read_4((sc->dc_bhandle), (0x48))) & 0xff;
382 addr += 1;
383 CSR_WRITE_4(sc, DC_ROM, addr | 0x160)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x50), (addr
| 0x160)))
;
384 *dest |= ((u_int16_t)CSR_READ_4(sc, DC_SIO)((sc->dc_btag)->read_4((sc->dc_bhandle), (0x48))) & 0xff) << 8;
385
386 SIO_CLR(DC_SIO_ROMSEL | DC_SIO_ROMCTL_READ)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x48), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x48))) &
~((0x00001000 | 0x00004000)))))
;
387}
388
389/*
390 * Read a word of data stored in the EEPROM at address 'addr.'
391 */
392void
393dc_eeprom_getword(struct dc_softc *sc, int addr, u_int16_t *dest)
394{
395 int i;
396 u_int16_t word = 0;
397
398 /* Force EEPROM to idle state. */
399 dc_eeprom_idle(sc);
400
401 /* Enter EEPROM access mode. */
402 CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x48), (0x00000800
)))
;
403 dc_delay(sc);
404 DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x48), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x48))) | (
0x00004000))))
;
405 dc_delay(sc);
406 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x48), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x48))) &
~(0x00000002))))
;
407 dc_delay(sc);
408 DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x48), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x48))) | (
0x00000001))))
;
409 dc_delay(sc);
410
411 /*
412 * Send address of word we want to read.
413 */
414 dc_eeprom_putbyte(sc, addr);
415
416 /*
417 * Start reading bits from EEPROM.
418 */
419 for (i = 0x8000; i; i >>= 1) {
420 SIO_SET(DC_SIO_EE_CLK)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x48), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x48))) | (
(0x00000002)))))
;
421 dc_delay(sc);
422 if (CSR_READ_4(sc, DC_SIO)((sc->dc_btag)->read_4((sc->dc_bhandle), (0x48))) & DC_SIO_EE_DATAOUT0x00000008)
423 word |= i;
424 dc_delay(sc);
425 SIO_CLR(DC_SIO_EE_CLK)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x48), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x48))) &
~((0x00000002)))))
;
426 dc_delay(sc);
427 }
428
429 /* Turn off EEPROM access mode. */
430 dc_eeprom_idle(sc);
431
432 *dest = word;
433}
434
435/*
436 * Read a sequence of words from the EEPROM.
437 */
438void
439dc_read_eeprom(struct dc_softc *sc, caddr_t dest, int off, int cnt,
440 int swap)
441{
442 int i;
443 u_int16_t word = 0, *ptr;
444
445 for (i = 0; i < cnt; i++) {
446 if (DC_IS_PNIC(sc)(sc->dc_type == 0xA))
447 dc_eeprom_getword_pnic(sc, off + i, &word);
448 else if (DC_IS_XIRCOM(sc)(sc->dc_type == 0xB))
449 dc_eeprom_getword_xircom(sc, off + i, &word);
450 else
451 dc_eeprom_getword(sc, off + i, &word);
452 ptr = (u_int16_t *)(dest + (i * 2));
453 if (swap)
454 *ptr = betoh16(word)(__uint16_t)(__builtin_constant_p(word) ? (__uint16_t)(((__uint16_t
)(word) & 0xffU) << 8 | ((__uint16_t)(word) & 0xff00U
) >> 8) : __swap16md(word))
;
455 else
456 *ptr = letoh16(word)((__uint16_t)(word));
457 }
458}
459
460/*
461 * The following two routines are taken from the Macronix 98713
462 * Application Notes pp.19-21.
463 */
464/*
465 * Write a bit to the MII bus.
466 */
467void
468dc_mii_writebit(struct dc_softc *sc, int bit)
469{
470 if (bit)
471 CSR_WRITE_4(sc, DC_SIO,((sc->dc_btag)->write_4((sc->dc_bhandle), (0x48), (0x00002000
|0x00020000)))
472 DC_SIO_ROMCTL_WRITE|DC_SIO_MII_DATAOUT)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x48), (0x00002000
|0x00020000)))
;
473 else
474 CSR_WRITE_4(sc, DC_SIO, DC_SIO_ROMCTL_WRITE)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x48), (0x00002000
)))
;
475
476 DC_SETBIT(sc, DC_SIO, DC_SIO_MII_CLK)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x48), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x48))) | (
0x00010000))))
;
477 DC_CLRBIT(sc, DC_SIO, DC_SIO_MII_CLK)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x48), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x48))) &
~(0x00010000))))
;
478}
479
480/*
481 * Read a bit from the MII bus.
482 */
483int
484dc_mii_readbit(struct dc_softc *sc)
485{
486 CSR_WRITE_4(sc, DC_SIO, DC_SIO_ROMCTL_READ|DC_SIO_MII_DIR)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x48), (0x00004000
|0x00040000)))
;
487 CSR_READ_4(sc, DC_SIO)((sc->dc_btag)->read_4((sc->dc_bhandle), (0x48)));
488 DC_SETBIT(sc, DC_SIO, DC_SIO_MII_CLK)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x48), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x48))) | (
0x00010000))))
;
489 DC_CLRBIT(sc, DC_SIO, DC_SIO_MII_CLK)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x48), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x48))) &
~(0x00010000))))
;
490 if (CSR_READ_4(sc, DC_SIO)((sc->dc_btag)->read_4((sc->dc_bhandle), (0x48))) & DC_SIO_MII_DATAIN0x00080000)
491 return (1);
492 return (0);
493}
494
495/*
496 * Sync the PHYs by setting data bit and strobing the clock 32 times.
497 */
498void
499dc_mii_sync(struct dc_softc *sc)
500{
501 int i;
502
503 CSR_WRITE_4(sc, DC_SIO, DC_SIO_ROMCTL_WRITE)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x48), (0x00002000
)))
;
504
505 for (i = 0; i < 32; i++)
506 dc_mii_writebit(sc, 1);
507}
508
509/*
510 * Clock a series of bits through the MII.
511 */
512void
513dc_mii_send(struct dc_softc *sc, u_int32_t bits, int cnt)
514{
515 int i;
516
517 for (i = (0x1 << (cnt - 1)); i; i >>= 1)
518 dc_mii_writebit(sc, bits & i);
519}
520
521/*
522 * Read an PHY register through the MII.
523 */
524int
525dc_mii_readreg(struct dc_softc *sc, struct dc_mii_frame *frame)
526{
527 int i, ack, s;
528
529 s = splnet()splraise(0x7);
530
531 /*
532 * Set up frame for RX.
533 */
534 frame->mii_stdelim = DC_MII_STARTDELIM0x01;
535 frame->mii_opcode = DC_MII_READOP0x02;
536 frame->mii_turnaround = 0;
537 frame->mii_data = 0;
538
539 /*
540 * Sync the PHYs.
541 */
542 dc_mii_sync(sc);
543
544 /*
545 * Send command/address info.
546 */
547 dc_mii_send(sc, frame->mii_stdelim, 2);
548 dc_mii_send(sc, frame->mii_opcode, 2);
549 dc_mii_send(sc, frame->mii_phyaddr, 5);
550 dc_mii_send(sc, frame->mii_regaddr, 5);
551
552#ifdef notdef
553 /* Idle bit */
554 dc_mii_writebit(sc, 1);
555 dc_mii_writebit(sc, 0);
556#endif
557
558 /* Check for ack */
559 ack = dc_mii_readbit(sc);
560
561 /*
562 * Now try reading data bits. If the ack failed, we still
563 * need to clock through 16 cycles to keep the PHY(s) in sync.
564 */
565 if (ack) {
566 for(i = 0; i < 16; i++) {
567 dc_mii_readbit(sc);
568 }
569 goto fail;
570 }
571
572 for (i = 0x8000; i; i >>= 1) {
573 if (!ack) {
574 if (dc_mii_readbit(sc))
575 frame->mii_data |= i;
576 }
577 }
578
579fail:
580
581 dc_mii_writebit(sc, 0);
582 dc_mii_writebit(sc, 0);
583
584 splx(s)spllower(s);
585
586 if (ack)
587 return (1);
588 return (0);
589}
590
591/*
592 * Write to a PHY register through the MII.
593 */
594int
595dc_mii_writereg(struct dc_softc *sc, struct dc_mii_frame *frame)
596{
597 int s;
598
599 s = splnet()splraise(0x7);
600 /*
601 * Set up frame for TX.
602 */
603
604 frame->mii_stdelim = DC_MII_STARTDELIM0x01;
605 frame->mii_opcode = DC_MII_WRITEOP0x01;
606 frame->mii_turnaround = DC_MII_TURNAROUND0x02;
607
608 /*
609 * Sync the PHYs.
610 */
611 dc_mii_sync(sc);
612
613 dc_mii_send(sc, frame->mii_stdelim, 2);
614 dc_mii_send(sc, frame->mii_opcode, 2);
615 dc_mii_send(sc, frame->mii_phyaddr, 5);
616 dc_mii_send(sc, frame->mii_regaddr, 5);
617 dc_mii_send(sc, frame->mii_turnaround, 2);
618 dc_mii_send(sc, frame->mii_data, 16);
619
620 /* Idle bit. */
621 dc_mii_writebit(sc, 0);
622 dc_mii_writebit(sc, 0);
623
624 splx(s)spllower(s);
625 return (0);
626}
627
628int
629dc_miibus_readreg(struct device *self, int phy, int reg)
630{
631 struct dc_mii_frame frame;
632 struct dc_softc *sc = (struct dc_softc *)self;
633 int i, rval, phy_reg;
634
635 /*
636 * Note: both the AL981 and AN983 have internal PHYs,
637 * however the AL981 provides direct access to the PHY
638 * registers while the AN983 uses a serial MII interface.
639 * The AN983's MII interface is also buggy in that you
640 * can read from any MII address (0 to 31), but only address 1
641 * behaves normally. To deal with both cases, we pretend
642 * that the PHY is at MII address 1.
643 */
644 if (DC_IS_ADMTEK(sc)(sc->dc_type == 0x6 || sc->dc_type == 0x7) && phy != DC_ADMTEK_PHYADDR0x1)
645 return (0);
646
647 /*
648 * Note: the ukphy probs of the RS7112 report a PHY at
649 * MII address 0 (possibly HomePNA?) and 1 (ethernet)
650 * so we only respond to correct one.
651 */
652 if (DC_IS_CONEXANT(sc)(sc->dc_type == 0xC) && phy != DC_CONEXANT_PHYADDR0x1)
653 return (0);
654
655 if (sc->dc_pmode != DC_PMODE_MII0x1) {
656 if (phy == (MII_NPHY32 - 1)) {
657 switch(reg) {
658 case MII_BMSR0x01:
659 /*
660 * Fake something to make the probe
661 * code think there's a PHY here.
662 */
663 return (BMSR_MEDIAMASK(0x8000|0x4000|0x2000| 0x1000|0x0800|0x0400|0x0200));
664 break;
665 case MII_PHYIDR10x02:
666 if (DC_IS_PNIC(sc)(sc->dc_type == 0xA))
667 return (PCI_VENDOR_LITEON0x11ad);
668 return (PCI_VENDOR_DEC0x1011);
669 break;
670 case MII_PHYIDR20x03:
671 if (DC_IS_PNIC(sc)(sc->dc_type == 0xA))
672 return (PCI_PRODUCT_LITEON_PNIC0x0002);
673 return (PCI_PRODUCT_DEC_211420x0019);
674 break;
675 default:
676 return (0);
677 break;
678 }
679 } else
680 return (0);
681 }
682
683 if (DC_IS_PNIC(sc)(sc->dc_type == 0xA)) {
684 CSR_WRITE_4(sc, DC_PN_MII, DC_PN_MIIOPCODE_READ |((sc->dc_btag)->write_4((sc->dc_bhandle), (0xA0), (0x60020000
| (phy << 23) | (reg << 18))))
685 (phy << 23) | (reg << 18))((sc->dc_btag)->write_4((sc->dc_bhandle), (0xA0), (0x60020000
| (phy << 23) | (reg << 18))))
;
686 for (i = 0; i < DC_TIMEOUT1000; i++) {
687 DELAY(1)(*delay_func)(1);
688 rval = CSR_READ_4(sc, DC_PN_MII)((sc->dc_btag)->read_4((sc->dc_bhandle), (0xA0)));
689 if (!(rval & DC_PN_MII_BUSY0x80000000)) {
690 rval &= 0xFFFF;
691 return (rval == 0xFFFF ? 0 : rval);
692 }
693 }
694 return (0);
695 }
696
697 if (DC_IS_COMET(sc)(sc->dc_type == 0x6)) {
698 switch(reg) {
699 case MII_BMCR0x00:
700 phy_reg = DC_AL_BMCR0xB4;
701 break;
702 case MII_BMSR0x01:
703 phy_reg = DC_AL_BMSR0xB8;
704 break;
705 case MII_PHYIDR10x02:
706 phy_reg = DC_AL_VENID0xBC;
707 break;
708 case MII_PHYIDR20x03:
709 phy_reg = DC_AL_DEVID0xC0;
710 break;
711 case MII_ANAR0x04:
712 phy_reg = DC_AL_ANAR0xC4;
713 break;
714 case MII_ANLPAR0x05:
715 phy_reg = DC_AL_LPAR0xC8;
716 break;
717 case MII_ANER0x06:
718 phy_reg = DC_AL_ANER0xCC;
719 break;
720 default:
721 printf("%s: phy_read: bad phy register %x\n",
722 sc->sc_dev.dv_xname, reg);
723 return (0);
724 break;
725 }
726
727 rval = CSR_READ_4(sc, phy_reg)((sc->dc_btag)->read_4((sc->dc_bhandle), (phy_reg))) & 0x0000FFFF;
728
729 if (rval == 0xFFFF)
730 return (0);
731 return (rval);
732 }
733
734 bzero(&frame, sizeof(frame))__builtin_bzero((&frame), (sizeof(frame)));
735
736 frame.mii_phyaddr = phy;
737 frame.mii_regaddr = reg;
738 if (sc->dc_type == DC_TYPE_987130x1) {
739 phy_reg = CSR_READ_4(sc, DC_NETCFG)((sc->dc_btag)->read_4((sc->dc_bhandle), (0x30)));
740 CSR_WRITE_4(sc, DC_NETCFG, phy_reg & ~DC_NETCFG_PORTSEL)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x30), (phy_reg
& ~0x00040000)))
;
741 }
742 dc_mii_readreg(sc, &frame);
743 if (sc->dc_type == DC_TYPE_987130x1)
744 CSR_WRITE_4(sc, DC_NETCFG, phy_reg)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x30), (phy_reg
)))
;
745
746 return (frame.mii_data);
747}
748
749void
750dc_miibus_writereg(struct device *self, int phy, int reg, int data)
751{
752 struct dc_softc *sc = (struct dc_softc *)self;
753 struct dc_mii_frame frame;
754 int i, phy_reg;
1
'phy_reg' declared without an initial value
755
756 bzero(&frame, sizeof(frame))__builtin_bzero((&frame), (sizeof(frame)));
757
758 if (DC_IS_ADMTEK(sc)(sc->dc_type == 0x6 || sc->dc_type == 0x7) && phy != DC_ADMTEK_PHYADDR0x1)
2
Assuming field 'dc_type' is not equal to 6
3
Assuming field 'dc_type' is equal to 7
4
Assuming 'phy' is equal to DC_ADMTEK_PHYADDR
5
Taking false branch
759 return;
760 if (DC_IS_CONEXANT(sc)(sc->dc_type == 0xC) && phy != DC_CONEXANT_PHYADDR0x1)
761 return;
762
763 if (DC_IS_PNIC(sc)(sc->dc_type == 0xA)) {
6
Taking false branch
764 CSR_WRITE_4(sc, DC_PN_MII, DC_PN_MIIOPCODE_WRITE |((sc->dc_btag)->write_4((sc->dc_bhandle), (0xA0), (0x50020000
| (phy << 23) | (reg << 10) | data)))
765 (phy << 23) | (reg << 10) | data)((sc->dc_btag)->write_4((sc->dc_bhandle), (0xA0), (0x50020000
| (phy << 23) | (reg << 10) | data)))
;
766 for (i = 0; i < DC_TIMEOUT1000; i++) {
767 if (!(CSR_READ_4(sc, DC_PN_MII)((sc->dc_btag)->read_4((sc->dc_bhandle), (0xA0))) & DC_PN_MII_BUSY0x80000000))
768 break;
769 }
770 return;
771 }
772
773 if (DC_IS_COMET(sc)(sc->dc_type == 0x6)) {
7
Taking false branch
774 switch(reg) {
775 case MII_BMCR0x00:
776 phy_reg = DC_AL_BMCR0xB4;
777 break;
778 case MII_BMSR0x01:
779 phy_reg = DC_AL_BMSR0xB8;
780 break;
781 case MII_PHYIDR10x02:
782 phy_reg = DC_AL_VENID0xBC;
783 break;
784 case MII_PHYIDR20x03:
785 phy_reg = DC_AL_DEVID0xC0;
786 break;
787 case MII_ANAR0x04:
788 phy_reg = DC_AL_ANAR0xC4;
789 break;
790 case MII_ANLPAR0x05:
791 phy_reg = DC_AL_LPAR0xC8;
792 break;
793 case MII_ANER0x06:
794 phy_reg = DC_AL_ANER0xCC;
795 break;
796 default:
797 printf("%s: phy_write: bad phy register %x\n",
798 sc->sc_dev.dv_xname, reg);
799 return;
800 }
801
802 CSR_WRITE_4(sc, phy_reg, data)((sc->dc_btag)->write_4((sc->dc_bhandle), (phy_reg),
(data)))
;
803 return;
804 }
805
806 frame.mii_phyaddr = phy;
807 frame.mii_regaddr = reg;
808 frame.mii_data = data;
809
810 if (sc->dc_type == DC_TYPE_987130x1) {
7.1
Field 'dc_type' is not equal to DC_TYPE_98713
8
Taking false branch
811 phy_reg = CSR_READ_4(sc, DC_NETCFG)((sc->dc_btag)->read_4((sc->dc_bhandle), (0x30)));
812 CSR_WRITE_4(sc, DC_NETCFG, phy_reg & ~DC_NETCFG_PORTSEL)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x30), (phy_reg
& ~0x00040000)))
;
813 }
814 dc_mii_writereg(sc, &frame);
815 if (sc->dc_type == DC_TYPE_987130x1)
9
Assuming field 'dc_type' is equal to DC_TYPE_98713
10
Taking true branch
816 CSR_WRITE_4(sc, DC_NETCFG, phy_reg)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x30), (phy_reg
)))
;
11
3rd function call argument is an uninitialized value
817}
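Following the standalone model shown after the bug summary, one way such a report could be made to go away (a sketch only, not the upstream fix for this file; 'saved' is a hypothetical local) is to record whether the register was saved and key the restore on that flag instead of re-testing the field:

    /* Sketch of a restructured miibus_writereg() from the model above. */
    static void
    miibus_writereg_fixed(struct softc *s)
    {
        int save = 0;
        int saved = 0;                /* hypothetical flag: was netcfg saved? */

        if (s->type == 1) {
            save = s->netcfg;
            write_reg(s, save & ~0x4);
            saved = 1;
        }
        mii_write(s);
        if (saved)                    /* restore only if we actually saved */
            write_reg(s, save);
    }

In dc_miibus_writereg() the equivalent change would pair the DC_NETCFG save and restore on one such local flag (or simply give phy_reg an initial value), which removes the path the analyzer objects to without changing behaviour when the two dc_type tests agree.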
818
819void
820dc_miibus_statchg(struct device *self)
821{
822 struct dc_softc *sc = (struct dc_softc *)self;
823 struct mii_data *mii;
824 struct ifmedia *ifm;
825
826 if (DC_IS_ADMTEK(sc)(sc->dc_type == 0x6 || sc->dc_type == 0x7))
827 return;
828
829 mii = &sc->sc_mii;
830 ifm = &mii->mii_media;
831 if (DC_IS_DAVICOM(sc)(sc->dc_type == 0x8) && IFM_SUBTYPE(ifm->ifm_media)((ifm->ifm_media) & 0x00000000000000ffULL) == IFM_HPNA_117) {
832 dc_setcfg(sc, ifm->ifm_media);
833 sc->dc_if_media = ifm->ifm_media;
834 } else {
835 dc_setcfg(sc, mii->mii_media_active);
836 sc->dc_if_media = mii->mii_media_active;
837 }
838}
839
840#define DC_BITS_5129 9
841#define DC_BITS_1287 7
842#define DC_BITS_646 6
843
844u_int32_t
845dc_crc_le(struct dc_softc *sc, caddr_t addr)
846{
847 u_int32_t crc;
848
849 /* Compute CRC for the address value. */
850 crc = ether_crc32_le(addr, ETHER_ADDR_LEN6);
851
852 /*
853 * The hash table on the PNIC II and the MX98715AEC-C/D/E
854 * chips is only 128 bits wide.
855 */
856 if (sc->dc_flags & DC_128BIT_HASH0x00001000)
857 return (crc & ((1 << DC_BITS_1287) - 1));
858
859 /* The hash table on the MX98715BEC is only 64 bits wide. */
860 if (sc->dc_flags & DC_64BIT_HASH0x00002000)
861 return (crc & ((1 << DC_BITS_646) - 1));
862
863 /* Xircom's hash filtering table is different (read: weird) */
864 /* Xircom uses the LEAST significant bits */
865 if (DC_IS_XIRCOM(sc)(sc->dc_type == 0xB)) {
866 if ((crc & 0x180) == 0x180)
867 return (crc & 0x0F) + (crc & 0x70)*3 + (14 << 4);
868 else
869 return (crc & 0x1F) + ((crc>>1) & 0xF0)*3 + (12 << 4);
870 }
871
872 return (crc & ((1 << DC_BITS_5129) - 1));
873}
874
875/*
876 * Calculate CRC of a multicast group address, return the lower 6 bits.
877 */
878#define dc_crc_be(addr)((ether_crc32_be(addr,6) >> 26) & 0x0000003F) ((ether_crc32_be(addr,ETHER_ADDR_LEN6) >> 26) \
879 & 0x0000003F)
880
881/*
882 * 21143-style RX filter setup routine. Filter programming is done by
883 * downloading a special setup frame into the TX engine. 21143, Macronix,
884 * PNIC, PNIC II and Davicom chips are programmed this way.
885 *
886 * We always program the chip using 'hash perfect' mode, i.e. one perfect
887 * address (our node address) and a 512-bit hash filter for multicast
888 * frames. We also sneak the broadcast address into the hash filter since
889 * we need that too.
890 */
891void
892dc_setfilt_21143(struct dc_softc *sc)
893{
894 struct arpcom *ac = &sc->sc_arpcom;
895 struct ifnet *ifp = &sc->sc_arpcom.ac_if;
896 struct ether_multi *enm;
897 struct ether_multistep step;
898 struct dc_desc *sframe;
899 u_int32_t h, *sp;
900 int i;
901
902 i = sc->dc_cdata.dc_tx_prod;
903 DC_INC(sc->dc_cdata.dc_tx_prod, DC_TX_LIST_CNT)(sc->dc_cdata.dc_tx_prod) = (sc->dc_cdata.dc_tx_prod + 1
) % 256
;
904 sc->dc_cdata.dc_tx_cnt++;
905 sframe = &sc->dc_ldata->dc_tx_list[i];
906 sp = &sc->dc_ldata->dc_sbuf[0];
907 bzero(sp, DC_SFRAME_LEN)__builtin_bzero((sp), (192));
908
909 sframe->dc_datadc_ptr1 = htole32(sc->sc_listmap->dm_segs[0].ds_addr +((__uint32_t)(sc->sc_listmap->dm_segs[0].ds_addr + __builtin_offsetof
(struct dc_list_data, dc_sbuf)))
910 offsetof(struct dc_list_data, dc_sbuf))((__uint32_t)(sc->sc_listmap->dm_segs[0].ds_addr + __builtin_offsetof
(struct dc_list_data, dc_sbuf)))
;
911 sframe->dc_ctl = htole32(DC_SFRAME_LEN | DC_TXCTL_SETUP |((__uint32_t)(192 | 0x08000000 | 0x01000000 | 0x00400000 | 0x80000000
))
912 DC_TXCTL_TLINK | DC_FILTER_HASHPERF | DC_TXCTL_FINT)((__uint32_t)(192 | 0x08000000 | 0x01000000 | 0x00400000 | 0x80000000
))
;
913
914 sc->dc_cdata.dc_tx_chain[i].sd_mbuf =
915 (struct mbuf *)&sc->dc_ldata->dc_sbuf[0];
916
917 DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_RX_ALLMULTI | DC_NETCFG_RX_PROMISC))((sc->dc_btag)->write_4((sc->dc_bhandle), (0x30), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x30))) &
~((0x00000080 | 0x00000040)))))
;
918 ifp->if_flags &= ~IFF_ALLMULTI0x200;
919
920 if (ifp->if_flags & IFF_PROMISC0x100 || ac->ac_multirangecnt > 0) {
921 ifp->if_flags |= IFF_ALLMULTI0x200;
922 if (ifp->if_flags & IFF_PROMISC0x100)
923 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x30), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x30))) | (
0x00000040))))
;
924 else
925 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x30), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x30))) | (
0x00000080))))
;
926 } else {
927 ETHER_FIRST_MULTI(step, ac, enm)do { (step).e_enm = ((&(ac)->ac_multiaddrs)->lh_first
); do { if ((((enm)) = ((step)).e_enm) != ((void *)0)) ((step
)).e_enm = ((((enm)))->enm_list.le_next); } while ( 0); } while
( 0)
;
928 while (enm != NULL((void *)0)) {
929 h = dc_crc_le(sc, enm->enm_addrlo);
930
931 sp[h >> 4] |= htole32(1 << (h & 0xF))((__uint32_t)(1 << (h & 0xF)));
932
933 ETHER_NEXT_MULTI(step, enm)do { if (((enm) = (step).e_enm) != ((void *)0)) (step).e_enm =
(((enm))->enm_list.le_next); } while ( 0)
;
934 }
935 }
936
937 /*
938 * Always accept broadcast frames.
939 */
940 h = dc_crc_le(sc, (caddr_t)&etherbroadcastaddr);
941 sp[h >> 4] |= htole32(1 << (h & 0xF))((__uint32_t)(1 << (h & 0xF)));
942
943 /* Set our MAC address */
944 sp[39] = DC_SP_FIELD(sc->sc_arpcom.ac_enaddr, 0)(((u_int16_t *)(sc->sc_arpcom.ac_enaddr))[(0)]);
945 sp[40] = DC_SP_FIELD(sc->sc_arpcom.ac_enaddr, 1)(((u_int16_t *)(sc->sc_arpcom.ac_enaddr))[(1)]);
946 sp[41] = DC_SP_FIELD(sc->sc_arpcom.ac_enaddr, 2)(((u_int16_t *)(sc->sc_arpcom.ac_enaddr))[(2)]);
947
948 bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (sc->
sc_listmap), (__builtin_offsetof(struct dc_list_data, dc_sbuf
[0])), (sizeof(struct dc_list_data) - __builtin_offsetof(struct
dc_list_data, dc_sbuf[0])), (0x01 | 0x04))
949 offsetof(struct dc_list_data, dc_sbuf[0]),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (sc->
sc_listmap), (__builtin_offsetof(struct dc_list_data, dc_sbuf
[0])), (sizeof(struct dc_list_data) - __builtin_offsetof(struct
dc_list_data, dc_sbuf[0])), (0x01 | 0x04))
950 sizeof(struct dc_list_data) -(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (sc->
sc_listmap), (__builtin_offsetof(struct dc_list_data, dc_sbuf
[0])), (sizeof(struct dc_list_data) - __builtin_offsetof(struct
dc_list_data, dc_sbuf[0])), (0x01 | 0x04))
951 offsetof(struct dc_list_data, dc_sbuf[0]),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (sc->
sc_listmap), (__builtin_offsetof(struct dc_list_data, dc_sbuf
[0])), (sizeof(struct dc_list_data) - __builtin_offsetof(struct
dc_list_data, dc_sbuf[0])), (0x01 | 0x04))
952 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (sc->
sc_listmap), (__builtin_offsetof(struct dc_list_data, dc_sbuf
[0])), (sizeof(struct dc_list_data) - __builtin_offsetof(struct
dc_list_data, dc_sbuf[0])), (0x01 | 0x04))
;
953
954 sframe->dc_status = htole32(DC_TXSTAT_OWN)((__uint32_t)(0x80000000));
955
956 bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (sc->
sc_listmap), (__builtin_offsetof(struct dc_list_data, dc_tx_list
[i])), (sizeof(struct dc_desc)), (0x01 | 0x04))
957 offsetof(struct dc_list_data, dc_tx_list[i]),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (sc->
sc_listmap), (__builtin_offsetof(struct dc_list_data, dc_tx_list
[i])), (sizeof(struct dc_desc)), (0x01 | 0x04))
958 sizeof(struct dc_desc), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (sc->
sc_listmap), (__builtin_offsetof(struct dc_list_data, dc_tx_list
[i])), (sizeof(struct dc_desc)), (0x01 | 0x04))
;
959
960 CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x08), (0xFFFFFFFF
)))
;
961
962 /*
963 * The PNIC takes an exceedingly long time to process its
964 * setup frame; wait 10ms after posting the setup frame
965 * before proceeding, just so it has time to swallow its
966 * medicine.
967 */
968 DELAY(10000)(*delay_func)(10000);
969
970 ifp->if_timer = 5;
971}
972
973void
974dc_setfilt_admtek(struct dc_softc *sc)
975{
976 struct arpcom *ac = &sc->sc_arpcom;
977 struct ifnet *ifp = &sc->sc_arpcom.ac_if;
978 struct ether_multi *enm;
979 struct ether_multistep step;
980 u_int32_t hashes[2];
981 int h = 0;
982
983 DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_RX_ALLMULTI | DC_NETCFG_RX_PROMISC))((sc->dc_btag)->write_4((sc->dc_bhandle), (0x30), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x30))) &
~((0x00000080 | 0x00000040)))))
;
984 bzero(hashes, sizeof(hashes))__builtin_bzero((hashes), (sizeof(hashes)));
985 ifp->if_flags &= ~IFF_ALLMULTI0x200;
986
987 if (ifp->if_flags & IFF_PROMISC0x100 || ac->ac_multirangecnt > 0) {
988 ifp->if_flags |= IFF_ALLMULTI0x200;
989 if (ifp->if_flags & IFF_PROMISC0x100)
990 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x30), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x30))) | (
0x00000040))))
;
991 else
992 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x30), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x30))) | (
0x00000080))))
;
993 } else {
994 /* now program new ones */
995 ETHER_FIRST_MULTI(step, ac, enm)do { (step).e_enm = ((&(ac)->ac_multiaddrs)->lh_first
); do { if ((((enm)) = ((step)).e_enm) != ((void *)0)) ((step
)).e_enm = ((((enm)))->enm_list.le_next); } while ( 0); } while
( 0)
;
996 while (enm != NULL((void *)0)) {
997 if (DC_IS_CENTAUR(sc)(sc->dc_type == 0x7))
998 h = dc_crc_le(sc, enm->enm_addrlo);
999 else
1000 h = dc_crc_be(enm->enm_addrlo)((ether_crc32_be(enm->enm_addrlo,6) >> 26) & 0x0000003F
)
;
1001
1002 if (h < 32)
1003 hashes[0] |= (1 << h);
1004 else
1005 hashes[1] |= (1 << (h - 32));
1006
1007 ETHER_NEXT_MULTI(step, enm)do { if (((enm) = (step).e_enm) != ((void *)0)) (step).e_enm =
(((enm))->enm_list.le_next); } while ( 0)
;
1008 }
1009 }
1010
1011 /* Init our MAC address */
1012 CSR_WRITE_4(sc, DC_AL_PAR0, ac->ac_enaddr[3] << 24 |((sc->dc_btag)->write_4((sc->dc_bhandle), (0xA4), (ac
->ac_enaddr[3] << 24 | ac->ac_enaddr[2] << 16
| ac->ac_enaddr[1] << 8 | ac->ac_enaddr[0])))
1013 ac->ac_enaddr[2] << 16 | ac->ac_enaddr[1] << 8 | ac->ac_enaddr[0])((sc->dc_btag)->write_4((sc->dc_bhandle), (0xA4), (ac
->ac_enaddr[3] << 24 | ac->ac_enaddr[2] << 16
| ac->ac_enaddr[1] << 8 | ac->ac_enaddr[0])))
;
1014 CSR_WRITE_4(sc, DC_AL_PAR1, ac->ac_enaddr[5] << 8 | ac->ac_enaddr[4])((sc->dc_btag)->write_4((sc->dc_bhandle), (0xA8), (ac
->ac_enaddr[5] << 8 | ac->ac_enaddr[4])))
;
1015
1016 CSR_WRITE_4(sc, DC_AL_MAR0, hashes[0])((sc->dc_btag)->write_4((sc->dc_bhandle), (0xAC), (hashes
[0])))
;
1017 CSR_WRITE_4(sc, DC_AL_MAR1, hashes[1])((sc->dc_btag)->write_4((sc->dc_bhandle), (0xB0), (hashes
[1])))
;
1018}
1019
1020void
1021dc_setfilt_asix(struct dc_softc *sc)
1022{
1023 struct arpcom *ac = &sc->sc_arpcom;
1024 struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1025 struct ether_multi *enm;
1026 struct ether_multistep step;
1027 u_int32_t hashes[2];
1028 int h = 0;
1029
1030 DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_RX_ALLMULTI | DC_AX_NETCFG_RX_BROAD |((sc->dc_btag)->write_4((sc->dc_bhandle), (0x30), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x30))) &
~((0x00000080 | 0x00000100 | 0x00000040)))))
1031 DC_NETCFG_RX_PROMISC))((sc->dc_btag)->write_4((sc->dc_bhandle), (0x30), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x30))) &
~((0x00000080 | 0x00000100 | 0x00000040)))))
;
1032 bzero(hashes, sizeof(hashes))__builtin_bzero((hashes), (sizeof(hashes)));
1033 ifp->if_flags &= ~IFF_ALLMULTI0x200;
1034
1035 /*
1036 * Always accept broadcast frames.
1037 */
1038 DC_SETBIT(sc, DC_NETCFG, DC_AX_NETCFG_RX_BROAD)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x30), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x30))) | (
0x00000100))))
;
1039
1040 if (ifp->if_flags & IFF_PROMISC0x100 || ac->ac_multirangecnt > 0) {
1041 ifp->if_flags |= IFF_ALLMULTI0x200;
1042 if (ifp->if_flags & IFF_PROMISC0x100)
1043 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x30), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x30))) | (
0x00000040))))
;
1044 else
1045 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x30), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x30))) | (
0x00000080))))
;
1046 } else {
1047 /* now program new ones */
1048 ETHER_FIRST_MULTI(step, ac, enm)do { (step).e_enm = ((&(ac)->ac_multiaddrs)->lh_first
); do { if ((((enm)) = ((step)).e_enm) != ((void *)0)) ((step
)).e_enm = ((((enm)))->enm_list.le_next); } while ( 0); } while
( 0)
;
1049 while (enm != NULL((void *)0)) {
1050 h = dc_crc_be(enm->enm_addrlo)((ether_crc32_be(enm->enm_addrlo,6) >> 26) & 0x0000003F
)
;
1051
1052 if (h < 32)
1053 hashes[0] |= (1 << h);
1054 else
1055 hashes[1] |= (1 << (h - 32));
1056
1057 ETHER_NEXT_MULTI(step, enm)do { if (((enm) = (step).e_enm) != ((void *)0)) (step).e_enm =
(((enm))->enm_list.le_next); } while ( 0)
;
1058 }
1059 }
1060
1061 /* Init our MAC address */
1062 CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_PAR0)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x68), (0x00000000
)))
;
1063 CSR_WRITE_4(sc, DC_AX_FILTDATA,((sc->dc_btag)->write_4((sc->dc_bhandle), (0x70), (*
(u_int32_t *)(&sc->sc_arpcom.ac_enaddr[0]))))
1064 *(u_int32_t *)(&sc->sc_arpcom.ac_enaddr[0]))((sc->dc_btag)->write_4((sc->dc_bhandle), (0x70), (*
(u_int32_t *)(&sc->sc_arpcom.ac_enaddr[0]))))
;
1065 CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_PAR1)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x68), (0x00000001
)))
;
1066 CSR_WRITE_4(sc, DC_AX_FILTDATA,((sc->dc_btag)->write_4((sc->dc_bhandle), (0x70), (*
(u_int32_t *)(&sc->sc_arpcom.ac_enaddr[4]))))
1067 *(u_int32_t *)(&sc->sc_arpcom.ac_enaddr[4]))((sc->dc_btag)->write_4((sc->dc_bhandle), (0x70), (*
(u_int32_t *)(&sc->sc_arpcom.ac_enaddr[4]))))
;
1068
1069 CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR0)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x68), (0x00000002
)))
;
1070 CSR_WRITE_4(sc, DC_AX_FILTDATA, hashes[0])((sc->dc_btag)->write_4((sc->dc_bhandle), (0x70), (hashes
[0])))
;
1071 CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR1)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x68), (0x00000003
)))
;
1072 CSR_WRITE_4(sc, DC_AX_FILTDATA, hashes[1])((sc->dc_btag)->write_4((sc->dc_bhandle), (0x70), (hashes
[1])))
;
1073}
1074
1075void
1076dc_setfilt_xircom(struct dc_softc *sc)
1077{
1078 struct arpcom *ac = &sc->sc_arpcom;
1079 struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1080 struct ether_multi *enm;
1081 struct ether_multistep step;
1082 struct dc_desc *sframe;
1083 u_int32_t h, *sp;
1084 int i;
1085
1086 DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_TX_ON|DC_NETCFG_RX_ON))((sc->dc_btag)->write_4((sc->dc_bhandle), (0x30), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x30))) &
~((0x00002000|0x00000002)))))
;
1087
1088 i = sc->dc_cdata.dc_tx_prod;
1089 DC_INC(sc->dc_cdata.dc_tx_prod, DC_TX_LIST_CNT)(sc->dc_cdata.dc_tx_prod) = (sc->dc_cdata.dc_tx_prod + 1
) % 256
;
1090 sc->dc_cdata.dc_tx_cnt++;
1091 sframe = &sc->dc_ldata->dc_tx_list[i];
1092 sp = &sc->dc_ldata->dc_sbuf[0];
1093 bzero(sp, DC_SFRAME_LEN)__builtin_bzero((sp), (192));
1094
1095 sframe->dc_datadc_ptr1 = htole32(sc->sc_listmap->dm_segs[0].ds_addr +((__uint32_t)(sc->sc_listmap->dm_segs[0].ds_addr + __builtin_offsetof
(struct dc_list_data, dc_sbuf)))
1096 offsetof(struct dc_list_data, dc_sbuf))((__uint32_t)(sc->sc_listmap->dm_segs[0].ds_addr + __builtin_offsetof
(struct dc_list_data, dc_sbuf)))
;
1097 sframe->dc_ctl = htole32(DC_SFRAME_LEN | DC_TXCTL_SETUP |((__uint32_t)(192 | 0x08000000 | 0x01000000 | 0x00400000 | 0x80000000
))
1098 DC_TXCTL_TLINK | DC_FILTER_HASHPERF | DC_TXCTL_FINT)((__uint32_t)(192 | 0x08000000 | 0x01000000 | 0x00400000 | 0x80000000
))
;
1099
1100 sc->dc_cdata.dc_tx_chain[i].sd_mbuf =
1101 (struct mbuf *)&sc->dc_ldata->dc_sbuf[0];
1102
1103 DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_RX_ALLMULTI | DC_NETCFG_RX_PROMISC))((sc->dc_btag)->write_4((sc->dc_bhandle), (0x30), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x30))) &
~((0x00000080 | 0x00000040)))))
;
1104 ifp->if_flags &= ~IFF_ALLMULTI0x200;
1105
1106 if (ifp->if_flags & IFF_PROMISC0x100 || ac->ac_multirangecnt > 0) {
1107 ifp->if_flags |= IFF_ALLMULTI0x200;
1108 if (ifp->if_flags & IFF_PROMISC0x100)
1109 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x30), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x30))) | (
0x00000040))))
;
1110 else
1111 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x30), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x30))) | (
0x00000080))))
;
1112 } else {
1113 /* now program new ones */
1114 ETHER_FIRST_MULTI(step, ac, enm)do { (step).e_enm = ((&(ac)->ac_multiaddrs)->lh_first
); do { if ((((enm)) = ((step)).e_enm) != ((void *)0)) ((step
)).e_enm = ((((enm)))->enm_list.le_next); } while ( 0); } while
( 0)
;
1115 while (enm != NULL((void *)0)) {
1116 h = dc_crc_le(sc, enm->enm_addrlo);
1117
1118 sp[h >> 4] |= htole32(1 << (h & 0xF))((__uint32_t)(1 << (h & 0xF)));
1119
1120 ETHER_NEXT_MULTI(step, enm)do { if (((enm) = (step).e_enm) != ((void *)0)) (step).e_enm =
(((enm))->enm_list.le_next); } while ( 0)
;
1121 }
1122 }
1123
1124 /*
1125 * Always accept broadcast frames.
1126 */
1127 h = dc_crc_le(sc, (caddr_t)&etherbroadcastaddr);
1128 sp[h >> 4] |= htole32(1 << (h & 0xF))((__uint32_t)(1 << (h & 0xF)));
1129
1130 /* Set our MAC address */
1131 sp[0] = DC_SP_FIELD(sc->sc_arpcom.ac_enaddr, 0)(((u_int16_t *)(sc->sc_arpcom.ac_enaddr))[(0)]);
1132 sp[1] = DC_SP_FIELD(sc->sc_arpcom.ac_enaddr, 1)(((u_int16_t *)(sc->sc_arpcom.ac_enaddr))[(1)]);
1133 sp[2] = DC_SP_FIELD(sc->sc_arpcom.ac_enaddr, 2)(((u_int16_t *)(sc->sc_arpcom.ac_enaddr))[(2)]);
1134
1135 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x30), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x30))) | (
0x00002000))))
;
1136 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ON)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x30), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x30))) | (
0x00000002))))
;
1137 ifp->if_flags |= IFF_RUNNING0x40;
1138 sframe->dc_status = htole32(DC_TXSTAT_OWN)((__uint32_t)(0x80000000));
1139 CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x08), (0xFFFFFFFF
)))
;
1140
1141 /*
1142 * wait some time...
1143 */
1144 DELAY(1000)(*delay_func)(1000);
1145
1146 ifp->if_timer = 5;
1147}
1148
1149void
1150dc_setfilt(struct dc_softc *sc)
1151{
1152 if (DC_IS_INTEL(sc)(sc->dc_type == 0x4 || sc->dc_type == 0xD) || DC_IS_MACRONIX(sc)(sc->dc_type == 0x1 || sc->dc_type == 0x2 || sc->dc_type
== 0x3)
|| DC_IS_PNIC(sc)(sc->dc_type == 0xA) ||
1153 DC_IS_PNICII(sc)(sc->dc_type == 0x9) || DC_IS_DAVICOM(sc)(sc->dc_type == 0x8) || DC_IS_CONEXANT(sc)(sc->dc_type == 0xC))
1154 dc_setfilt_21143(sc);
1155
1156 if (DC_IS_ASIX(sc)(sc->dc_type == 0x5))
1157 dc_setfilt_asix(sc);
1158
1159 if (DC_IS_ADMTEK(sc)(sc->dc_type == 0x6 || sc->dc_type == 0x7))
1160 dc_setfilt_admtek(sc);
1161
1162 if (DC_IS_XIRCOM(sc)(sc->dc_type == 0xB))
1163 dc_setfilt_xircom(sc);
1164}
1165
1166/*
1167 * In order to fiddle with the
1168 * 'full-duplex' and '100Mbps' bits in the netconfig register, we
1169 * first have to put the transmit and/or receive logic in the idle state.
1170 */
1171void
1172dc_setcfg(struct dc_softc *sc, uint64_t media)
1173{
1174 int i, restart = 0;
1175 u_int32_t isr;
1176
1177 if (IFM_SUBTYPE(media)((media) & 0x00000000000000ffULL) == IFM_NONE2ULL)
1178 return;
1179
1180 if (CSR_READ_4(sc, DC_NETCFG)((sc->dc_btag)->read_4((sc->dc_bhandle), (0x30))) & (DC_NETCFG_TX_ON0x00002000|DC_NETCFG_RX_ON0x00000002)) {
1181 restart = 1;
1182 DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_TX_ON|DC_NETCFG_RX_ON))((sc->dc_btag)->write_4((sc->dc_bhandle), (0x30), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x30))) &
~((0x00002000|0x00000002)))))
;
1183
1184 for (i = 0; i < DC_TIMEOUT1000; i++) {
1185 isr = CSR_READ_4(sc, DC_ISR)((sc->dc_btag)->read_4((sc->dc_bhandle), (0x28)));
1186 if (isr & DC_ISR_TX_IDLE0x00000002 &&
1187 ((isr & DC_ISR_RX_STATE0x000E0000) == DC_RXSTATE_STOPPED0x00000000 ||
1188 (isr & DC_ISR_RX_STATE0x000E0000) == DC_RXSTATE_WAIT0x00060000))
1189 break;
1190 DELAY(10)(*delay_func)(10);
1191 }
1192
1193 if (i == DC_TIMEOUT1000) {
1194 if (!(isr & DC_ISR_TX_IDLE0x00000002) && !DC_IS_ASIX(sc)(sc->dc_type == 0x5))
1195 printf("%s: failed to force tx to idle state\n",
1196 sc->sc_dev.dv_xname);
1197 if (!((isr & DC_ISR_RX_STATE0x000E0000) == DC_RXSTATE_STOPPED0x00000000 ||
1198 (isr & DC_ISR_RX_STATE0x000E0000) == DC_RXSTATE_WAIT0x00060000) &&
1199 !DC_HAS_BROKEN_RXSTATE(sc)((sc->dc_type == 0x7) || (sc->dc_type == 0xC) || ((sc->
dc_type == 0x8) && sc->dc_revision >= 0x30))
)
1200 printf("%s: failed to force rx to idle state\n",
1201 sc->sc_dev.dv_xname);
1202 }
1203 }
1204
1205 if (IFM_SUBTYPE(media)((media) & 0x00000000000000ffULL) == IFM_100_TX6) {
1206 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_SPEEDSEL)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x30), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x30))) &
~(0x00400000))))
;
1207 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_HEARTBEAT)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x30), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x30))) | (
0x00080000))))
;
1208 if (sc->dc_pmode == DC_PMODE_MII0x1) {
1209 int watchdogreg;
1210
1211 if (DC_IS_INTEL(sc)(sc->dc_type == 0x4 || sc->dc_type == 0xD)) {
1212 /* there's a write enable bit here that reads as 1 */
1213 watchdogreg = CSR_READ_4(sc, DC_WATCHDOG)((sc->dc_btag)->read_4((sc->dc_bhandle), (0x78)));
1214 watchdogreg &= ~DC_WDOG_CTLWREN0x08000000;
1215 watchdogreg |= DC_WDOG_JABBERDIS0x00000001;
1216 CSR_WRITE_4(sc, DC_WATCHDOG, watchdogreg)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x78), (watchdogreg
)))
;
1217 } else {
1218 DC_SETBIT(sc, DC_WATCHDOG, DC_WDOG_JABBERDIS)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x78), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x78))) | (
0x00000001))))
;
1219 }
1220 DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_PCS|((sc->dc_btag)->write_4((sc->dc_bhandle), (0x30), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x30))) &
~((0x00800000| 0x00040000|0x01000000)))))
1221 DC_NETCFG_PORTSEL|DC_NETCFG_SCRAMBLER))((sc->dc_btag)->write_4((sc->dc_bhandle), (0x30), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x30))) &
~((0x00800000| 0x00040000|0x01000000)))))
;
1222 if (sc->dc_type == DC_TYPE_987130x1)
1223 DC_SETBIT(sc, DC_NETCFG, (DC_NETCFG_PCS|((sc->dc_btag)->write_4((sc->dc_bhandle), (0x30), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x30))) | (
(0x00800000| 0x01000000)))))
1224 DC_NETCFG_SCRAMBLER))((sc->dc_btag)->write_4((sc->dc_bhandle), (0x30), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x30))) | (
(0x00800000| 0x01000000)))))
;
1225 if (!DC_IS_DAVICOM(sc)(sc->dc_type == 0x8))
1226 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x30), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x30))) | (
0x00040000))))
;
1227 DC_CLRBIT(sc, DC_10BTCTRL, 0xFFFF)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x70), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x70))) &
~(0xFFFF))))
;
1228 if (DC_IS_INTEL(sc)(sc->dc_type == 0x4 || sc->dc_type == 0xD))
1229 dc_apply_fixup(sc, IFM_AUTO0ULL);
1230 } else {
1231 if (DC_IS_PNIC(sc)(sc->dc_type == 0xA)) {
1232 DC_PN_GPIO_SETBIT(sc, DC_PN_GPIO_SPEEDSEL)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x60), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x60))) | (
((0x000000001) | (0x000000001 << 4))))))
;
1233 DC_PN_GPIO_SETBIT(sc, DC_PN_GPIO_100TX_LOOP)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x60), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x60))) | (
((0x000000002) | (0x000000002 << 4))))))
;
1234 DC_SETBIT(sc, DC_PN_NWAY, DC_PN_NWAY_SPEEDSEL)((sc->dc_btag)->write_4((sc->dc_bhandle), (0xB8), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0xB8))) | (
0x00000800))))
;
1235 }
1236 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x30), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x30))) | (
0x00040000))))
;
1237 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PCS)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x30), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x30))) | (
0x00800000))))
;
1238 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_SCRAMBLER)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x30), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x30))) | (
0x01000000))))
;
1239 if (DC_IS_INTEL(sc)(sc->dc_type == 0x4 || sc->dc_type == 0xD))
1240 dc_apply_fixup(sc,
1241 (media & IFM_GMASK0x00ffff0000000000ULL) == IFM_FDX0x0000010000000000ULL ?
1242 IFM_100_TX6|IFM_FDX0x0000010000000000ULL : IFM_100_TX6);
1243 }
1244 }
1245
1246 if (IFM_SUBTYPE(media)((media) & 0x00000000000000ffULL) == IFM_10_T3) {
1247 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_SPEEDSEL)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x30), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x30))) | (
0x00400000))))
;
1248 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_HEARTBEAT)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x30), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x30))) &
~(0x00080000))))
;
1249 if (sc->dc_pmode == DC_PMODE_MII0x1) {
1250 int watchdogreg;
1251
1252 if (DC_IS_INTEL(sc)(sc->dc_type == 0x4 || sc->dc_type == 0xD)) {
1253 /* there's a write enable bit here that reads as 1 */
1254 watchdogreg = CSR_READ_4(sc, DC_WATCHDOG)((sc->dc_btag)->read_4((sc->dc_bhandle), (0x78)));
1255 watchdogreg &= ~DC_WDOG_CTLWREN0x08000000;
1256 watchdogreg |= DC_WDOG_JABBERDIS0x00000001;
1257 CSR_WRITE_4(sc, DC_WATCHDOG, watchdogreg)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x78), (watchdogreg
)))
;
1258 } else {
1259 DC_SETBIT(sc, DC_WATCHDOG, DC_WDOG_JABBERDIS)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x78), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x78))) | (
0x00000001))))
;
1260 }
1261 DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_PCS|((sc->dc_btag)->write_4((sc->dc_bhandle), (0x30), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x30))) &
~((0x00800000| 0x00040000|0x01000000)))))
1262 DC_NETCFG_PORTSEL|DC_NETCFG_SCRAMBLER))((sc->dc_btag)->write_4((sc->dc_bhandle), (0x30), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x30))) &
~((0x00800000| 0x00040000|0x01000000)))))
;
1263 if (sc->dc_type == DC_TYPE_987130x1)
1264 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PCS)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x30), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x30))) | (
0x00800000))))
;
1265 if (!DC_IS_DAVICOM(sc)(sc->dc_type == 0x8))
1266 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x30), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x30))) | (
0x00040000))))
;
1267 DC_CLRBIT(sc, DC_10BTCTRL, 0xFFFF)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x70), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x70))) &
~(0xFFFF))))
;
1268 if (DC_IS_INTEL(sc)(sc->dc_type == 0x4 || sc->dc_type == 0xD))
1269 dc_apply_fixup(sc, IFM_AUTO0ULL);
1270 } else {
1271 if (DC_IS_PNIC(sc)(sc->dc_type == 0xA)) {
1272 DC_PN_GPIO_CLRBIT(sc, DC_PN_GPIO_SPEEDSEL){ ((sc->dc_btag)->write_4((sc->dc_bhandle), (0x60), (
((sc->dc_btag)->read_4((sc->dc_bhandle), (0x60))) | (
((0x000000001) << 4))))); ((sc->dc_btag)->write_4
((sc->dc_bhandle), (0x60), (((sc->dc_btag)->read_4((
sc->dc_bhandle), (0x60))) & ~((0x000000001))))); }
;
1273 DC_PN_GPIO_SETBIT(sc, DC_PN_GPIO_100TX_LOOP)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x60), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x60))) | (
((0x000000002) | (0x000000002 << 4))))))
;
1274 DC_CLRBIT(sc, DC_PN_NWAY, DC_PN_NWAY_SPEEDSEL)((sc->dc_btag)->write_4((sc->dc_bhandle), (0xB8), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0xB8))) &
~(0x00000800))))
;
1275 }
1276 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x30), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x30))) &
~(0x00040000))))
;
1277 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_PCS)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x30), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x30))) &
~(0x00800000))))
;
1278 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_SCRAMBLER)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x30), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x30))) &
~(0x01000000))))
;
1279 if (DC_IS_INTEL(sc)(sc->dc_type == 0x4 || sc->dc_type == 0xD)) {
1280 DC_CLRBIT(sc, DC_SIARESET, DC_SIA_RESET)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x68), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x68))) &
~(0x00000001))))
;
1281 DC_CLRBIT(sc, DC_10BTCTRL, 0xFFFF)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x70), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x70))) &
~(0xFFFF))))
;
1282 if ((media & IFM_GMASK0x00ffff0000000000ULL) == IFM_FDX0x0000010000000000ULL)
1283 DC_SETBIT(sc, DC_10BTCTRL, 0x7F3D)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x70), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x70))) | (
0x7F3D))))
;
1284 else
1285 DC_SETBIT(sc, DC_10BTCTRL, 0x7F3F)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x70), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x70))) | (
0x7F3F))))
;
1286 DC_SETBIT(sc, DC_SIARESET, DC_SIA_RESET)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x68), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x68))) | (
0x00000001))))
;
1287 DC_CLRBIT(sc, DC_10BTCTRL,((sc->dc_btag)->write_4((sc->dc_bhandle), (0x70), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x70))) &
~(0x00000080))))
1288 DC_TCTL_AUTONEGENBL)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x70), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x70))) &
~(0x00000080))))
;
1289 dc_apply_fixup(sc,
1290 (media & IFM_GMASK0x00ffff0000000000ULL) == IFM_FDX0x0000010000000000ULL ?
1291 IFM_10_T3|IFM_FDX0x0000010000000000ULL : IFM_10_T3);
1292 DELAY(20000)(*delay_func)(20000);
1293 }
1294 }
1295 }
1296
1297 /*
1298 * If this is a Davicom DM9102A card with a DM9801 HomePNA
1299 * PHY and we want HomePNA mode, set the portsel bit to turn
1300 * on the external MII port.
1301 */
1302 if (DC_IS_DAVICOM(sc)(sc->dc_type == 0x8)) {
1303 if (IFM_SUBTYPE(media)((media) & 0x00000000000000ffULL) == IFM_HPNA_117) {
1304 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x30), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x30))) | (
0x00040000))))
;
1305 sc->dc_link = 1;
1306 } else {
1307 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x30), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x30))) &
~(0x00040000))))
;
1308 }
1309 }
1310
1311 if ((media & IFM_GMASK0x00ffff0000000000ULL) == IFM_FDX0x0000010000000000ULL) {
1312 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_FULLDUPLEX)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x30), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x30))) | (
0x00000200))))
;
1313 if (sc->dc_pmode == DC_PMODE_SYM0x2 && DC_IS_PNIC(sc)(sc->dc_type == 0xA))
1314 DC_SETBIT(sc, DC_PN_NWAY, DC_PN_NWAY_DUPLEX)((sc->dc_btag)->write_4((sc->dc_bhandle), (0xB8), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0xB8))) | (
0x00000100))))
;
1315 } else {
1316 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_FULLDUPLEX)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x30), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x30))) &
~(0x00000200))))
;
1317 if (sc->dc_pmode == DC_PMODE_SYM0x2 && DC_IS_PNIC(sc)(sc->dc_type == 0xA))
1318 DC_CLRBIT(sc, DC_PN_NWAY, DC_PN_NWAY_DUPLEX)((sc->dc_btag)->write_4((sc->dc_bhandle), (0xB8), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0xB8))) &
~(0x00000100))))
;
1319 }
1320
1321 if (restart)
1322 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON|DC_NETCFG_RX_ON)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x30), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x30))) | (
0x00002000|0x00000002))))
;
1323}
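
/*
 * Editor's note (not part of dc.c): dc_setcfg() above first clears
 * TX_ON/RX_ON and then polls the ISR until the transmitter is idle and
 * the receiver is stopped or waiting, because the speed/duplex bits may
 * only be changed while the DMA engines are quiet.  The sketch below
 * restates that poll loop against hypothetical accessors; reg_read,
 * pause, POLL_LIMIT and the bit values are illustrative stand-ins, not
 * dc.c symbols.
 */
#include <stdint.h>

#define POLL_LIMIT	1000		/* retry budget, like DC_TIMEOUT */
#define TX_IDLE		0x00000002u	/* stands in for DC_ISR_TX_IDLE */
#define RX_STATE	0x000E0000u	/* stands in for DC_ISR_RX_STATE */
#define RX_STOPPED	0x00000000u	/* DC_RXSTATE_STOPPED */
#define RX_WAIT		0x00060000u	/* DC_RXSTATE_WAIT */

/* Returns 0 once both engines are quiet, -1 if the budget runs out. */
int
wait_for_idle(uint32_t (*reg_read)(void *, int), void *dev, int isr_reg,
    void (*pause)(unsigned int))
{
	uint32_t isr;
	int i;

	for (i = 0; i < POLL_LIMIT; i++) {
		isr = reg_read(dev, isr_reg);
		if ((isr & TX_IDLE) &&
		    ((isr & RX_STATE) == RX_STOPPED ||
		     (isr & RX_STATE) == RX_WAIT))
			return (0);
		pause(10);	/* ~10us between polls, as the driver does */
	}
	return (-1);		/* caller decides how loudly to complain */
}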
1324
1325void
1326dc_reset(struct dc_softc *sc)
1327{
1328 int i;
1329
1330 DC_SETBIT(sc, DC_BUSCTL, DC_BUSCTL_RESET)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x00), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x00))) | (
0x00000001))))
;
1331
1332 for (i = 0; i < DC_TIMEOUT1000; i++) {
1333 DELAY(10)(*delay_func)(10);
1334 if (!(CSR_READ_4(sc, DC_BUSCTL)((sc->dc_btag)->read_4((sc->dc_bhandle), (0x00))) & DC_BUSCTL_RESET0x00000001))
1335 break;
1336 }
1337
1338 if (DC_IS_ASIX(sc)(sc->dc_type == 0x5) || DC_IS_ADMTEK(sc)(sc->dc_type == 0x6 || sc->dc_type == 0x7) || DC_IS_XIRCOM(sc)(sc->dc_type == 0xB) ||
1339 DC_IS_INTEL(sc)(sc->dc_type == 0x4 || sc->dc_type == 0xD) || DC_IS_CONEXANT(sc)(sc->dc_type == 0xC)) {
1340 DELAY(10000)(*delay_func)(10000);
1341 DC_CLRBIT(sc, DC_BUSCTL, DC_BUSCTL_RESET)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x00), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x00))) &
~(0x00000001))))
;
1342 i = 0;
1343 }
1344
1345 if (i == DC_TIMEOUT1000)
1346 printf("%s: reset never completed!\n", sc->sc_dev.dv_xname);
1347
1348 /* Wait a little while for the chip to get its brains in order. */
1349 DELAY(1000)(*delay_func)(1000);
1350
1351 CSR_WRITE_4(sc, DC_IMR, 0x00000000)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x38), (0x00000000
)))
;
1352 CSR_WRITE_4(sc, DC_BUSCTL, 0x00000000)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x00), (0x00000000
)))
;
1353 CSR_WRITE_4(sc, DC_NETCFG, 0x00000000)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x30), (0x00000000
)))
;
1354
1355 /*
1356 * Bring the SIA out of reset. In some cases, it looks
1357 * like failing to unreset the SIA soon enough gets it
1358 * into a state where it will never come out of reset
1359 * until we reset the whole chip again.
1360 */
1361 if (DC_IS_INTEL(sc)(sc->dc_type == 0x4 || sc->dc_type == 0xD)) {
1362 DC_SETBIT(sc, DC_SIARESET, DC_SIA_RESET)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x68), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x68))) | (
0x00000001))))
;
1363 CSR_WRITE_4(sc, DC_10BTCTRL, 0)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x70), (0
)))
;
1364 CSR_WRITE_4(sc, DC_WATCHDOG, 0)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x78), (0
)))
;
1365 }
1366
1367 if (sc->dc_type == DC_TYPE_211450xD)
1368 dc_setcfg(sc, IFM_10_T3);
1369}
1370
1371void
1372dc_apply_fixup(struct dc_softc *sc, uint64_t media)
1373{
1374 struct dc_mediainfo *m;
1375 u_int8_t *p;
1376 int i;
1377 u_int32_t reg;
1378
1379 m = sc->dc_mi;
1380
1381 while (m != NULL((void *)0)) {
1382 if (m->dc_media == media)
1383 break;
1384 m = m->dc_nextdc_ptr2;
1385 }
1386
1387 if (m == NULL((void *)0))
1388 return;
1389
1390 for (i = 0, p = m->dc_reset_ptr; i < m->dc_reset_len; i++, p += 2) {
1391 reg = (p[0] | (p[1] << 8)) << 16;
1392 CSR_WRITE_4(sc, DC_WATCHDOG, reg)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x78), (reg
)))
;
1393 }
1394
1395 for (i = 0, p = m->dc_gp_ptr; i < m->dc_gp_len; i++, p += 2) {
1396 reg = (p[0] | (p[1] << 8)) << 16;
1397 CSR_WRITE_4(sc, DC_WATCHDOG, reg)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x78), (reg
)))
;
1398 }
1399}
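
/*
 * Editor's note (not part of dc.c): dc_apply_fixup() above replays the
 * reset and GPIO byte sequences stored in the SROM media block, turning
 * each little-endian byte pair into a 16-bit word and shifting it into
 * the upper half of the register it writes.  A minimal sketch of just
 * that word assembly; the helper name is illustrative.
 */
#include <stdint.h>

uint32_t
srom_word_to_reg(const uint8_t *p)
{
	/* p[0] is the low byte, p[1] the high byte of the 16-bit word. */
	return ((uint32_t)(p[0] | (p[1] << 8)) << 16);
}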
1400
1401void
1402dc_decode_leaf_sia(struct dc_softc *sc, struct dc_eblock_sia *l)
1403{
1404 struct dc_mediainfo *m;
1405
1406 m = malloc(sizeof(*m), M_DEVBUF2, M_NOWAIT0x0002 | M_ZERO0x0008);
1407 if (m == NULL((void *)0))
1408 return;
1409 switch (l->dc_sia_code & ~DC_SIA_CODE_EXT0x40) {
1410 case DC_SIA_CODE_10BT0x00:
1411 m->dc_media = IFM_10_T3;
1412 break;
1413 case DC_SIA_CODE_10BT_FDX0x04:
1414 m->dc_media = IFM_10_T3|IFM_FDX0x0000010000000000ULL;
1415 break;
1416 case DC_SIA_CODE_10B20x01:
1417 m->dc_media = IFM_10_24;
1418 break;
1419 case DC_SIA_CODE_10B50x02:
1420 m->dc_media = IFM_10_55;
1421 break;
1422 default:
1423 break;
1424 }
1425
1426 /*
1427 * We need to ignore CSR13, CSR14, CSR15 for SIA mode.
1428 * Things apparently already work for cards that do
1429 * supply Media Specific Data.
1430 */
1431 if (l->dc_sia_code & DC_SIA_CODE_EXT0x40) {
1432 m->dc_gp_len = 2;
1433 m->dc_gp_ptr =
1434 (u_int8_t *)&l->dc_un.dc_sia_ext.dc_sia_gpio_ctl;
1435 } else {
1436 m->dc_gp_len = 2;
1437 m->dc_gp_ptr =
1438 (u_int8_t *)&l->dc_un.dc_sia_noext.dc_sia_gpio_ctl;
1439 }
1440
1441 m->dc_nextdc_ptr2 = sc->dc_mi;
1442 sc->dc_mi = m;
1443
1444 sc->dc_pmode = DC_PMODE_SIA0x3;
1445}
1446
1447void
1448dc_decode_leaf_sym(struct dc_softc *sc, struct dc_eblock_sym *l)
1449{
1450 struct dc_mediainfo *m;
1451
1452 m = malloc(sizeof(*m), M_DEVBUF2, M_NOWAIT0x0002 | M_ZERO0x0008);
1453 if (m == NULL((void *)0))
1454 return;
1455 if (l->dc_sym_code == DC_SYM_CODE_100BT0x03)
1456 m->dc_media = IFM_100_TX6;
1457
1458 if (l->dc_sym_code == DC_SYM_CODE_100BT_FDX0x05)
1459 m->dc_media = IFM_100_TX6|IFM_FDX0x0000010000000000ULL;
1460
1461 m->dc_gp_len = 2;
1462 m->dc_gp_ptr = (u_int8_t *)&l->dc_sym_gpio_ctl;
1463
1464 m->dc_nextdc_ptr2 = sc->dc_mi;
1465 sc->dc_mi = m;
1466
1467 sc->dc_pmode = DC_PMODE_SYM0x2;
1468}
1469
1470void
1471dc_decode_leaf_mii(struct dc_softc *sc, struct dc_eblock_mii *l)
1472{
1473 u_int8_t *p;
1474 struct dc_mediainfo *m;
1475
1476 m = malloc(sizeof(*m), M_DEVBUF2, M_NOWAIT0x0002 | M_ZERO0x0008);
1477 if (m == NULL((void *)0))
1478 return;
1479 /* We abuse IFM_AUTO to represent MII. */
1480 m->dc_media = IFM_AUTO0ULL;
1481 m->dc_gp_len = l->dc_gpr_len;
1482
1483 p = (u_int8_t *)l;
1484 p += sizeof(struct dc_eblock_mii);
1485 m->dc_gp_ptr = p;
1486 p += 2 * l->dc_gpr_len;
1487 m->dc_reset_len = *p;
1488 p++;
1489 m->dc_reset_ptr = p;
1490
1491 m->dc_nextdc_ptr2 = sc->dc_mi;
1492 sc->dc_mi = m;
1493}
1494
1495void
1496dc_read_srom(struct dc_softc *sc, int bits)
1497{
1498 sc->dc_sromsize = 2 << bits;
1499 sc->dc_srom = malloc(sc->dc_sromsize, M_DEVBUF2, M_NOWAIT0x0002);
1500 if (sc->dc_srom == NULL((void *)0))
1501 return;
1502 dc_read_eeprom(sc, (caddr_t)sc->dc_srom, 0, (sc->dc_sromsize / 2), 0);
1503}
1504
1505void
1506dc_parse_21143_srom(struct dc_softc *sc)
1507{
1508 struct dc_leaf_hdr *lhdr;
1509 struct dc_eblock_hdr *hdr;
1510 int have_mii, i, loff;
1511 char *ptr;
1512
1513 have_mii = 0;
1514 loff = sc->dc_srom[27];
1515 lhdr = (struct dc_leaf_hdr *)&(sc->dc_srom[loff]);
1516
1517 ptr = (char *)lhdr;
1518 ptr += sizeof(struct dc_leaf_hdr) - 1;
1519 /*
1520 * Check whether we got an MII media block.
1521 */
1522 for (i = 0; i < lhdr->dc_mcnt; i++) {
1523 hdr = (struct dc_eblock_hdr *)ptr;
1524 if (hdr->dc_type == DC_EBLOCK_MII0x0003)
1525 have_mii++;
1526
1527 ptr += (hdr->dc_len & 0x7F);
1528 ptr++;
1529 }
1530
1531 /*
1532 * Do the same thing again. Only use SIA and SYM media
1533 * blocks if no MII media block is available.
1534 */
1535 ptr = (char *)lhdr;
1536 ptr += sizeof(struct dc_leaf_hdr) - 1;
1537 for (i = 0; i < lhdr->dc_mcnt; i++) {
1538 hdr = (struct dc_eblock_hdr *)ptr;
1539 switch(hdr->dc_type) {
1540 case DC_EBLOCK_MII0x0003:
1541 dc_decode_leaf_mii(sc, (struct dc_eblock_mii *)hdr);
1542 break;
1543 case DC_EBLOCK_SIA0x0002:
1544 if (! have_mii)
1545 dc_decode_leaf_sia(sc,
1546 (struct dc_eblock_sia *)hdr);
1547 break;
1548 case DC_EBLOCK_SYM0x0004:
1549 if (! have_mii)
1550 dc_decode_leaf_sym(sc,
1551 (struct dc_eblock_sym *)hdr);
1552 break;
1553 default:
1554 /* Don't care. Yet. */
1555 break;
1556 }
1557 ptr += (hdr->dc_len & 0x7F);
1558 ptr++;
1559 }
1560}
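
/*
 * Editor's note (not part of dc.c): dc_parse_21143_srom() above walks
 * the info leaf twice - once to see whether an MII block exists, once
 * to decode blocks, preferring MII over SIA/SYM.  Each block starts
 * with a type byte and a length byte, and the cursor advances by
 * (length & 0x7F) plus one.  A minimal sketch of that walk; the struct
 * layout, block_count and visit callback are illustrative assumptions.
 */
#include <stddef.h>
#include <stdint.h>

struct eblock_hdr {
	uint8_t type;
	uint8_t len;
};

/* Visit every block in the leaf; returns how many blocks were seen. */
int
walk_info_leaf(const uint8_t *leaf, int block_count,
    void (*visit)(const struct eblock_hdr *))
{
	const uint8_t *p = leaf;
	int i;

	for (i = 0; i < block_count; i++) {
		const struct eblock_hdr *hdr = (const struct eblock_hdr *)p;

		if (visit != NULL)
			visit(hdr);
		/* Mirror the driver's stepping: (len & 0x7F) bytes, then one more. */
		p += (hdr->len & 0x7F);
		p++;
	}
	return (i);
}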
1561
1562/*
1563 * Attach the interface. Allocate softc structures, do ifmedia
1564 * setup and ethernet/BPF attach.
1565 */
1566void
1567dc_attach(struct dc_softc *sc)
1568{
1569 struct ifnet *ifp;
1570 int mac_offset, tmp, i;
1571 u_int32_t reg;
1572
1573 /*
1574 * Get station address from the EEPROM.
1575 */
1576 if (sc->sc_hasmac)
1577 goto hasmac;
1578
1579 switch(sc->dc_type) {
1580 case DC_TYPE_987130x1:
1581 case DC_TYPE_98713A0x2:
1582 case DC_TYPE_987x50x3:
1583 case DC_TYPE_PNICII0x9:
1584 dc_read_eeprom(sc, (caddr_t)&mac_offset,
1585 (DC_EE_NODEADDR_OFFSET0x70 / 2), 1, 0);
1586 dc_read_eeprom(sc, (caddr_t)&sc->sc_arpcom.ac_enaddr,
1587 (mac_offset / 2), 3, 0);
1588 break;
1589 case DC_TYPE_PNIC0xA:
1590 dc_read_eeprom(sc, (caddr_t)&sc->sc_arpcom.ac_enaddr, 0, 3, 1);
1591 break;
1592 case DC_TYPE_DM91020x8:
1593 case DC_TYPE_211430x4:
1594 case DC_TYPE_211450xD:
1595 case DC_TYPE_ASIX0x5:
1596 dc_read_eeprom(sc, (caddr_t)&sc->sc_arpcom.ac_enaddr,
1597 DC_EE_NODEADDR10, 3, 0);
1598 break;
1599 case DC_TYPE_AL9810x6:
1600 case DC_TYPE_AN9830x7:
1601 reg = CSR_READ_4(sc, DC_AL_PAR0)((sc->dc_btag)->read_4((sc->dc_bhandle), (0xA4)));
1602 sc->sc_arpcom.ac_enaddr[0] = (reg & 0xff);
1603 sc->sc_arpcom.ac_enaddr[1] = (reg >> 8) & 0xff;
1604 sc->sc_arpcom.ac_enaddr[2] = (reg >> 16) & 0xff;
1605 sc->sc_arpcom.ac_enaddr[3] = (reg >> 24) & 0xff;
1606 reg = CSR_READ_4(sc, DC_AL_PAR1)((sc->dc_btag)->read_4((sc->dc_bhandle), (0xA8)));
1607 sc->sc_arpcom.ac_enaddr[4] = (reg & 0xff);
1608 sc->sc_arpcom.ac_enaddr[5] = (reg >> 8) & 0xff;
1609 break;
1610 case DC_TYPE_CONEXANT0xC:
1611 bcopy(&sc->dc_srom + DC_CONEXANT_EE_NODEADDR0x19A,
1612 &sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN6);
1613 break;
1614 case DC_TYPE_XIRCOM0xB:
1615 /* Some newer units have the MAC at offset 8 */
1616 dc_read_eeprom(sc, (caddr_t)&sc->sc_arpcom.ac_enaddr, 8, 3, 0);
1617
1618 if (sc->sc_arpcom.ac_enaddr[0] == 0x00 &&
1619 sc->sc_arpcom.ac_enaddr[1] == 0x10 &&
1620 sc->sc_arpcom.ac_enaddr[2] == 0xa4)
1621 break;
1622 if (sc->sc_arpcom.ac_enaddr[0] == 0x00 &&
1623 sc->sc_arpcom.ac_enaddr[1] == 0x80 &&
1624 sc->sc_arpcom.ac_enaddr[2] == 0xc7)
1625 break;
1626 dc_read_eeprom(sc, (caddr_t)&sc->sc_arpcom.ac_enaddr, 3, 3, 0);
1627 break;
1628 default:
1629 dc_read_eeprom(sc, (caddr_t)&sc->sc_arpcom.ac_enaddr,
1630 DC_EE_NODEADDR10, 3, 0);
1631 break;
1632 }
1633hasmac:
1634
1635 if (bus_dmamem_alloc(sc->sc_dmat, sizeof(struct dc_list_data),(*(sc->sc_dmat)->_dmamem_alloc)((sc->sc_dmat), (sizeof
(struct dc_list_data)), ((1 << 12)), (0), (sc->sc_listseg
), (1), (&sc->sc_listnseg), (0x0001 | 0x1000))
1636 PAGE_SIZE, 0, sc->sc_listseg, 1, &sc->sc_listnseg,(*(sc->sc_dmat)->_dmamem_alloc)((sc->sc_dmat), (sizeof
(struct dc_list_data)), ((1 << 12)), (0), (sc->sc_listseg
), (1), (&sc->sc_listnseg), (0x0001 | 0x1000))
1637 BUS_DMA_NOWAIT | BUS_DMA_ZERO)(*(sc->sc_dmat)->_dmamem_alloc)((sc->sc_dmat), (sizeof
(struct dc_list_data)), ((1 << 12)), (0), (sc->sc_listseg
), (1), (&sc->sc_listnseg), (0x0001 | 0x1000))
!= 0) {
1638 printf(": can't alloc list mem\n");
1639 goto fail;
1640 }
1641 if (bus_dmamem_map(sc->sc_dmat, sc->sc_listseg, sc->sc_listnseg,(*(sc->sc_dmat)->_dmamem_map)((sc->sc_dmat), (sc->
sc_listseg), (sc->sc_listnseg), (sizeof(struct dc_list_data
)), (&sc->sc_listkva), (0x0001))
1642 sizeof(struct dc_list_data), &sc->sc_listkva,(*(sc->sc_dmat)->_dmamem_map)((sc->sc_dmat), (sc->
sc_listseg), (sc->sc_listnseg), (sizeof(struct dc_list_data
)), (&sc->sc_listkva), (0x0001))
1643 BUS_DMA_NOWAIT)(*(sc->sc_dmat)->_dmamem_map)((sc->sc_dmat), (sc->
sc_listseg), (sc->sc_listnseg), (sizeof(struct dc_list_data
)), (&sc->sc_listkva), (0x0001))
!= 0) {
1644 printf(": can't map list mem\n");
1645 goto fail;
1646 }
1647 if (bus_dmamap_create(sc->sc_dmat, sizeof(struct dc_list_data), 1,(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), (sizeof
(struct dc_list_data)), (1), (sizeof(struct dc_list_data)), (
0), (0x0001), (&sc->sc_listmap))
1648 sizeof(struct dc_list_data), 0, BUS_DMA_NOWAIT,(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), (sizeof
(struct dc_list_data)), (1), (sizeof(struct dc_list_data)), (
0), (0x0001), (&sc->sc_listmap))
1649 &sc->sc_listmap)(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), (sizeof
(struct dc_list_data)), (1), (sizeof(struct dc_list_data)), (
0), (0x0001), (&sc->sc_listmap))
!= 0) {
1650 printf(": can't alloc list map\n");
1651 goto fail;
1652 }
1653 if (bus_dmamap_load(sc->sc_dmat, sc->sc_listmap, sc->sc_listkva,(*(sc->sc_dmat)->_dmamap_load)((sc->sc_dmat), (sc->
sc_listmap), (sc->sc_listkva), (sizeof(struct dc_list_data
)), (((void *)0)), (0x0001))
1654 sizeof(struct dc_list_data), NULL, BUS_DMA_NOWAIT)(*(sc->sc_dmat)->_dmamap_load)((sc->sc_dmat), (sc->
sc_listmap), (sc->sc_listkva), (sizeof(struct dc_list_data
)), (((void *)0)), (0x0001))
!= 0) {
1655 printf(": can't load list map\n");
1656 goto fail;
1657 }
1658 sc->dc_ldata = (struct dc_list_data *)sc->sc_listkva;
1659
1660 for (i = 0; i < DC_RX_LIST_CNT64; i++) {
1661 if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), ((1 <<
11)), (1), ((1 << 11)), (0), (0x0001), (&sc->dc_cdata
.dc_rx_chain[i].sd_map))
1662 0, BUS_DMA_NOWAIT,(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), ((1 <<
11)), (1), ((1 << 11)), (0), (0x0001), (&sc->dc_cdata
.dc_rx_chain[i].sd_map))
1663 &sc->dc_cdata.dc_rx_chain[i].sd_map)(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), ((1 <<
11)), (1), ((1 << 11)), (0), (0x0001), (&sc->dc_cdata
.dc_rx_chain[i].sd_map))
!= 0) {
1664 printf(": can't create rx map\n");
1665 return;
1666 }
1667 }
1668 if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), ((1 <<
11)), (1), ((1 << 11)), (0), (0x0001), (&sc->sc_rx_sparemap
))
1669 BUS_DMA_NOWAIT, &sc->sc_rx_sparemap)(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), ((1 <<
11)), (1), ((1 << 11)), (0), (0x0001), (&sc->sc_rx_sparemap
))
!= 0) {
1670 printf(": can't create rx spare map\n");
1671 return;
1672 }
1673
1674 for (i = 0; i < DC_TX_LIST_CNT256; i++) {
1675 if (bus_dmamap_create(sc->sc_dmat, MCLBYTES,(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), ((1 <<
11)), ((sc->dc_flags & 0x00000002) ? 1 : 256 - 5), ((
1 << 11)), (0), (0x0001), (&sc->dc_cdata.dc_tx_chain
[i].sd_map))
1676 (sc->dc_flags & DC_TX_COALESCE) ? 1 : DC_TX_LIST_CNT - 5,(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), ((1 <<
11)), ((sc->dc_flags & 0x00000002) ? 1 : 256 - 5), ((
1 << 11)), (0), (0x0001), (&sc->dc_cdata.dc_tx_chain
[i].sd_map))
1677 MCLBYTES, 0, BUS_DMA_NOWAIT,(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), ((1 <<
11)), ((sc->dc_flags & 0x00000002) ? 1 : 256 - 5), ((
1 << 11)), (0), (0x0001), (&sc->dc_cdata.dc_tx_chain
[i].sd_map))
1678 &sc->dc_cdata.dc_tx_chain[i].sd_map)(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), ((1 <<
11)), ((sc->dc_flags & 0x00000002) ? 1 : 256 - 5), ((
1 << 11)), (0), (0x0001), (&sc->dc_cdata.dc_tx_chain
[i].sd_map))
!= 0) {
1679 printf(": can't create tx map\n");
1680 return;
1681 }
1682 }
1683 if (bus_dmamap_create(sc->sc_dmat, MCLBYTES,(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), ((1 <<
11)), ((sc->dc_flags & 0x00000002) ? 1 : 256 - 5), ((
1 << 11)), (0), (0x0001), (&sc->sc_tx_sparemap))
1684 (sc->dc_flags & DC_TX_COALESCE) ? 1 : DC_TX_LIST_CNT - 5,(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), ((1 <<
11)), ((sc->dc_flags & 0x00000002) ? 1 : 256 - 5), ((
1 << 11)), (0), (0x0001), (&sc->sc_tx_sparemap))
1685 MCLBYTES, 0, BUS_DMA_NOWAIT, &sc->sc_tx_sparemap)(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), ((1 <<
11)), ((sc->dc_flags & 0x00000002) ? 1 : 256 - 5), ((
1 << 11)), (0), (0x0001), (&sc->sc_tx_sparemap))
!= 0) {
1686 printf(": can't create tx spare map\n");
1687 return;
1688 }
1689
1690 /*
1691 * A 21143 or clone chip was detected. Inform the world.
1692 */
1693 printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));
1694
1695 ifp = &sc->sc_arpcom.ac_if;
1696 ifp->if_softc = sc;
1697 ifp->if_flags = IFF_BROADCAST0x2 | IFF_SIMPLEX0x800 | IFF_MULTICAST0x8000;
1698 ifp->if_ioctl = dc_ioctl;
1699 ifp->if_start = dc_start;
1700 ifp->if_watchdog = dc_watchdog;
1701 ifq_set_maxlen(&ifp->if_snd, DC_TX_LIST_CNT - 1)((&ifp->if_snd)->ifq_maxlen = (256 - 1));
1702 bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ16);
1703
1704 ifp->if_capabilitiesif_data.ifi_capabilities = IFCAP_VLAN_MTU0x00000010;
1705
1706 /* Do MII setup. If this is a 21143, check for a PHY on the
1707 * MII bus after applying any necessary fixups to twiddle the
1708 * GPIO bits. If we don't end up finding a PHY, restore the
1709 * old selection (SIA only or SIA/SYM) and attach the dcphy
1710 * driver instead.
1711 */
1712 if (DC_IS_INTEL(sc)(sc->dc_type == 0x4 || sc->dc_type == 0xD)) {
1713 dc_apply_fixup(sc, IFM_AUTO0ULL);
1714 tmp = sc->dc_pmode;
1715 sc->dc_pmode = DC_PMODE_MII0x1;
1716 }
1717
1718 /*
1719 * Set up the General Purpose port mode and data so the tulip can talk
1720 * to the MII. This needs to be done before mii_attach so that
1721 * we can actually see the PHYs.
1722 */
1723 if (DC_IS_XIRCOM(sc)(sc->dc_type == 0xB)) {
1724 CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_WRITE_EN | DC_SIAGP_INT1_EN |((sc->dc_btag)->write_4((sc->dc_bhandle), (0x78), (0x08000000
| 0x02000000 | 0x00040000 | 0x00010000)))
1725 DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x78), (0x08000000
| 0x02000000 | 0x00040000 | 0x00010000)))
;
1726 DELAY(10)(*delay_func)(10);
1727 CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_INT1_EN |((sc->dc_btag)->write_4((sc->dc_bhandle), (0x78), (0x02000000
| 0x00040000 | 0x00010000)))
1728 DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x78), (0x02000000
| 0x00040000 | 0x00010000)))
;
1729 DELAY(10)(*delay_func)(10);
1730 }
1731
1732 sc->sc_mii.mii_ifp = ifp;
1733 sc->sc_mii.mii_readreg = dc_miibus_readreg;
1734 sc->sc_mii.mii_writereg = dc_miibus_writereg;
1735 sc->sc_mii.mii_statchg = dc_miibus_statchg;
1736 ifmedia_init(&sc->sc_mii.mii_media, 0, dc_ifmedia_upd, dc_ifmedia_sts);
1737 mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY-1,
1738 MII_OFFSET_ANY-1, 0);
1739
1740 if (DC_IS_INTEL(sc)(sc->dc_type == 0x4 || sc->dc_type == 0xD)) {
1741 if (LIST_EMPTY(&sc->sc_mii.mii_phys)(((&sc->sc_mii.mii_phys)->lh_first) == ((void *)0))) {
1742 sc->dc_pmode = tmp;
1743 if (sc->dc_pmode != DC_PMODE_SIA0x3)
1744 sc->dc_pmode = DC_PMODE_SYM0x2;
1745 sc->dc_flags |= DC_21143_NWAY0x00000800;
1746 if (sc->dc_flags & DC_MOMENCO_BOTCH0x00020000)
1747 sc->dc_pmode = DC_PMODE_MII0x1;
1748 mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff,
1749 MII_PHY_ANY-1, MII_OFFSET_ANY-1, 0);
1750 } else {
1751 /* we have a PHY, so we must clear this bit */
1752 sc->dc_flags &= ~DC_TULIP_LEDS0x00004000;
1753 }
1754 }
1755
1756 if (LIST_EMPTY(&sc->sc_mii.mii_phys)(((&sc->sc_mii.mii_phys)->lh_first) == ((void *)0))) {
1757 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER0x0000000000000100ULL|IFM_NONE2ULL, 0, NULL((void *)0));
1758 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER0x0000000000000100ULL|IFM_NONE2ULL);
1759 printf("%s: MII without any PHY!\n", sc->sc_dev.dv_xname);
1760 } else if (sc->dc_type == DC_TYPE_211450xD) {
1761 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER0x0000000000000100ULL|IFM_10_T3);
1762 } else
1763 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER0x0000000000000100ULL|IFM_AUTO0ULL);
1764
1765 if (DC_IS_DAVICOM(sc)(sc->dc_type == 0x8) && sc->dc_revision >= DC_REVISION_DM9102A0x30)
1766 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER0x0000000000000100ULL|IFM_HPNA_117,0,NULL((void *)0));
1767
1768 if (DC_IS_ADMTEK(sc)(sc->dc_type == 0x6 || sc->dc_type == 0x7)) {
1769 /*
1770 * Set automatic TX underrun recovery for the ADMtek chips
1771 */
1772 DC_SETBIT(sc, DC_AL_CR, DC_AL_CR_ATUR)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x88), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x88))) | (
0x00000001))))
;
1773 }
1774
1775 /*
1776 * Call MI attach routines.
1777 */
1778 if_attach(ifp);
1779 ether_ifattach(ifp);
1780
1781fail:
1782 return;
1783}
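
/*
 * Editor's note (not part of dc.c): for the ADMtek AL981/AN983 cases in
 * the switch above, the station address is not read from the EEPROM at
 * all; it is unpacked byte by byte from the two 32-bit PAR registers.
 * A standalone sketch of that unpacking; par0/par1 are raw register
 * images and the helper name is illustrative.
 */
#include <stdint.h>

void
mac_from_par(uint32_t par0, uint32_t par1, uint8_t mac[6])
{
	mac[0] = par0 & 0xff;
	mac[1] = (par0 >> 8) & 0xff;
	mac[2] = (par0 >> 16) & 0xff;
	mac[3] = (par0 >> 24) & 0xff;
	mac[4] = par1 & 0xff;
	mac[5] = (par1 >> 8) & 0xff;
}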
1784
1785/*
1786 * Initialize the transmit descriptors.
1787 */
1788int
1789dc_list_tx_init(struct dc_softc *sc)
1790{
1791 struct dc_chain_data *cd;
1792 struct dc_list_data *ld;
1793 int i;
1794 bus_addr_t next;
1795
1796 cd = &sc->dc_cdata;
1797 ld = sc->dc_ldata;
1798 for (i = 0; i < DC_TX_LIST_CNT256; i++) {
1799 next = sc->sc_listmap->dm_segs[0].ds_addr;
1800 if (i == (DC_TX_LIST_CNT256 - 1))
1801 next +=
1802 offsetof(struct dc_list_data, dc_tx_list[0])__builtin_offsetof(struct dc_list_data, dc_tx_list[0]);
1803 else
1804 next +=
1805 offsetof(struct dc_list_data, dc_tx_list[i + 1])__builtin_offsetof(struct dc_list_data, dc_tx_list[i + 1]);
1806 cd->dc_tx_chain[i].sd_mbuf = NULL((void *)0);
1807 ld->dc_tx_list[i].dc_datadc_ptr1 = htole32(0)((__uint32_t)(0));
1808 ld->dc_tx_list[i].dc_ctl = htole32(0)((__uint32_t)(0));
1809 ld->dc_tx_list[i].dc_nextdc_ptr2 = htole32(next)((__uint32_t)(next));
1810 }
1811
1812 cd->dc_tx_prod = cd->dc_tx_cons = cd->dc_tx_cnt = 0;
1813
1814 return (0);
1815}
1816
1817
1818/*
1819 * Initialize the RX descriptors and allocate mbufs for them. Note that
1820 * we arrange the descriptors in a closed ring, so that the last descriptor
1821 * points back to the first.
1822 */
1823int
1824dc_list_rx_init(struct dc_softc *sc)
1825{
1826 struct dc_chain_data *cd;
1827 struct dc_list_data *ld;
1828 int i;
1829 bus_addr_t next;
1830
1831 cd = &sc->dc_cdata;
1832 ld = sc->dc_ldata;
1833
1834 for (i = 0; i < DC_RX_LIST_CNT64; i++) {
1835 if (dc_newbuf(sc, i, NULL((void *)0)) == ENOBUFS55)
1836 return (ENOBUFS55);
1837 next = sc->sc_listmap->dm_segs[0].ds_addr;
1838 if (i == (DC_RX_LIST_CNT64 - 1))
1839 next +=
1840 offsetof(struct dc_list_data, dc_rx_list[0])__builtin_offsetof(struct dc_list_data, dc_rx_list[0]);
1841 else
1842 next +=
1843 offsetof(struct dc_list_data, dc_rx_list[i + 1])__builtin_offsetof(struct dc_list_data, dc_rx_list[i + 1]);
1844 ld->dc_rx_list[i].dc_nextdc_ptr2 = htole32(next)((__uint32_t)(next));
1845 }
1846
1847 cd->dc_rx_prod = 0;
1848
1849 return (0);
1850}
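
/*
 * Editor's note (not part of dc.c): both list-init routines above chain
 * descriptor i to descriptor i + 1 and point the last descriptor back
 * at entry 0, giving the chip a closed ring.  A minimal sketch of that
 * wrap-around next-address computation over a toy 8-entry ring; the
 * struct names, ring size and base_pa parameter are illustrative.
 */
#include <stddef.h>
#include <stdint.h>

struct demo_desc { uint32_t status, ctl, ptr1, ptr2; };
struct demo_ring { struct demo_desc slot[8]; };

/* Bus address of the descriptor that follows slot i within the ring. */
uint64_t
next_desc_pa(uint64_t base_pa, int i)
{
	int next = (i + 1) % 8;

	return (base_pa + offsetof(struct demo_ring, slot) +
	    (size_t)next * sizeof(struct demo_desc));
}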
1851
1852/*
1853 * Initialize an RX descriptor and attach an MBUF cluster.
1854 */
1855int
1856dc_newbuf(struct dc_softc *sc, int i, struct mbuf *m)
1857{
1858 struct mbuf *m_new = NULL((void *)0);
1859 struct dc_desc *c;
1860 bus_dmamap_t map;
1861
1862 c = &sc->dc_ldata->dc_rx_list[i];
1863
1864 if (m == NULL((void *)0)) {
1865 MGETHDR(m_new, M_DONTWAIT, MT_DATA)m_new = m_gethdr((0x0002), (1));
1866 if (m_new == NULL((void *)0))
1867 return (ENOBUFS55);
1868
1869 MCLGET(m_new, M_DONTWAIT)(void) m_clget((m_new), (0x0002), (1 << 11));
1870 if (!(m_new->m_flagsm_hdr.mh_flags & M_EXT0x0001)) {
1871 m_freem(m_new);
1872 return (ENOBUFS55);
1873 }
1874 m_new->m_lenm_hdr.mh_len = m_new->m_pkthdrM_dat.MH.MH_pkthdr.len = MCLBYTES(1 << 11);
1875 if (bus_dmamap_load_mbuf(sc->sc_dmat, sc->sc_rx_sparemap,(*(sc->sc_dmat)->_dmamap_load_mbuf)((sc->sc_dmat), (
sc->sc_rx_sparemap), (m_new), (0x0001))
1876 m_new, BUS_DMA_NOWAIT)(*(sc->sc_dmat)->_dmamap_load_mbuf)((sc->sc_dmat), (
sc->sc_rx_sparemap), (m_new), (0x0001))
!= 0) {
1877 m_freem(m_new);
1878 return (ENOBUFS55);
1879 }
1880 map = sc->dc_cdata.dc_rx_chain[i].sd_map;
1881 sc->dc_cdata.dc_rx_chain[i].sd_map = sc->sc_rx_sparemap;
1882 sc->sc_rx_sparemap = map;
1883 } else {
1884 /*
1885 * We're re-using a previously allocated mbuf;
1886 * be sure to re-init pointers and lengths to
1887 * default values.
1888 */
1889 m_new = m;
1890 m_new->m_lenm_hdr.mh_len = m_new->m_pkthdrM_dat.MH.MH_pkthdr.len = MCLBYTES(1 << 11);
1891 m_new->m_datam_hdr.mh_data = m_new->m_extM_dat.MH.MH_dat.MH_ext.ext_buf;
1892 }
1893
1894 m_adj(m_new, sizeof(u_int64_t));
1895
1896 /*
1897 * If this is a PNIC chip, zero the buffer. This is part
1898 * of the workaround for the receive bug in the 82c168 and
1899 * 82c169 chips.
1900 */
1901 if (sc->dc_flags & DC_PNIC_RX_BUG_WAR0x00000040)
1902 bzero(mtod(m_new, char *), m_new->m_len)__builtin_bzero((((char *)((m_new)->m_hdr.mh_data))), (m_new
->m_hdr.mh_len))
;
1903
1904 bus_dmamap_sync(sc->sc_dmat, sc->dc_cdata.dc_rx_chain[i].sd_map, 0,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (sc->
dc_cdata.dc_rx_chain[i].sd_map), (0), (sc->dc_cdata.dc_rx_chain
[i].sd_map->dm_mapsize), (0x01))
1905 sc->dc_cdata.dc_rx_chain[i].sd_map->dm_mapsize,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (sc->
dc_cdata.dc_rx_chain[i].sd_map), (0), (sc->dc_cdata.dc_rx_chain
[i].sd_map->dm_mapsize), (0x01))
1906 BUS_DMASYNC_PREREAD)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (sc->
dc_cdata.dc_rx_chain[i].sd_map), (0), (sc->dc_cdata.dc_rx_chain
[i].sd_map->dm_mapsize), (0x01))
;
1907
1908 sc->dc_cdata.dc_rx_chain[i].sd_mbuf = m_new;
1909 c->dc_datadc_ptr1 = htole32(((__uint32_t)(sc->dc_cdata.dc_rx_chain[i].sd_map->dm_segs
[0].ds_addr + sizeof(u_int64_t)))
1910 sc->dc_cdata.dc_rx_chain[i].sd_map->dm_segs[0].ds_addr +((__uint32_t)(sc->dc_cdata.dc_rx_chain[i].sd_map->dm_segs
[0].ds_addr + sizeof(u_int64_t)))
1911 sizeof(u_int64_t))((__uint32_t)(sc->dc_cdata.dc_rx_chain[i].sd_map->dm_segs
[0].ds_addr + sizeof(u_int64_t)))
;
1912 c->dc_ctl = htole32(DC_RXCTL_RLINK | ETHER_MAX_DIX_LEN)((__uint32_t)(0x01000000 | 1536));
1913 c->dc_status = htole32(DC_RXSTAT_OWN)((__uint32_t)(0x80000000));
1914
1915 bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (sc->
sc_listmap), (__builtin_offsetof(struct dc_list_data, dc_rx_list
[i])), (sizeof(struct dc_desc)), (0x01 | 0x04))
1916 offsetof(struct dc_list_data, dc_rx_list[i]),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (sc->
sc_listmap), (__builtin_offsetof(struct dc_list_data, dc_rx_list
[i])), (sizeof(struct dc_desc)), (0x01 | 0x04))
1917 sizeof(struct dc_desc),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (sc->
sc_listmap), (__builtin_offsetof(struct dc_list_data, dc_rx_list
[i])), (sizeof(struct dc_desc)), (0x01 | 0x04))
1918 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (sc->
sc_listmap), (__builtin_offsetof(struct dc_list_data, dc_rx_list
[i])), (sizeof(struct dc_desc)), (0x01 | 0x04))
;
1919
1920 return (0);
1921}
1922
1923/*
1924 * Grrrrr.
1925 * The PNIC chip has a terrible bug in it that manifests itself during
1926 * periods of heavy activity. The exact mode of failure is difficult to
1927 * pinpoint: sometimes it only happens in promiscuous mode, sometimes it
1928 * will happen on slow machines. The bug is that sometimes instead of
1929 * uploading one complete frame during reception, it uploads what looks
1930 * like the entire contents of its FIFO memory. The frame we want is at
1931 * the end of the whole mess, but we never know exactly how much data has
1932 * been uploaded, so salvaging the frame is hard.
1933 *
1934 * There is only one way to do it reliably, and it's disgusting.
1935 * Here's what we know:
1936 *
1937 * - We know there will always be somewhere between one and three extra
1938 * descriptors uploaded.
1939 *
1940 * - We know the desired received frame will always be at the end of the
1941 * total data upload.
1942 *
1943 * - We know the size of the desired received frame because it will be
1944 * provided in the length field of the status word in the last descriptor.
1945 *
1946 * Here's what we do:
1947 *
1948 * - When we allocate buffers for the receive ring, we bzero() them.
1949 * This means that we know that the buffer contents should be all
1950 * zeros, except for data uploaded by the chip.
1951 *
1952 * - We also force the PNIC chip to upload frames that include the
1953 * ethernet CRC at the end.
1954 *
1955 * - We gather all of the bogus frame data into a single buffer.
1956 *
1957 * - We then position a pointer at the end of this buffer and scan
1958 * backwards until we encounter the first non-zero byte of data.
1959 * This is the end of the received frame. We know we will encounter
1960 * some data at the end of the frame because the CRC will always be
1961 * there, so even if the sender transmits a packet of all zeros,
1962 * we won't be fooled.
1963 *
1964 * - We know the size of the actual received frame, so we subtract
1965 * that value from the current pointer location. This brings us
1966 * to the start of the actual received packet.
1967 *
1968 * - We copy this into an mbuf and pass it on, along with the actual
1969 * frame length.
1970 *
1971 * The performance hit is tremendous, but it beats dropping frames all
1972 * the time.
1973 */
1974
1975#define DC_WHOLEFRAME(0x00000200|0x00000100) (DC_RXSTAT_FIRSTFRAG0x00000200|DC_RXSTAT_LASTFRAG0x00000100)
1976void
1977dc_pnic_rx_bug_war(struct dc_softc *sc, int idx)
1978{
1979 struct dc_desc *cur_rx;
1980 struct dc_desc *c = NULL((void *)0);
1981 struct mbuf *m = NULL((void *)0);
1982 unsigned char *ptr;
1983 int i, total_len;
1984 u_int32_t rxstat = 0;
1985
1986 i = sc->dc_pnic_rx_bug_save;
1987 cur_rx = &sc->dc_ldata->dc_rx_list[idx];
1988 ptr = sc->dc_pnic_rx_buf;
1989 bzero(ptr, ETHER_MAX_DIX_LEN * 5)__builtin_bzero((ptr), (1536 * 5));
1990
1991 /* Copy all the bytes from the bogus buffers. */
1992 while (1) {
1993 c = &sc->dc_ldata->dc_rx_list[i];
1994 rxstat = letoh32(c->dc_status)((__uint32_t)(c->dc_status));
1995 m = sc->dc_cdata.dc_rx_chain[i].sd_mbuf;
1996 bcopy(mtod(m, char *)((char *)((m)->m_hdr.mh_data)), ptr, ETHER_MAX_DIX_LEN1536);
1997 ptr += ETHER_MAX_DIX_LEN1536;
1998 /* If this is the last buffer, break out. */
1999 if (i == idx || rxstat & DC_RXSTAT_LASTFRAG0x00000100)
2000 break;
2001 dc_newbuf(sc, i, m);
2002 DC_INC(i, DC_RX_LIST_CNT)(i) = (i + 1) % 64;
2003 }
2004
2005 /* Find the length of the actual receive frame. */
2006 total_len = DC_RXBYTES(rxstat)((rxstat & 0x3FFF0000) >> 16);
2007
2008 /* Scan backwards until we hit a non-zero byte. */
2009 while(*ptr == 0x00)
2010 ptr--;
2011
2012 /* Round off. */
2013 if ((unsigned long)(ptr) & 0x3)
2014 ptr -= 1;
2015
2016 /* Now find the start of the frame. */
2017 ptr -= total_len;
2018 if (ptr < sc->dc_pnic_rx_buf)
2019 ptr = sc->dc_pnic_rx_buf;
2020
2021 /*
2022 * Now copy the salvaged frame to the last mbuf and fake up
2023 * the status word to make it look like a successful
2024 * frame reception.
2025 */
2026 dc_newbuf(sc, i, m);
2027 bcopy(ptr, mtod(m, char *)((char *)((m)->m_hdr.mh_data)), total_len);
2028 cur_rx->dc_status = htole32(rxstat | DC_RXSTAT_FIRSTFRAG)((__uint32_t)(rxstat | 0x00000200));
2029}
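
/*
 * Editor's note (not part of dc.c): the core of the workaround above is
 * "scan backwards from the end of the pre-zeroed scratch buffer to the
 * last non-zero byte, then step back by the frame length to find the
 * start of the real frame".  A standalone sketch of that recovery step;
 * buf/buf_len/frame_len and the helper name are illustrative.  Returns
 * a pointer to the recovered frame, or NULL if the buffer is all zero.
 */
#include <stddef.h>

unsigned char *
salvage_frame(unsigned char *buf, size_t buf_len, size_t frame_len)
{
	unsigned char *end = buf + buf_len - 1;

	/* Walk back over the zero padding left by the bzero'd buffer. */
	while (end > buf && *end == 0x00)
		end--;
	if (end == buf && *end == 0x00)
		return (NULL);			/* nothing was uploaded */

	/* The frame ends at 'end'; back up to its first byte, clamping. */
	if ((size_t)(end - buf) + 1 < frame_len)
		return (buf);
	return (end + 1 - frame_len);
}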
2030
2031/*
2032 * This routine searches the RX ring for dirty descriptors in the
2033 * event that the rxeof routine falls out of sync with the chip's
2034 * current descriptor pointer. This may happen sometimes as a result
2035 * of a "no RX buffer available" condition that happens when the chip
2036 * consumes all of the RX buffers before the driver has a chance to
2037 * process the RX ring. This routine may need to be called more than
2038 * once to bring the driver back in sync with the chip, however we
2039 * should still be getting RX DONE interrupts to drive the search
2040 * for new packets in the RX ring, so we should catch up eventually.
2041 */
2042int
2043dc_rx_resync(struct dc_softc *sc)
2044{
2045 u_int32_t stat;
2046 int i, pos, offset;
2047
2048 pos = sc->dc_cdata.dc_rx_prod;
2049
2050 for (i = 0; i < DC_RX_LIST_CNT64; i++) {
2051
2052 offset = offsetof(struct dc_list_data, dc_rx_list[pos])__builtin_offsetof(struct dc_list_data, dc_rx_list[pos]);
2053 bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (sc->
sc_listmap), (offset), (sizeof(struct dc_desc)), (0x02 | 0x08
))
2054 offset, sizeof(struct dc_desc),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (sc->
sc_listmap), (offset), (sizeof(struct dc_desc)), (0x02 | 0x08
))
2055 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (sc->
sc_listmap), (offset), (sizeof(struct dc_desc)), (0x02 | 0x08
))
;
2056
2057 stat = sc->dc_ldata->dc_rx_list[pos].dc_status;
2058 if (!(stat & htole32(DC_RXSTAT_OWN)((__uint32_t)(0x80000000))))
2059 break;
2060 DC_INC(pos, DC_RX_LIST_CNT)(pos) = (pos + 1) % 64;
2061 }
2062
2063 /* If the ring really is empty, then just return. */
2064 if (i == DC_RX_LIST_CNT64)
2065 return (0);
2066
2067 /* We've fallen behind the chip: catch it. */
2068 sc->dc_cdata.dc_rx_prod = pos;
2069
2070 return (EAGAIN35);
2071}
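
/*
 * Editor's note (not part of dc.c): dc_rx_resync() above simply walks
 * the ring forward from the software producer index until it finds a
 * descriptor whose OWN bit the chip has already cleared, i.e. one the
 * host may process.  A sketch of that search over a plain status
 * array; OWN_BIT and the parameter names are illustrative.  Returns
 * the resynchronized index, or -1 if the chip still owns everything.
 */
#include <stdint.h>

#define OWN_BIT	0x80000000u

int
find_host_owned(const uint32_t *ring_status, int ring_size, int start)
{
	int i, pos = start;

	for (i = 0; i < ring_size; i++) {
		if (!(ring_status[pos] & OWN_BIT))
			return (pos);
		pos = (pos + 1) % ring_size;
	}
	return (-1);
}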
2072
2073/*
2074 * A frame has been uploaded: pass the resulting mbuf chain up to
2075 * the higher level protocols.
2076 */
2077int
2078dc_rxeof(struct dc_softc *sc)
2079{
2080 struct mbuf *m;
2081 struct ifnet *ifp;
2082 struct dc_desc *cur_rx;
2083 struct mbuf_list ml = MBUF_LIST_INITIALIZER(){ ((void *)0), ((void *)0), 0 };
2084 int i, offset, total_len = 0, consumed = 0;
2085 u_int32_t rxstat;
2086
2087 ifp = &sc->sc_arpcom.ac_if;
2088 i = sc->dc_cdata.dc_rx_prod;
2089
2090 for(;;) {
2091 struct mbuf *m0 = NULL((void *)0);
2092
2093 offset = offsetof(struct dc_list_data, dc_rx_list[i])__builtin_offsetof(struct dc_list_data, dc_rx_list[i]);
2094 bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (sc->
sc_listmap), (offset), (sizeof(struct dc_desc)), (0x02 | 0x08
))
2095 offset, sizeof(struct dc_desc),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (sc->
sc_listmap), (offset), (sizeof(struct dc_desc)), (0x02 | 0x08
))
2096 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (sc->
sc_listmap), (offset), (sizeof(struct dc_desc)), (0x02 | 0x08
))
;
2097
2098 cur_rx = &sc->dc_ldata->dc_rx_list[i];
2099 rxstat = letoh32(cur_rx->dc_status)((__uint32_t)(cur_rx->dc_status));
2100 if (rxstat & DC_RXSTAT_OWN0x80000000)
2101 break;
2102
2103 m = sc->dc_cdata.dc_rx_chain[i].sd_mbuf;
2104 total_len = DC_RXBYTES(rxstat)((rxstat & 0x3FFF0000) >> 16);
2105
2106 bus_dmamap_sync(sc->sc_dmat, sc->dc_cdata.dc_rx_chain[i].sd_map,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (sc->
dc_cdata.dc_rx_chain[i].sd_map), (0), (sc->dc_cdata.dc_rx_chain
[i].sd_map->dm_mapsize), (0x02))
2107 0, sc->dc_cdata.dc_rx_chain[i].sd_map->dm_mapsize,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (sc->
dc_cdata.dc_rx_chain[i].sd_map), (0), (sc->dc_cdata.dc_rx_chain
[i].sd_map->dm_mapsize), (0x02))
2108 BUS_DMASYNC_POSTREAD)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (sc->
dc_cdata.dc_rx_chain[i].sd_map), (0), (sc->dc_cdata.dc_rx_chain
[i].sd_map->dm_mapsize), (0x02))
;
2109
2110 if (sc->dc_flags & DC_PNIC_RX_BUG_WAR0x00000040) {
2111 if ((rxstat & DC_WHOLEFRAME(0x00000200|0x00000100)) != DC_WHOLEFRAME(0x00000200|0x00000100)) {
2112 if (rxstat & DC_RXSTAT_FIRSTFRAG0x00000200)
2113 sc->dc_pnic_rx_bug_save = i;
2114 if ((rxstat & DC_RXSTAT_LASTFRAG0x00000100) == 0) {
2115 DC_INC(i, DC_RX_LIST_CNT)(i) = (i + 1) % 64;
2116 continue;
2117 }
2118 dc_pnic_rx_bug_war(sc, i);
2119 rxstat = letoh32(cur_rx->dc_status)((__uint32_t)(cur_rx->dc_status));
2120 total_len = DC_RXBYTES(rxstat)((rxstat & 0x3FFF0000) >> 16);
2121 }
2122 }
2123
2124 sc->dc_cdata.dc_rx_chain[i].sd_mbuf = NULL((void *)0);
2125
2126 /*
2127 * If an error occurs, update stats, clear the
2128 * status word and leave the mbuf cluster in place:
2129 * it should simply get re-used next time this descriptor
2130 * comes up in the ring. However, don't report long
2131 * frames as errors since they could be VLANs.
2132 */
2133 if ((rxstat & DC_RXSTAT_RXERR0x00008000)) {
2134 if (!(rxstat & DC_RXSTAT_GIANT0x00000080) ||
2135 (rxstat & (DC_RXSTAT_CRCERR0x00000002 | DC_RXSTAT_DRIBBLE0x00000004 |
2136 DC_RXSTAT_MIIERE0x00000008 | DC_RXSTAT_COLLSEEN0x00000040 |
2137 DC_RXSTAT_RUNT0x00000800 | DC_RXSTAT_DE0x00004000))) {
2138 ifp->if_ierrorsif_data.ifi_ierrors++;
2139 if (rxstat & DC_RXSTAT_COLLSEEN0x00000040)
2140 ifp->if_collisionsif_data.ifi_collisions++;
2141 dc_newbuf(sc, i, m);
2142 if (rxstat & DC_RXSTAT_CRCERR0x00000002) {
2143 DC_INC(i, DC_RX_LIST_CNT)(i) = (i + 1) % 64;
2144 continue;
2145 } else {
2146 dc_init(sc);
2147 break;
2148 }
2149 }
2150 }
2151
2152 /* No errors; receive the packet. */
2153 total_len -= ETHER_CRC_LEN4;
2154
2155 m0 = m_devget(mtod(m, char *)((char *)((m)->m_hdr.mh_data)), total_len, ETHER_ALIGN2);
2156 dc_newbuf(sc, i, m);
2157 DC_INC(i, DC_RX_LIST_CNT)(i) = (i + 1) % 64;
2158 if (m0 == NULL((void *)0)) {
2159 ifp->if_ierrorsif_data.ifi_ierrors++;
2160 continue;
2161 }
2162 m = m0;
2163
2164 consumed++;
2165 ml_enqueue(&ml, m);
2166 }
2167
2168 sc->dc_cdata.dc_rx_prod = i;
2169
2170 if_input(ifp, &ml);
2171
2172 return (consumed);
2173}
2174
2175/*
2176 * A frame was downloaded to the chip. It's safe for us to clean up
2177 * the list buffers.
2178 */
2179
2180void
2181dc_txeof(struct dc_softc *sc)
2182{
2183 struct dc_desc *cur_tx = NULL((void *)0);
2184 struct ifnet *ifp;
2185 int idx, offset;
2186
2187 ifp = &sc->sc_arpcom.ac_if;
2188
2189 /*
2190 * Go through our tx list and free mbufs for those
2191 * frames that have been transmitted.
2192 */
2193 idx = sc->dc_cdata.dc_tx_cons;
2194 while(idx != sc->dc_cdata.dc_tx_prod) {
2195 u_int32_t txstat;
2196
2197 offset = offsetof(struct dc_list_data, dc_tx_list[idx])__builtin_offsetof(struct dc_list_data, dc_tx_list[idx]);
2198 bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (sc->
sc_listmap), (offset), (sizeof(struct dc_desc)), (0x02 | 0x08
))
2199 offset, sizeof(struct dc_desc),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (sc->
sc_listmap), (offset), (sizeof(struct dc_desc)), (0x02 | 0x08
))
2200 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (sc->
sc_listmap), (offset), (sizeof(struct dc_desc)), (0x02 | 0x08
))
;
2201
2202 cur_tx = &sc->dc_ldata->dc_tx_list[idx];
2203 txstat = letoh32(cur_tx->dc_status)((__uint32_t)(cur_tx->dc_status));
2204
2205 if (txstat & DC_TXSTAT_OWN0x80000000)
2206 break;
2207
2208 if (!(cur_tx->dc_ctl & htole32(DC_TXCTL_LASTFRAG)((__uint32_t)(0x40000000))) ||
2209 cur_tx->dc_ctl & htole32(DC_TXCTL_SETUP)((__uint32_t)(0x08000000))) {
2210 if (cur_tx->dc_ctl & htole32(DC_TXCTL_SETUP)((__uint32_t)(0x08000000))) {
2211 /*
2212 * Yes, the PNIC is so brain damaged
2213 * that it will sometimes generate a TX
2214 * underrun error while DMAing the RX
2215 * filter setup frame. If we detect this,
2216 * we have to send the setup frame again,
2217 * or else the filter won't be programmed
2218 * correctly.
2219 */
2220 if (DC_IS_PNIC(sc)(sc->dc_type == 0xA)) {
2221 if (txstat & DC_TXSTAT_ERRSUM0x00008000)
2222 dc_setfilt(sc);
2223 }
2224 sc->dc_cdata.dc_tx_chain[idx].sd_mbuf = NULL((void *)0);
2225 }
2226 sc->dc_cdata.dc_tx_cnt--;
2227 DC_INC(idx, DC_TX_LIST_CNT)(idx) = (idx + 1) % 256;
2228 continue;
2229 }
2230
2231 if (DC_IS_XIRCOM(sc)(sc->dc_type == 0xB) || DC_IS_CONEXANT(sc)(sc->dc_type == 0xC)) {
2232 /*
2233 * XXX: Why does my Xircom taunt me so?
2234 * For some reason it likes setting the CARRLOST flag
2235 * even when the carrier is there. wtf?!
2236 * Who knows, but Conexant chips have the
2237 * same problem. Maybe they took lessons
2238 * from Xircom.
2239 */
2240 if (/*sc->dc_type == DC_TYPE_21143 &&*/
2241 sc->dc_pmode == DC_PMODE_MII0x1 &&
2242 ((txstat & 0xFFFF) & ~(DC_TXSTAT_ERRSUM0x00008000|
2243 DC_TXSTAT_NOCARRIER0x00000400)))
2244 txstat &= ~DC_TXSTAT_ERRSUM0x00008000;
2245 } else {
2246 if (/*sc->dc_type == DC_TYPE_21143 &&*/
2247 sc->dc_pmode == DC_PMODE_MII0x1 &&
2248 ((txstat & 0xFFFF) & ~(DC_TXSTAT_ERRSUM0x00008000|
2249 DC_TXSTAT_NOCARRIER0x00000400|DC_TXSTAT_CARRLOST0x00000800)))
2250 txstat &= ~DC_TXSTAT_ERRSUM0x00008000;
2251 }
2252
2253 if (txstat & DC_TXSTAT_ERRSUM0x00008000) {
2254 ifp->if_oerrorsif_data.ifi_oerrors++;
2255 if (txstat & DC_TXSTAT_EXCESSCOLL0x00000100)
2256 ifp->if_collisionsif_data.ifi_collisions++;
2257 if (txstat & DC_TXSTAT_LATECOLL0x00000200)
2258 ifp->if_collisionsif_data.ifi_collisions++;
2259 if (!(txstat & DC_TXSTAT_UNDERRUN0x00000002)) {
2260 dc_init(sc);
2261 return;
2262 }
2263 }
2264
2265 ifp->if_collisionsif_data.ifi_collisions += (txstat & DC_TXSTAT_COLLCNT0x00000078) >> 3;
2266
2267 if (sc->dc_cdata.dc_tx_chain[idx].sd_map->dm_nsegs != 0) {
2268 bus_dmamap_t map = sc->dc_cdata.dc_tx_chain[idx].sd_map;
2269
2270 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (map),
(0), (map->dm_mapsize), (0x08))
2271 BUS_DMASYNC_POSTWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (map),
(0), (map->dm_mapsize), (0x08))
;
2272 bus_dmamap_unload(sc->sc_dmat, map)(*(sc->sc_dmat)->_dmamap_unload)((sc->sc_dmat), (map
))
;
2273 }
2274 if (sc->dc_cdata.dc_tx_chain[idx].sd_mbuf != NULL((void *)0)) {
2275 m_freem(sc->dc_cdata.dc_tx_chain[idx].sd_mbuf);
2276 sc->dc_cdata.dc_tx_chain[idx].sd_mbuf = NULL((void *)0);
2277 }
2278
2279 bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (sc->
sc_listmap), (offset), (sizeof(struct dc_desc)), (0x02 | 0x08
))
2280 offset, sizeof(struct dc_desc),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (sc->
sc_listmap), (offset), (sizeof(struct dc_desc)), (0x02 | 0x08
))
2281 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (sc->
sc_listmap), (offset), (sizeof(struct dc_desc)), (0x02 | 0x08
))
;
2282
2283 sc->dc_cdata.dc_tx_cnt--;
2284 DC_INC(idx, DC_TX_LIST_CNT)(idx) = (idx + 1) % 256;
2285 }
2286 sc->dc_cdata.dc_tx_cons = idx;
2287
2288 if (DC_TX_LIST_CNT256 - sc->dc_cdata.dc_tx_cnt > 5)
2289 ifq_clr_oactive(&ifp->if_snd);
2290 if (sc->dc_cdata.dc_tx_cnt == 0)
2291 ifp->if_timer = 0;
2292}
2293
2294void
2295dc_tick(void *xsc)
2296{
2297 struct dc_softc *sc = (struct dc_softc *)xsc;
2298 struct mii_data *mii;
2299 struct ifnet *ifp;
2300 int s;
2301 u_int32_t r;
2302
2303 s = splnet()splraise(0x7);
2304
2305 ifp = &sc->sc_arpcom.ac_if;
2306 mii = &sc->sc_mii;
2307
2308 if (sc->dc_flags & DC_REDUCED_MII_POLL0x00000200) {
2309 if (sc->dc_flags & DC_21143_NWAY0x00000800) {
2310 r = CSR_READ_4(sc, DC_10BTSTAT)((sc->dc_btag)->read_4((sc->dc_bhandle), (0x60)));
2311 if (IFM_SUBTYPE(mii->mii_media_active)((mii->mii_media_active) & 0x00000000000000ffULL) ==
2312 IFM_100_TX6 && (r & DC_TSTAT_LS1000x00000002)) {
2313 sc->dc_link = 0;
2314 mii_mediachg(mii);
2315 }
2316 if (IFM_SUBTYPE(mii->mii_media_active)((mii->mii_media_active) & 0x00000000000000ffULL) ==
2317 IFM_10_T3 && (r & DC_TSTAT_LS100x00000004)) {
2318 sc->dc_link = 0;
2319 mii_mediachg(mii);
2320 }
2321 if (sc->dc_link == 0)
2322 mii_tick(mii);
2323 } else {
2324 /*
2325 * For NICs which never report DC_RXSTATE_WAIT, we
2326 * have to bite the bullet...
2327 */
2328 if ((DC_HAS_BROKEN_RXSTATE(sc)((sc->dc_type == 0x7) || (sc->dc_type == 0xC) || ((sc->
dc_type == 0x8) && sc->dc_revision >= 0x30))
|| (CSR_READ_4(sc,((sc->dc_btag)->read_4((sc->dc_bhandle), (0x28)))
2329 DC_ISR)((sc->dc_btag)->read_4((sc->dc_bhandle), (0x28))) & DC_ISR_RX_STATE0x000E0000) == DC_RXSTATE_WAIT0x00060000) &&
2330 sc->dc_cdata.dc_tx_cnt == 0 && !DC_IS_ASIX(sc)(sc->dc_type == 0x5)) {
2331 mii_tick(mii);
2332 if (!(mii->mii_media_status & IFM_ACTIVE0x0000000000000002ULL))
2333 sc->dc_link = 0;
2334 }
2335 }
2336 } else
2337 mii_tick(mii);
2338
2339 /*
2340 * When the init routine completes, we expect to be able to send
2341 * packets right away, and in fact the network code will send a
2342 * gratuitous ARP the moment the init routine marks the interface
2343 * as running. However, even though the MAC may have been initialized,
2344 * there may be a delay of a few seconds before the PHY completes
2345 * autonegotiation and the link is brought up. Any transmissions
2346 * made during that delay will be lost. Dealing with this is tricky:
2347 * we can't just pause in the init routine while waiting for the
2348 * PHY to come ready since that would bring the whole system to
2349 * a screeching halt for several seconds.
2350 *
2351 * What we do here is prevent the TX start routine from sending
2352 * any packets until a link has been established. After the
2353 * interface has been initialized, the tick routine will poll
2354 * the state of the PHY until the IFM_ACTIVE flag is set. Until
2355 * that time, packets will stay in the send queue, and once the
2356 * link comes up, they will be flushed out to the wire.
2357 */
2358 if (!sc->dc_link && mii->mii_media_status & IFM_ACTIVE0x0000000000000002ULL &&
2359 IFM_SUBTYPE(mii->mii_media_active)((mii->mii_media_active) & 0x00000000000000ffULL) != IFM_NONE2ULL) {
2360 sc->dc_link++;
2361 if (ifq_empty(&ifp->if_snd)(((&ifp->if_snd)->ifq_len) == 0) == 0)
2362 dc_start(ifp);
2363 }
2364
2365 if (sc->dc_flags & DC_21143_NWAY0x00000800 && !sc->dc_link)
2366 timeout_add_msec(&sc->dc_tick_tmo, 100);
2367 else
2368 timeout_add_sec(&sc->dc_tick_tmo, 1);
2369
2370 splx(s)spllower(s);
2371}
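
/*
 * Editor's note (not part of dc.c): the long comment above boils down
 * to a simple gate - packets are held in the send queue until the tick
 * routine has seen the PHY report an active link, and only then is the
 * transmitter kicked.  A minimal sketch of that gate; struct link_gate
 * and the helper are illustrative, not driver types.
 */
struct link_gate {
	int link_up;	/* set once the PHY reports an active medium */
	int queued;	/* nonzero if packets are waiting in the send queue */
};

/* Returns 1 if the held packets should be flushed to the chip now. */
int
tick_check_link(struct link_gate *g, int media_active)
{
	if (!g->link_up && media_active) {
		g->link_up = 1;
		return (g->queued != 0);
	}
	return (0);
}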
2372
2373/* A transmit underrun has occurred. Back off the transmit threshold,
2374 * or switch to store and forward mode if we have to.
2375 */
2376void
2377dc_tx_underrun(struct dc_softc *sc)
2378{
2379 u_int32_t isr;
2380 int i;
2381
2382 if (DC_IS_DAVICOM(sc)(sc->dc_type == 0x8))
2383 dc_init(sc);
2384
2385 if (DC_IS_INTEL(sc)(sc->dc_type == 0x4 || sc->dc_type == 0xD)) {
2386 /*
2387 * The real 21143 requires that the transmitter be idle
2388 * in order to change the transmit threshold or store
2389 * and forward state.
2390 */
2391 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x30), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x30))) &
~(0x00002000))))
;
2392
2393 for (i = 0; i < DC_TIMEOUT1000; i++) {
2394 isr = CSR_READ_4(sc, DC_ISR)((sc->dc_btag)->read_4((sc->dc_bhandle), (0x28)));
2395 if (isr & DC_ISR_TX_IDLE0x00000002)
2396 break;
2397 DELAY(10)(*delay_func)(10);
2398 }
2399 if (i == DC_TIMEOUT1000) {
2400 printf("%s: failed to force tx to idle state\n",
2401 sc->sc_dev.dv_xname);
2402 dc_init(sc);
2403 }
2404 }
2405
2406 sc->dc_txthresh += DC_TXTHRESH_INC0x00004000;
2407 if (sc->dc_txthresh > DC_TXTHRESH_MAX0x0000C000) {
2408 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x30), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x30))) | (
0x00200000))))
;
2409 } else {
2410 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_THRESH)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x30), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x30))) &
~(0x0000C000))))
;
2411 DC_SETBIT(sc, DC_NETCFG, sc->dc_txthresh)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x30), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x30))) | (
sc->dc_txthresh))))
;
2412 }
2413
2414 if (DC_IS_INTEL(sc)(sc->dc_type == 0x4 || sc->dc_type == 0xD))
2415 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x30), ((
(sc->dc_btag)->read_4((sc->dc_bhandle), (0x30))) | (
0x00002000))))
;
2416
2417 return;
2418}
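
/*
 * Editor's note (not part of dc.c): the recovery policy above is "raise
 * the TX FIFO threshold by one step per underrun; once it would pass
 * the maximum, give up and switch to store-and-forward".  A sketch of
 * that policy as a pure function on a NETCFG image; the constants echo
 * the values visible above, but the helper itself is illustrative.
 */
#include <stdint.h>

#define THRESH_FIELD	0x0000C000u	/* DC_NETCFG_TX_THRESH */
#define THRESH_STEP	0x00004000u	/* DC_TXTHRESH_INC */
#define THRESH_MAX	0x0000C000u	/* DC_TXTHRESH_MAX */
#define STORE_FWD	0x00200000u	/* DC_NETCFG_STORENFWD */

uint32_t
bump_tx_threshold(uint32_t netcfg, uint32_t *txthresh)
{
	*txthresh += THRESH_STEP;
	if (*txthresh > THRESH_MAX)
		return (netcfg | STORE_FWD);
	/* Clear the old threshold field, then install the new value. */
	return ((netcfg & ~THRESH_FIELD) | *txthresh);
}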
2419
2420int
2421dc_intr(void *arg)
2422{
2423 struct dc_softc *sc;
2424 struct ifnet *ifp;
2425 u_int32_t status, ints;
2426 int claimed = 0;
2427
2428 sc = arg;
2429
2430 ifp = &sc->sc_arpcom.ac_if;
2431
2432 ints = CSR_READ_4(sc, DC_ISR)((sc->dc_btag)->read_4((sc->dc_bhandle), (0x28)));
2433 if ((ints & DC_INTRS(0x00000040|0x00000001|0x00000080|0x00000200| 0x00000002|0x00000004
|0x00000020|0x00002000| 0x00008000|0x00010000 )
) == 0)
2434 return (claimed);
2435 if (ints == 0xffffffff)
2436 return (0);
2437
2438 /* Suppress unwanted interrupts */
2439 if (!(ifp->if_flags & IFF_UP0x1)) {
2440 if (CSR_READ_4(sc, DC_ISR)((sc->dc_btag)->read_4((sc->dc_bhandle), (0x28))) & DC_INTRS(0x00000040|0x00000001|0x00000080|0x00000200| 0x00000002|0x00000004
|0x00000020|0x00002000| 0x00008000|0x00010000 )
)
2441 dc_stop(sc, 0);
2442 return (claimed);
2443 }
2444
2445 /* Disable interrupts. */
2446 CSR_WRITE_4(sc, DC_IMR, 0x00000000)((sc->dc_btag)->write_4((sc->dc_bhandle), (0x38), (0x00000000
)))
;
2447
2448 while (((status = CSR_READ_4(sc, DC_ISR)((sc->dc_btag)->read_4((sc->dc_bhandle), (0x28)))) & DC_INTRS(0x00000040|0x00000001|0x00000080|0x00000200| 0x00000002|0x00000004
|0x00000020|0x00002000| 0x00008000|0x00010000 )
) &&
2449 status != 0xFFFFFFFF &&
2450 (ifp->if_flags & IFF_RUNNING0x40)) {
2451
2452 claimed = 1;
2453 CSR_WRITE_4(sc, DC_ISR, status);
2454
2455 if (status & DC_ISR_RX_OK) {
2456 if (dc_rxeof(sc) == 0) {
2457 while(dc_rx_resync(sc))
2458 dc_rxeof(sc);
2459 }
2460 }
2461
2462 if (status & (DC_ISR_TX_OK|DC_ISR_TX_NOBUF))
2463 dc_txeof(sc);
2464
2465 if (status & DC_ISR_TX_IDLE) {
2466 dc_txeof(sc);
2467 if (sc->dc_cdata.dc_tx_cnt) {
2468 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON);
2469 CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF);
2470 }
2471 }
2472
2473 if (status & DC_ISR_TX_UNDERRUN)
2474 dc_tx_underrun(sc);
2475
2476 if ((status & DC_ISR_RX_WATDOGTIMEO)
2477 || (status & DC_ISR_RX_NOBUF)) {
2478 if (dc_rxeof(sc) == 0) {
2479 while(dc_rx_resync(sc))
2480 dc_rxeof(sc);
2481 }
2482 }
2483
2484 if (status & DC_ISR_BUS_ERR)
2485 dc_init(sc);
2486 }
2487
2488 /* Re-enable interrupts. */
2489 CSR_WRITE_4(sc, DC_IMR, DC_INTRS);
2490
2491 if (ifq_empty(&ifp->if_snd) == 0)
2492 dc_start(ifp);
2493
2494 return (claimed);
2495}
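/*
 * Interrupt service pattern above: read DC_ISR once and bail out if none
 * of the DC_INTRS bits are set or if the register reads back 0xffffffff
 * (device gone); mask DC_IMR, then loop acknowledging each status word by
 * writing it back to DC_ISR while dispatching RX completions, TX
 * completions, underruns and bus errors; finally re-enable DC_IMR and
 * kick dc_start() if the send queue is not empty.
 */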
2496
2497/*
2498 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
2499 * pointers to the fragment pointers.
2500 */
2501int
2502dc_encap(struct dc_softc *sc, bus_dmamap_t map, struct mbuf *m, u_int32_t *idx)
2503{
2504 struct dc_desc *f = NULL;
2505 int frag, cur, cnt = 0, i;
2506
2507 cur = frag = *idx;
2508
2509 for (i = 0; i < map->dm_nsegs; i++) {
2510 f = &sc->dc_ldata->dc_tx_list[frag];
2511 f->dc_ctl = htole32(DC_TXCTL_TLINK | map->dm_segs[i].ds_len);
2512 if (cnt == 0) {
2513 f->dc_status = htole32(0);
2514 f->dc_ctl |= htole32(DC_TXCTL_FIRSTFRAG);
2515 } else
2516 f->dc_status = htole32(DC_TXSTAT_OWN);
2517 f->dc_data = htole32(map->dm_segs[i].ds_addr);
2518 cur = frag;
2519 DC_INC(frag, DC_TX_LIST_CNT);
2520 cnt++;
2521 }
2522
2523 sc->dc_cdata.dc_tx_cnt += cnt;
2524 sc->dc_cdata.dc_tx_chain[cur].sd_mbuf = m;
2525 sc->sc_tx_sparemap = sc->dc_cdata.dc_tx_chain[cur].sd_map;
2526 sc->dc_cdata.dc_tx_chain[cur].sd_map = map;
2527 sc->dc_ldata->dc_tx_list[cur].dc_ctl |= htole32(DC_TXCTL_LASTFRAG);
2528 if (sc->dc_flags & DC_TX_INTR_FIRSTFRAG)
2529 sc->dc_ldata->dc_tx_list[*idx].dc_ctl |=
2530 htole32(DC_TXCTL_FINT);
2531 if (sc->dc_flags & DC_TX_INTR_ALWAYS)
2532 sc->dc_ldata->dc_tx_list[cur].dc_ctl |=
2533 htole32(DC_TXCTL_FINT);
2534 if (sc->dc_flags & DC_TX_USE_TX_INTR && sc->dc_cdata.dc_tx_cnt > 64)
2535 sc->dc_ldata->dc_tx_list[cur].dc_ctl |=
2536 htole32(DC_TXCTL_FINT);
2537 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2538 BUS_DMASYNC_PREWRITE);
2539
2540 sc->dc_ldata->dc_tx_list[*idx].dc_status = htole32(DC_TXSTAT_OWN);
2541
2542 *idx = frag;
2543
2544 return (0);
2545}
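/*
 * Note on the descriptor hand-off above: every fragment after the first
 * is given DC_TXSTAT_OWN as it is filled in, but the first descriptor's
 * OWN bit is written only at line 2540, after the whole chain (and the
 * LASTFRAG/FINT control bits) has been completed and the map synced
 * PREWRITE, so the chip never sees a partially built chain.
 */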
2546
2547/*
2548 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
2549 * to the mbuf data regions directly in the transmit lists. We also save a
2550 * copy of the pointers since the transmit list fragment pointers are
2551 * physical addresses.
2552 */
2553
2554static inline int
2555dc_fits(struct dc_softc *sc, int idx, bus_dmamap_t map)
2556{
2557 if (sc->dc_flags & DC_TX_ADMTEK_WAR) {
2558 if (sc->dc_cdata.dc_tx_prod != idx &&
2559 idx + map->dm_nsegs >= DC_TX_LIST_CNT)
2560 return (0);
2561 }
2562
2563 if (sc->dc_cdata.dc_tx_cnt + map->dm_nsegs + 5 > DC_TX_LIST_CNT)
2564 return (0);
2565
2566 return (1);
2567}
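/*
 * Stand-alone sketch (not part of dc.c) of the headroom test in dc_fits()
 * above: with the 256-entry TX ring (DC_TX_LIST_CNT as expanded in this
 * build), a packet of nsegs DMA segments is accepted only while
 * tx_cnt + nsegs + 5 still fits in the ring. Reading the "+ 5" as slack
 * for a setup frame and ring bookkeeping is an assumption; the constant
 * itself comes from line 2563. The SKETCH_* names are local to this
 * example.
 */
#include <stdio.h>

#define SKETCH_TX_LIST_CNT 256

static int
sketch_fits(int tx_cnt, int nsegs)
{
    /* Mirrors the check at line 2563: reject once the slack is gone. */
    if (tx_cnt + nsegs + 5 > SKETCH_TX_LIST_CNT)
        return (0);
    return (1);
}

int
main(void)
{
    printf("fits(200, 10) = %d\n", sketch_fits(200, 10)); /* 1 */
    printf("fits(250, 10) = %d\n", sketch_fits(250, 10)); /* 0 */
    return (0);
}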
2568
2569void
2570dc_start(struct ifnet *ifp)
2571{
2572 struct dc_softc *sc = ifp->if_softc;
2573 bus_dmamap_t map;
2574 struct mbuf *m;
2575 int idx;
2576
2577 if (!sc->dc_link && ifq_len(&ifp->if_snd) < 10)
2578 return;
2579
2580 if (ifq_is_oactive(&ifp->if_snd))
2581 return;
2582
2583 idx = sc->dc_cdata.dc_tx_prod;
2584
2585 bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
2586 offsetof(struct dc_list_data, dc_tx_list),
2587 sizeof(struct dc_desc) * DC_TX_LIST_CNT,
2588 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2589
2590 for (;;) {
2591 m = ifq_deq_begin(&ifp->if_snd);
2592 if (m == NULL)
2593 break;
2594
2595 map = sc->sc_tx_sparemap;
2596 switch (bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
2597 BUS_DMA_NOWAIT | BUS_DMA_OVERRUN)) {
2598 case 0:
2599 break;
2600 case EFBIG:
2601 if (m_defrag(m, M_DONTWAIT) == 0 &&
2602 bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
2603 BUS_DMA_NOWAIT | BUS_DMA_OVERRUN) == 0)
2604 break;
2605
2606 /* FALLTHROUGH */
2607 default:
2608 ifq_deq_commit(&ifp->if_snd, m);
2609 m_freem(m);
2610 ifp->if_oerrors++;
2611 continue;
2612 }
2613
2614 if (!dc_fits(sc, idx, map)) {
2615 bus_dmamap_unload(sc->sc_dmat, map);
2616 ifq_deq_rollback(&ifp->if_snd, m);
2617 ifq_set_oactive(&ifp->if_snd);
2618 break;
2619 }
2620
2621 /* now we are committed to transmit the packet */
2622 ifq_deq_commit(&ifp->if_snd, m);
2623
2624 if (dc_encap(sc, map, m, &idx) != 0) {
2625 m_freem(m);
2626 ifp->if_oerrors++;
2627 continue;
2628 }
2629
2630 /*
2631 * If there's a BPF listener, bounce a copy of this frame
2632 * to him.
2633 */
2634 #if NBPFILTER > 0
2635 if (ifp->if_bpf)
2636 bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
2637 #endif
2638
2639 if (sc->dc_flags & DC_TX_ONE) {
2640 ifq_set_oactive(&ifp->if_snd);
2641 break;
2642 }
2643 }
2644
2645 bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
2646 offsetof(struct dc_list_data, dc_tx_list),
2647 sizeof(struct dc_desc) * DC_TX_LIST_CNT,
2648 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2649
2650 if (idx == sc->dc_cdata.dc_tx_prod)
2651 return;
2652
2653 /* Transmit */
2654 sc->dc_cdata.dc_tx_prod = idx;
2655 if (!(sc->dc_flags & DC_TX_POLL))
2656 CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF);
2657
2658 /*
2659 * Set a timeout in case the chip goes out to lunch.
2660 */
2661 ifp->if_timer = 5;
2662}
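/*
 * dc_start() brackets its descriptor edits with bus_dmamap_sync calls:
 * POSTREAD|POSTWRITE before the CPU touches the TX list and
 * PREREAD|PREWRITE before handing it back to the chip. An EFBIG result
 * from bus_dmamap_load_mbuf() is retried once through m_defrag(); any
 * other failure drops the packet and counts an output error. The
 * DC_TXSTART poke at the end is skipped for chips using TX polling
 * (DC_TX_POLL).
 */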
2663
2664void
2665dc_init(void *xsc)
2666{
2667 struct dc_softc *sc = xsc;
2668 struct ifnet *ifp = &sc->sc_arpcom.ac_if;
2669 struct mii_data *mii;
2670 int s;
2671
2672 s = splnet();
2673
2674 mii = &sc->sc_mii;
2675
2676 /*
2677 * Cancel pending I/O and free all RX/TX buffers.
2678 */
2679 dc_stop(sc, 0);
2680 dc_reset(sc);
2681
2682 /*
2683 * Set cache alignment and burst length.
2684 */
2685 if (DC_IS_ASIX(sc) || DC_IS_DAVICOM(sc))
2686 CSR_WRITE_4(sc, DC_BUSCTL, 0);
2687 else
2688 CSR_WRITE_4(sc, DC_BUSCTL, DC_BUSCTL_MRME|DC_BUSCTL_MRLE);
2689 /*
2690 * Evenly share the bus between receive and transmit process.
2691 */
2692 if (DC_IS_INTEL(sc))
2693 DC_SETBIT(sc, DC_BUSCTL, DC_BUSCTL_ARBITRATION);
2694 if (DC_IS_DAVICOM(sc) || DC_IS_INTEL(sc)) {
2695 DC_SETBIT(sc, DC_BUSCTL, DC_BURSTLEN_USECA);
2696 } else {
2697 DC_SETBIT(sc, DC_BUSCTL, DC_BURSTLEN_16LONG);
2698 }
2699 if (sc->dc_flags & DC_TX_POLL)
2700 DC_SETBIT(sc, DC_BUSCTL, DC_TXPOLL_1);
2701 switch(sc->dc_cachesize) {
2702 case 32:
2703 DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_32LONG);
2704 break;
2705 case 16:
2706 DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_16LONG);
2707 break;
2708 case 8:
2709 DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_8LONG);
2710 break;
2711 case 0:
2712 default:
2713 DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_NONE);
2714 break;
2715 }
2716
2717 if (sc->dc_flags & DC_TX_STORENFWD)
2718 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD);
2719 else {
2720 if (sc->dc_txthresh > DC_TXTHRESH_MAX) {
2721 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD);
2722 } else {
2723 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD);
2724 DC_SETBIT(sc, DC_NETCFG, sc->dc_txthresh);
2725 }
2726 }
2727
2728 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_NO_RXCRC);
2729 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_BACKOFF);
2730
2731 if (DC_IS_MACRONIX(sc) || DC_IS_PNICII(sc)) {
2732 /*
2733 * The app notes for the 98713 and 98715A say that
2734 * in order to have the chips operate properly, a magic
2735 * number must be written to CSR16. Macronix does not
2736 * document the meaning of these bits so there's no way
2737 * to know exactly what they do. The 98713 has a magic
2738 * number all its own; the rest all use a different one.
2739 */
2740 DC_CLRBIT(sc, DC_MX_MAGICPACKET, 0xFFFF0000);
2741 if (sc->dc_type == DC_TYPE_98713)
2742 DC_SETBIT(sc, DC_MX_MAGICPACKET, DC_MX_MAGIC_98713);
2743 else
2744 DC_SETBIT(sc, DC_MX_MAGICPACKET, DC_MX_MAGIC_98715);
2745 }
2746
2747 if (DC_IS_XIRCOM(sc)) {
2748 CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_WRITE_EN | DC_SIAGP_INT1_EN |
2749 DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT);
2750 DELAY(10);
2751 CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_INT1_EN |
2752 DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT);
2753 DELAY(10);
2754 }
2755
2756 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_THRESH);
2757 DC_SETBIT(sc, DC_NETCFG, DC_TXTHRESH_MIN);
2758
2759 /* Init circular RX list. */
2760 if (dc_list_rx_init(sc) == ENOBUFS) {
2761 printf("%s: initialization failed: no "
2762 "memory for rx buffers\n", sc->sc_dev.dv_xname);
2763 dc_stop(sc, 0);
2764 splx(s);
2765 return;
2766 }
2767
2768 /*
2769 * Init tx descriptors.
2770 */
2771 dc_list_tx_init(sc);
2772
2773 /*
2774 * Sync down both lists initialized.
2775 */
2776 bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
2777 0, sc->sc_listmap->dm_mapsize,
2778 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2779
2780 /*
2781 * Load the address of the RX list.
2782 */
2783 CSR_WRITE_4(sc, DC_RXADDR, sc->sc_listmap->dm_segs[0].ds_addr +
2784 offsetof(struct dc_list_data, dc_rx_list[0]));
2785 CSR_WRITE_4(sc, DC_TXADDR, sc->sc_listmap->dm_segs[0].ds_addr +
2786 offsetof(struct dc_list_data, dc_tx_list[0]));
2787
2788 /*
2789 * Enable interrupts.
2790 */
2791 CSR_WRITE_4(sc, DC_IMR, DC_INTRS);
2792 CSR_WRITE_4(sc, DC_ISR, 0xFFFFFFFF);
2793
2794 /* Enable transmitter. */
2795 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON);
2796
2797 /*
2798 * If this is an Intel 21143 and we're not using the
2799 * MII port, program the LED control pins so we get
2800 * link and activity indications.
2801 */
2802 if (sc->dc_flags & DC_TULIP_LEDS) {
2803 CSR_WRITE_4(sc, DC_WATCHDOG,
2804 DC_WDOG_CTLWREN|DC_WDOG_LINK|DC_WDOG_ACTIVITY);
2805 CSR_WRITE_4(sc, DC_WATCHDOG, 0);
2806 }
2807
2808 /*
2809 * Load the RX/multicast filter. We do this sort of late
2810 * because the filter programming scheme on the 21143 and
2811 * some clones requires DMAing a setup frame via the TX
2812 * engine, and we need the transmitter enabled for that.
2813 */
2814 dc_setfilt(sc);
2815
2816 /* Enable receiver. */
2817 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ON);
2818 CSR_WRITE_4(sc, DC_RXSTART, 0xFFFFFFFF);
2819
2820 mii_mediachg(mii);
2821 dc_setcfg(sc, sc->dc_if_media);
2822
2823 ifp->if_flags |= IFF_RUNNING;
2824 ifq_clr_oactive(&ifp->if_snd);
2825
2826 splx(s);
2827
2828 timeout_set(&sc->dc_tick_tmo, dc_tick, sc);
2829
2830 if (IFM_SUBTYPE(mii->mii_media.ifm_media) == IFM_HPNA_1)
2831 sc->dc_link = 1;
2832 else {
2833 if (sc->dc_flags & DC_21143_NWAY)
2834 timeout_add_msec(&sc->dc_tick_tmo, 100);
2835 else
2836 timeout_add_sec(&sc->dc_tick_tmo, 1);
2837 }
2838
2839#ifdef SRM_MEDIA
2840 if(sc->dc_srm_media) {
2841 struct ifreq ifr;
2842
2843 ifr.ifr_media = sc->dc_srm_media;
2844 ifmedia_ioctl(ifp, &ifr, &mii->mii_media, SIOCSIFMEDIA);
2845 sc->dc_srm_media = 0;
2846 }
2847#endif
2848}
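/*
 * Initialization order above matters: bus/burst/cache-alignment setup and
 * the TX threshold come first, the RX and TX rings are initialized and
 * their physical addresses loaded, interrupts are unmasked, and only then
 * is the transmitter enabled so that dc_setfilt() can DMA its setup frame
 * through the TX engine (see the comment at lines 2808-2813) before the
 * receiver is finally switched on and the media is (re)selected.
 */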
2849
2850/*
2851 * Set media options.
2852 */
2853int
2854dc_ifmedia_upd(struct ifnet *ifp)
2855{
2856 struct dc_softc *sc;
2857 struct mii_data *mii;
2858 struct ifmedia *ifm;
2859
2860 sc = ifp->if_softc;
2861 mii = &sc->sc_mii;
2862 mii_mediachg(mii);
2863
2864 ifm = &mii->mii_media;
2865
2866 if (DC_IS_DAVICOM(sc) &&
2867 IFM_SUBTYPE(ifm->ifm_media) == IFM_HPNA_1)
2868 dc_setcfg(sc, ifm->ifm_media);
2869 else
2870 sc->dc_link = 0;
2871
2872 return (0);
2873}
2874
2875/*
2876 * Report current media status.
2877 */
2878void
2879dc_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2880{
2881 struct dc_softc *sc;
2882 struct mii_data *mii;
2883 struct ifmedia *ifm;
2884
2885 sc = ifp->if_softc;
2886 mii = &sc->sc_mii;
2887 mii_pollstat(mii);
2888 ifm = &mii->mii_media;
2889 if (DC_IS_DAVICOM(sc)) {
2890 if (IFM_SUBTYPE(ifm->ifm_media) == IFM_HPNA_1) {
2891 ifmr->ifm_active = ifm->ifm_media;
2892 ifmr->ifm_status = 0;
2893 return;
2894 }
2895 }
2896 ifmr->ifm_active = mii->mii_media_active;
2897 ifmr->ifm_status = mii->mii_media_status;
2898}
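/*
 * Both media routines above special-case Davicom parts configured for
 * HomePNA (IFM_HPNA_1): dc_ifmedia_upd() programs the chip directly via
 * dc_setcfg() instead of relying on MII autonegotiation, and
 * dc_ifmedia_sts() reports the selected media with an empty status word
 * rather than the MII-derived state.
 */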
2899
2900int
2901dc_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
2902{
2903 struct dc_softc *sc = ifp->if_softc;
2904 struct ifreq *ifr = (struct ifreq *) data;
2905 int s, error = 0;
2906
2907 s = splnet();
2908
2909 switch(command) {
2910 case SIOCSIFADDR:
2911 ifp->if_flags |= IFF_UP;
2912 if (!(ifp->if_flags & IFF_RUNNING))
2913 dc_init(sc);
2914 break;
2915 case SIOCSIFFLAGS:
2916 if (ifp->if_flags & IFF_UP) {
2917 if (ifp->if_flags & IFF_RUNNING)
2918 error = ENETRESET;
2919 else {
2920 sc->dc_txthresh = 0;
2921 dc_init(sc);
2922 }
2923 } else {
2924 if (ifp->if_flags & IFF_RUNNING)
2925 dc_stop(sc, 0);
2926 }
2927 break;
2928 case SIOCGIFMEDIA:
2929 case SIOCSIFMEDIA:
2930 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, command);
2931#ifdef SRM_MEDIA
2932 if (sc->dc_srm_media)
2933 sc->dc_srm_media = 0;
2934#endif
2935 break;
2936 default:
2937 error = ether_ioctl(ifp, &sc->sc_arpcom, command, data);
2938 }
2939
2940 if (error == ENETRESET) {
2941 if (ifp->if_flags & IFF_RUNNING)
2942 dc_setfilt(sc);
2943 error = 0;
2944 }
2945
2946 splx(s);
2947 return (error);
2948}
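/*
 * The ENETRESET convention above: SIOCSIFFLAGS on an already-running
 * interface (and, typically, multicast list changes coming back from
 * ether_ioctl()) return ENETRESET, which is mapped to a dc_setfilt()
 * call that reprograms the RX filter instead of a full reinitialization.
 */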
2949
2950void
2951dc_watchdog(struct ifnet *ifp)
2952{
2953 struct dc_softc *sc;
2954
2955 sc = ifp->if_softc;
2956
2957 ifp->if_oerrors++;
2958 printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
2959
2960 dc_init(sc);
2961
2962 if (ifq_empty(&ifp->if_snd) == 0)
2963 dc_start(ifp);
2964}
2965
2966/*
2967 * Stop the adapter and free any mbufs allocated to the
2968 * RX and TX lists.
2969 */
2970void
2971dc_stop(struct dc_softc *sc, int softonly)
2972{
2973 struct ifnet *ifp;
2974 u_int32_t isr;
2975 int i;
2976
2977 ifp = &sc->sc_arpcom.ac_if;
2978 ifp->if_timer = 0;
2979
2980 timeout_del(&sc->dc_tick_tmo);
2981
2982 ifp->if_flags &= ~IFF_RUNNING;
2983 ifq_clr_oactive(&ifp->if_snd);
2984
2985 if (!softonly) {
2986 DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_RX_ON|DC_NETCFG_TX_ON));
2987
2988 for (i = 0; i < DC_TIMEOUT; i++) {
2989 isr = CSR_READ_4(sc, DC_ISR);
2990 if ((isr & DC_ISR_TX_IDLE ||
2991 (isr & DC_ISR_TX_STATE) == DC_TXSTATE_RESET) &&
2992 (isr & DC_ISR_RX_STATE) == DC_RXSTATE_STOPPED)
2993 break;
2994 DELAY(10);
2995 }
2996
2997 if (i == DC_TIMEOUT) {
2998 if (!((isr & DC_ISR_TX_IDLE) ||
2999 (isr & DC_ISR_TX_STATE) == DC_TXSTATE_RESET) &&
3000 !DC_IS_ASIX(sc) && !DC_IS_DAVICOM(sc))
3001 printf("%s: failed to force tx to idle state\n",
3002 sc->sc_dev.dv_xname);
3003 if (!((isr & DC_ISR_RX_STATE) == DC_RXSTATE_STOPPED) &&
3004 !DC_HAS_BROKEN_RXSTATE(sc))
3005 printf("%s: failed to force rx to idle state\n",
3006 sc->sc_dev.dv_xname);
3007 }
3008
3009 CSR_WRITE_4(sc, DC_IMR, 0x00000000);
3010 CSR_WRITE_4(sc, DC_TXADDR, 0x00000000);
3011 CSR_WRITE_4(sc, DC_RXADDR, 0x00000000);
3012 sc->dc_link = 0;
3013 }
3014
3015 /*
3016 * Free data in the RX lists.
3017 */
3018 for (i = 0; i < DC_RX_LIST_CNT; i++) {
3019 if (sc->dc_cdata.dc_rx_chain[i].sd_map->dm_nsegs != 0) {
3020 bus_dmamap_t map = sc->dc_cdata.dc_rx_chain[i].sd_map;
3021
3022 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
3023 BUS_DMASYNC_POSTREAD);
3024 bus_dmamap_unload(sc->sc_dmat, map);
3025 }
3026 if (sc->dc_cdata.dc_rx_chain[i].sd_mbuf != NULL) {
3027 m_freem(sc->dc_cdata.dc_rx_chain[i].sd_mbuf);
3028 sc->dc_cdata.dc_rx_chain[i].sd_mbuf = NULL;
3029 }
3030 }
3031 bzero(&sc->dc_ldata->dc_rx_list, sizeof(sc->dc_ldata->dc_rx_list));
3032
3033 /*
3034 * Free the TX list buffers.
3035 */
3036 for (i = 0; i < DC_TX_LIST_CNT; i++) {
3037 if (sc->dc_cdata.dc_tx_chain[i].sd_map->dm_nsegs != 0) {
3038 bus_dmamap_t map = sc->dc_cdata.dc_tx_chain[i].sd_map;
3039
3040 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
3041 BUS_DMASYNC_POSTWRITE);
3042 bus_dmamap_unload(sc->sc_dmat, map);
3043 }
3044 if (sc->dc_cdata.dc_tx_chain[i].sd_mbuf != NULL) {
3045 if (sc->dc_ldata->dc_tx_list[i].dc_ctl &
3046 htole32(DC_TXCTL_SETUP)) {
3047 sc->dc_cdata.dc_tx_chain[i].sd_mbuf = NULL;
3048 continue;
3049 }
3050 m_freem(sc->dc_cdata.dc_tx_chain[i].sd_mbuf);
3051 sc->dc_cdata.dc_tx_chain[i].sd_mbuf = NULL;
3052 }
3053 }
3054 bzero(&sc->dc_ldata->dc_tx_list, sizeof(sc->dc_ldata->dc_tx_list));
3055
3056 bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
3057 0, sc->sc_listmap->dm_mapsize,
3058 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3059}
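/*
 * Teardown order in dc_stop(): unless softonly is set, the RX/TX engines
 * are turned off and polled (up to DC_TIMEOUT iterations) until the ISR
 * state fields report idle/stopped, then DC_IMR is masked and the list
 * base registers are cleared. After that the RX and TX chains are
 * unloaded and freed; TX descriptors flagged DC_TXCTL_SETUP (filter setup
 * frames) carry no mbuf to free and are only unreferenced.
 */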
3060
3061int
3062dc_activate(struct device *self, int act)
3063{
3064 struct dc_softc *sc = (struct dc_softc *)self;
3065 struct ifnet *ifp = &sc->sc_arpcom.ac_if;
3066 int rv = 0;
3067
3068 switch (act) {
3069 case DVACT_SUSPEND:
3070 if (ifp->if_flags & IFF_RUNNING)
3071 dc_stop(sc, 0);
3072 rv = config_activate_children(self, act);
3073 break;
3074 case DVACT_RESUME:
3075 if (ifp->if_flags & IFF_UP)
3076 dc_init(sc);
3077 break;
3078 default:
3079 rv = config_activate_children(self, act);
3080 break;
3081 }
3082 return (rv);
3083}
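/*
 * Power-management hook: on DVACT_SUSPEND a running interface is stopped
 * before the children are suspended, and on DVACT_RESUME it is brought
 * back up with dc_init() whenever the interface was administratively up
 * (IFF_UP).
 */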
3084
3085int
3086dc_detach(struct dc_softc *sc)
3087{
3088 struct ifnet *ifp = &sc->sc_arpcom.ac_if;
3089 int i;
3090
3091 dc_stop(sc, 1);
3092
3093 if (LIST_FIRST(&sc->sc_mii.mii_phys) != NULL)
3094 mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
3095
3096 if (sc->dc_srom)
3097 free(sc->dc_srom, M_DEVBUF, sc->dc_sromsize);
3098
3099 for (i = 0; i < DC_RX_LIST_CNT; i++)
3100 bus_dmamap_destroy(sc->sc_dmat, sc->dc_cdata.dc_rx_chain[i].sd_map);
3101 if (sc->sc_rx_sparemap)
3102 bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_sparemap);
3103 for (i = 0; i < DC_TX_LIST_CNT; i++)
3104 bus_dmamap_destroy(sc->sc_dmat, sc->dc_cdata.dc_tx_chain[i].sd_map);
3105 if (sc->sc_tx_sparemap)
3106 bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_sparemap);
3107
3108 /// XXX bus_dmamap_sync
3109 bus_dmamap_unload(sc->sc_dmat, sc->sc_listmap);
3110 bus_dmamem_unmap(sc->sc_dmat, sc->sc_listkva, sc->sc_listnseg);
3111 bus_dmamap_destroy(sc->sc_dmat, sc->sc_listmap);
3112 bus_dmamem_free(sc->sc_dmat, sc->sc_listseg, sc->sc_listnseg);
3113
3114 ether_ifdetach(ifp);
3115 if_detach(ifp);
3116 return (0);
3117}
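/*
 * dc_detach() releases resources roughly in reverse order of attach:
 * detach any PHYs, free the SROM copy, destroy the per-descriptor DMA
 * maps and spare maps, then unload, unmap, destroy and free the shared
 * descriptor list, and finally detach the interface.
 */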
3118
3119struct cfdriver dc_cd = {
3120 0, "dc", DV_IFNET
3121};