Bug Summary

File: dev/ic/re.c
Warning: line 1753, column 16
Access to field 'rl_cmdstat' results in a dereference of an undefined pointer value (loaded from variable 'd')
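The flagged line (1753) falls outside the excerpt below. As an illustration only, the class of defect this checker reports looks like the following sketch, where a descriptor pointer is left unassigned on one path and then dereferenced (the struct name is borrowed from the driver; the function itself is hypothetical):

#include <stdint.h>

struct rl_desc { uint32_t rl_cmdstat; };

uint32_t
undefined_deref_sketch(int have_desc, struct rl_desc *ring)
{
	struct rl_desc *d;		/* no initializer */

	if (have_desc)
		d = ring;
	/* when have_desc == 0, 'd' is still undefined here... */
	return (d->rl_cmdstat);	/* ...so this load is what gets flagged */
}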

Annotated Source Code


clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name re.c -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/lib/clang/13.0.0 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -D CONFIG_DRM_AMD_DC_DCN3_0 -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant -fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free 
-fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /usr/obj/sys/arch/amd64/compile/GENERIC.MP/scan-build/2022-01-12-131800-47421-1 -x c /usr/src/sys/dev/ic/re.c
1/* $OpenBSD: re.c,v 1.211 2021/05/17 11:59:53 visa Exp $ */
2/* $FreeBSD: if_re.c,v 1.31 2004/09/04 07:54:05 ru Exp $ */
3/*
4 * Copyright (c) 1997, 1998-2003
5 * Bill Paul <wpaul@windriver.com>. All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by Bill Paul.
18 * 4. Neither the name of the author nor the names of any co-contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32 * THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35/*
36 * Realtek 8139C+/8169/8169S/8110S PCI NIC driver
37 *
38 * Written by Bill Paul <wpaul@windriver.com>
39 * Senior Networking Software Engineer
40 * Wind River Systems
41 */
42
43/*
44 * This driver is designed to support Realtek's next generation of
45 * 10/100 and 10/100/1000 PCI ethernet controllers. There are currently
46 * seven devices in this family: the RTL8139C+, the RTL8169, the RTL8169S,
47 * RTL8110S, the RTL8168, the RTL8111 and the RTL8101E.
48 *
49 * The 8139C+ is a 10/100 ethernet chip. It is backwards compatible
50 * with the older 8139 family, however it also supports a special
51 * C+ mode of operation that provides several new performance enhancing
52 * features. These include:
53 *
54 * o Descriptor based DMA mechanism. Each descriptor represents
55 * a single packet fragment. Data buffers may be aligned on
56 * any byte boundary.
57 *
58 * o 64-bit DMA
59 *
60 * o TCP/IP checksum offload for both RX and TX
61 *
62 * o High and normal priority transmit DMA rings
63 *
64 * o VLAN tag insertion and extraction
65 *
66 * o TCP large send (segmentation offload)
67 *
68 * Like the 8139, the 8139C+ also has a built-in 10/100 PHY. The C+
69 * programming API is fairly straightforward. The RX filtering, EEPROM
70 * access and PHY access is the same as it is on the older 8139 series
71 * chips.
72 *
73 * The 8169 is a 64-bit 10/100/1000 gigabit ethernet MAC. It has almost the
74 * same programming API and feature set as the 8139C+ with the following
75 * differences and additions:
76 *
77 * o 1000Mbps mode
78 *
79 * o Jumbo frames
80 *
81 * o GMII and TBI ports/registers for interfacing with copper
82 * or fiber PHYs
83 *
84 * o RX and TX DMA rings can have up to 1024 descriptors
85 * (the 8139C+ allows a maximum of 64)
86 *
87 * o Slight differences in register layout from the 8139C+
88 *
89 * The TX start and timer interrupt registers are at different locations
90 * on the 8169 than they are on the 8139C+. Also, the status word in the
91 * RX descriptor has a slightly different bit layout. The 8169 does not
92 * have a built-in PHY. Most reference boards use a Marvell 88E1000 'Alaska'
93 * copper gigE PHY.
94 *
95 * The 8169S/8110S 10/100/1000 devices have built-in copper gigE PHYs
96 * (the 'S' stands for 'single-chip'). These devices have the same
97 * programming API as the older 8169, but also have some vendor-specific
98 * registers for the on-board PHY. The 8110S is a LAN-on-motherboard
99 * part designed to be pin-compatible with the Realtek 8100 10/100 chip.
100 *
101 * This driver takes advantage of the RX and TX checksum offload and
102 * VLAN tag insertion/extraction features. It also implements TX
103 * interrupt moderation using the timer interrupt registers, which
104 * significantly reduces TX interrupt load. There is also support
105 * for jumbo frames, however the 8169/8169S/8110S can not transmit
106 * jumbo frames larger than 7440, so the max MTU possible with this
107 * driver is 7422 bytes.
108 */
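As a worked check of the 7422-byte figure above (a standalone sketch using the standard Ethernet constants, not driver code): the largest transmittable frame is 7440 bytes, and the MTU excludes the 14-byte Ethernet header and the 4-byte CRC.

#include <stdio.h>

#define ETHER_HDR_LEN	((6 * 2) + 2)	/* dst + src + ethertype = 14 */
#define ETHER_CRC_LEN	4

int
main(void)
{
	int max_frame = 7440;	/* largest TX frame on 8169/8169S/8110S */

	/* 7440 - 14 - 4 = 7422 */
	printf("max mtu = %d\n", max_frame - ETHER_HDR_LEN - ETHER_CRC_LEN);
	return (0);
}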
109
110#include "bpfilter.h"
111#include "vlan.h"
112
113#include <sys/param.h>
114#include <sys/endian.h>
115#include <sys/systm.h>
116#include <sys/sockio.h>
117#include <sys/mbuf.h>
118#include <sys/malloc.h>
119#include <sys/kernel.h>
120#include <sys/device.h>
121#include <sys/timeout.h>
122#include <sys/socket.h>
123#include <sys/atomic.h>
124
125#include <machine/bus.h>
126
127#include <net/if.h>
128#include <net/if_media.h>
129
130#include <netinet/in.h>
131#include <netinet/ip.h>
132#include <netinet/if_ether.h>
133
134#if NBPFILTER > 0
135#include <net/bpf.h>
136#endif
137
138#include <dev/mii/mii.h>
139#include <dev/mii/miivar.h>
140
141#include <dev/pci/pcidevs.h>
142
143#include <dev/ic/rtl81x9reg.h>
144#include <dev/ic/revar.h>
145
146#ifdef RE_DEBUG
147int redebug = 0;
148#define DPRINTF(x) do { if (redebug) printf x; } while (0)
149#else
150#define DPRINTF(x)
151#endif
152
153static inline void re_set_bufaddr(struct rl_desc *, bus_addr_t);
154
155int re_encap(struct rl_softc *, unsigned int, struct mbuf *);
156
157int re_newbuf(struct rl_softc *);
158int re_rx_list_init(struct rl_softc *);
159void re_rx_list_fill(struct rl_softc *);
160int re_tx_list_init(struct rl_softc *);
161int re_rxeof(struct rl_softc *);
162int re_txeof(struct rl_softc *);
163void re_tick(void *);
164void re_start(struct ifqueue *);
165void re_txstart(void *);
166int re_ioctl(struct ifnet *, u_long, caddr_t);
167void re_watchdog(struct ifnet *);
168int re_ifmedia_upd(struct ifnet *);
169void re_ifmedia_sts(struct ifnet *, struct ifmediareq *);
170
171void re_set_jumbo(struct rl_softc *);
172
173void re_eeprom_putbyte(struct rl_softc *, int);
174void re_eeprom_getword(struct rl_softc *, int, u_int16_t *);
175void re_read_eeprom(struct rl_softc *, caddr_t, int, int);
176
177int re_gmii_readreg(struct device *, int, int);
178void re_gmii_writereg(struct device *, int, int, int);
179
180int re_miibus_readreg(struct device *, int, int);
181void re_miibus_writereg(struct device *, int, int, int);
182void re_miibus_statchg(struct device *);
183
184void re_iff(struct rl_softc *);
185
186void re_setup_hw_im(struct rl_softc *);
187void re_setup_sim_im(struct rl_softc *);
188void re_disable_hw_im(struct rl_softc *);
189void re_disable_sim_im(struct rl_softc *);
190void re_config_imtype(struct rl_softc *, int);
191void re_setup_intr(struct rl_softc *, int, int);
192#ifndef SMALL_KERNEL
193int re_wol(struct ifnet*, int);
194#endif
195
196void in_delayed_cksum(struct mbuf *);
197
198struct cfdriver re_cd = {
199 0, "re", DV_IFNET
200};
201
202#define EE_SET(x)					\
203	CSR_WRITE_1(sc, RL_EECMD,			\
204	    CSR_READ_1(sc, RL_EECMD) | x)
205
206#define EE_CLR(x)					\
207	CSR_WRITE_1(sc, RL_EECMD,			\
208	    CSR_READ_1(sc, RL_EECMD) & ~x)
209
210#define RL_FRAMELEN(mtu)				\
211	(mtu + ETHER_HDR_LEN + ETHER_CRC_LEN +		\
212	    ETHER_VLAN_ENCAP_LEN)
213
214static const struct re_revision {
215 u_int32_t re_chipid;
216 const char *re_name;
217} re_revisions[] = {
218	{ RL_HWREV_8100,	"RTL8100" },
219	{ RL_HWREV_8100E,	"RTL8100E" },
220	{ RL_HWREV_8100E_SPIN2,	"RTL8100E 2" },
221	{ RL_HWREV_8101,	"RTL8101" },
222	{ RL_HWREV_8101E,	"RTL8101E" },
223	{ RL_HWREV_8102E,	"RTL8102E" },
224	{ RL_HWREV_8106E,	"RTL8106E" },
225	{ RL_HWREV_8401E,	"RTL8401E" },
226	{ RL_HWREV_8402,	"RTL8402" },
227	{ RL_HWREV_8411,	"RTL8411" },
228	{ RL_HWREV_8411B,	"RTL8411B" },
229	{ RL_HWREV_8102EL,	"RTL8102EL" },
230	{ RL_HWREV_8102EL_SPIN1, "RTL8102EL 1" },
231	{ RL_HWREV_8103E,	"RTL8103E" },
232	{ RL_HWREV_8110S,	"RTL8110S" },
233	{ RL_HWREV_8139CPLUS,	"RTL8139C+" },
234	{ RL_HWREV_8168B_SPIN1,	"RTL8168 1" },
235	{ RL_HWREV_8168B_SPIN2,	"RTL8168 2" },
236	{ RL_HWREV_8168B_SPIN3,	"RTL8168 3" },
237	{ RL_HWREV_8168C,	"RTL8168C/8111C" },
238	{ RL_HWREV_8168C_SPIN2,	"RTL8168C/8111C" },
239	{ RL_HWREV_8168CP,	"RTL8168CP/8111CP" },
240	{ RL_HWREV_8168F,	"RTL8168F/8111F" },
241	{ RL_HWREV_8168G,	"RTL8168G/8111G" },
242	{ RL_HWREV_8168GU,	"RTL8168GU/8111GU" },
243	{ RL_HWREV_8168H,	"RTL8168H/8111H" },
244	{ RL_HWREV_8105E,	"RTL8105E" },
245	{ RL_HWREV_8105E_SPIN1,	"RTL8105E" },
246	{ RL_HWREV_8168D,	"RTL8168D/8111D" },
247	{ RL_HWREV_8168DP,	"RTL8168DP/8111DP" },
248	{ RL_HWREV_8168E,	"RTL8168E/8111E" },
249	{ RL_HWREV_8168E_VL,	"RTL8168E/8111E-VL" },
250	{ RL_HWREV_8168EP,	"RTL8168EP/8111EP" },
251	{ RL_HWREV_8168FP,	"RTL8168FP/8111FP" },
252	{ RL_HWREV_8169,	"RTL8169" },
253	{ RL_HWREV_8169_8110SB,	"RTL8169/8110SB" },
254	{ RL_HWREV_8169_8110SBL, "RTL8169SBL" },
255	{ RL_HWREV_8169_8110SCd, "RTL8169/8110SCd" },
256	{ RL_HWREV_8169_8110SCe, "RTL8169/8110SCe" },
257	{ RL_HWREV_8169S,	"RTL8169S" },
258
259	{ 0, NULL }
260};
261
262
263static inline void
264re_set_bufaddr(struct rl_desc *d, bus_addr_t addr)
265{
266	d->rl_bufaddr_lo = htole32((uint32_t)addr);
267	if (sizeof(bus_addr_t) == sizeof(uint64_t))
268		d->rl_bufaddr_hi = htole32((uint64_t)addr >> 32);
269	else
270		d->rl_bufaddr_hi = 0;
271}
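For example (a worked value, not from the driver): with a 64-bit bus_addr_t of 0x123456789, the stores above yield rl_bufaddr_lo = htole32(0x23456789) and rl_bufaddr_hi = htole32(0x1), the little-endian lo/hi split the descriptor format expects.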
272
273/*
274 * Send a read command and address to the EEPROM, check for ACK.
275 */
276void
277re_eeprom_putbyte(struct rl_softc *sc, int addr)
278{
279 int d, i;
280
281	d = addr | (RL_9346_READ << sc->rl_eewidth);
282
283 /*
284 * Feed in each bit and strobe the clock.
285 */
286
287	for (i = 1 << (sc->rl_eewidth + 3); i; i >>= 1) {
288		if (d & i)
289			EE_SET(RL_EE_DATAIN);
290		else
291			EE_CLR(RL_EE_DATAIN);
292		DELAY(100);
293		EE_SET(RL_EE_CLK);
294		DELAY(150);
295		EE_CLR(RL_EE_CLK);
296		DELAY(100);
297	}
298}
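As a worked example of the word this routine clocks out, assume the common 93C46 case (sc->rl_eewidth == RL_9346_ADDR_LEN, i.e. 6) and the station-address offset used later in re_attach(). The sketch below is standalone arithmetic, not driver code:

#include <stdio.h>

#define RL_9346_READ	 0x6	/* start bit + READ opcode */
#define RL_9346_ADDR_LEN 6	/* 93C46 address width */
#define RL_EE_EADDR	 0x07	/* first station-address word */

int
main(void)
{
	int d = RL_EE_EADDR | (RL_9346_READ << RL_9346_ADDR_LEN);

	/* prints 0x187; re_eeprom_putbyte() shifts it out MSB-first,
	 * starting from bit (RL_9346_ADDR_LEN + 3) = bit 9 */
	printf("frame = 0x%03x\n", d);
	return (0);
}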
299
300/*
301 * Read a word of data stored in the EEPROM at address 'addr.'
302 */
303void
304re_eeprom_getword(struct rl_softc *sc, int addr, u_int16_t *dest)
305{
306 int i;
307 u_int16_t word = 0;
308
309 /*
310 * Send address of word we want to read.
311 */
312 re_eeprom_putbyte(sc, addr);
313
314 /*
315 * Start reading bits from EEPROM.
316 */
317	for (i = 0x8000; i; i >>= 1) {
318		EE_SET(RL_EE_CLK);
319		DELAY(100);
320		if (CSR_READ_1(sc, RL_EECMD) & RL_EE_DATAOUT)
321			word |= i;
322		EE_CLR(RL_EE_CLK);
323		DELAY(100);
324	}
325
326 *dest = word;
327}
328
329/*
330 * Read a sequence of words from the EEPROM.
331 */
332void
333re_read_eeprom(struct rl_softc *sc, caddr_t dest, int off, int cnt)
334{
335 int i;
336 u_int16_t word = 0, *ptr;
337
338	CSR_SETBIT_1(sc, RL_EECMD, RL_EEMODE_PROGRAM);
339
340	DELAY(100);
341
342	for (i = 0; i < cnt; i++) {
343		CSR_SETBIT_1(sc, RL_EECMD, RL_EE_SEL);
344		re_eeprom_getword(sc, off + i, &word);
345		CSR_CLRBIT_1(sc, RL_EECMD, RL_EE_SEL);
346		ptr = (u_int16_t *)(dest + (i * 2));
347		*ptr = word;
348	}
349
350	CSR_CLRBIT_1(sc, RL_EECMD, RL_EEMODE_PROGRAM);
351}
352
353int
354re_gmii_readreg(struct device *self, int phy, int reg)
355{
356 struct rl_softc *sc = (struct rl_softc *)self;
357 u_int32_t rval;
358 int i;
359
360	if (phy != 7)
361		return (0);
362
363	/* Let the rgephy driver read the GMEDIASTAT register */
364
365	if (reg == RL_GMEDIASTAT) {
366		rval = CSR_READ_1(sc, RL_GMEDIASTAT);
367		return (rval);
368	}
369
370	CSR_WRITE_4(sc, RL_PHYAR, reg << 16);
371
372	for (i = 0; i < RL_PHY_TIMEOUT; i++) {
373		rval = CSR_READ_4(sc, RL_PHYAR);
374		if (rval & RL_PHYAR_BUSY)
375			break;
376		DELAY(25);
377	}
378
379	if (i == RL_PHY_TIMEOUT) {
380		printf("%s: PHY read failed\n", sc->sc_dev.dv_xname);
381		return (0);
382	}
383
384	DELAY(20);
385
386	return (rval & RL_PHYAR_PHYDATA);
387}
388
389void
390re_gmii_writereg(struct device *dev, int phy, int reg, int data)
391{
392 struct rl_softc *sc = (struct rl_softc *)dev;
393 u_int32_t rval;
394 int i;
395
396	CSR_WRITE_4(sc, RL_PHYAR, (reg << 16) |
397	    (data & RL_PHYAR_PHYDATA) | RL_PHYAR_BUSY);
398
399	for (i = 0; i < RL_PHY_TIMEOUT; i++) {
400		rval = CSR_READ_4(sc, RL_PHYAR);
401		if (!(rval & RL_PHYAR_BUSY))
402			break;
403		DELAY(25);
404	}
405
406	if (i == RL_PHY_TIMEOUT)
407		printf("%s: PHY write failed\n", sc->sc_dev.dv_xname);
408
409	DELAY(20);
410}
411
412int
413re_miibus_readreg(struct device *dev, int phy, int reg)
414{
415 struct rl_softc *sc = (struct rl_softc *)dev;
416 u_int16_t rval = 0;
417 u_int16_t re8139_reg = 0;
418 int s;
419
420	s = splnet();
421
422	if (sc->sc_hwrev != RL_HWREV_8139CPLUS) {
423		rval = re_gmii_readreg(dev, phy, reg);
424		splx(s);
425		return (rval);
426	}
427
428	/* Pretend the internal PHY is only at address 0 */
429	if (phy) {
430		splx(s);
431		return (0);
432	}
433	switch(reg) {
434	case MII_BMCR:
435		re8139_reg = RL_BMCR;
436		break;
437	case MII_BMSR:
438		re8139_reg = RL_BMSR;
439		break;
440	case MII_ANAR:
441		re8139_reg = RL_ANAR;
442		break;
443	case MII_ANER:
444		re8139_reg = RL_ANER;
445		break;
446	case MII_ANLPAR:
447		re8139_reg = RL_LPAR;
448		break;
449	case MII_PHYIDR1:
450	case MII_PHYIDR2:
451		splx(s);
452		return (0);
453	/*
454	 * Allow the rlphy driver to read the media status
455	 * register. If we have a link partner which does not
456	 * support NWAY, this is the register which will tell
457	 * us the results of parallel detection.
458	 */
459	case RL_MEDIASTAT:
460		rval = CSR_READ_1(sc, RL_MEDIASTAT);
461		splx(s);
462		return (rval);
463	default:
464		printf("%s: bad phy register %x\n", sc->sc_dev.dv_xname, reg);
465		splx(s);
466		return (0);
467	}
468	rval = CSR_READ_2(sc, re8139_reg);
469	if (re8139_reg == RL_BMCR) {
470		/* 8139C+ has different bit layout. */
471		rval &= ~(BMCR_LOOP | BMCR_ISO);
472	}
473	splx(s);
474	return (rval);
475}
476
477void
478re_miibus_writereg(struct device *dev, int phy, int reg, int data)
479{
480 struct rl_softc *sc = (struct rl_softc *)dev;
481 u_int16_t re8139_reg = 0;
482 int s;
483
484	s = splnet();
485
486	if (sc->sc_hwrev != RL_HWREV_8139CPLUS) {
487		re_gmii_writereg(dev, phy, reg, data);
488		splx(s);
489		return;
490	}
491
492	/* Pretend the internal PHY is only at address 0 */
493	if (phy) {
494		splx(s);
495		return;
496	}
497	switch(reg) {
498	case MII_BMCR:
499		re8139_reg = RL_BMCR;
500		/* 8139C+ has different bit layout. */
501		data &= ~(BMCR_LOOP | BMCR_ISO);
502		break;
503	case MII_BMSR:
504		re8139_reg = RL_BMSR;
505		break;
506	case MII_ANAR:
507		re8139_reg = RL_ANAR;
508		break;
509	case MII_ANER:
510		re8139_reg = RL_ANER;
511		break;
512	case MII_ANLPAR:
513		re8139_reg = RL_LPAR;
514		break;
515	case MII_PHYIDR1:
516	case MII_PHYIDR2:
517		splx(s);
518		return;
519		break;
520	default:
521		printf("%s: bad phy register %x\n", sc->sc_dev.dv_xname, reg);
522		splx(s);
523		return;
524	}
525	CSR_WRITE_2(sc, re8139_reg, data);
526	splx(s);
527}
528
529void
530re_miibus_statchg(struct device *dev)
531{
532 struct rl_softc *sc = (struct rl_softc *)dev;
533 struct ifnet *ifp = &sc->sc_arpcom.ac_if;
534 struct mii_data *mii = &sc->sc_mii;
535
536	if ((ifp->if_flags & IFF_RUNNING) == 0)
537		return;
538
539	sc->rl_flags &= ~RL_FLAG_LINK;
540	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
541	    (IFM_ACTIVE | IFM_AVALID)) {
542		switch (IFM_SUBTYPE(mii->mii_media_active)) {
543		case IFM_10_T:
544		case IFM_100_TX:
545			sc->rl_flags |= RL_FLAG_LINK;
546			break;
547		case IFM_1000_T:
548			if ((sc->rl_flags & RL_FLAG_FASTETHER) != 0)
549				break;
550			sc->rl_flags |= RL_FLAG_LINK;
551			break;
552		default:
553			break;
554		}
555	}
556
557 /*
558 * Realtek controllers do not provide an interface to
559 * Tx/Rx MACs for resolved speed, duplex and flow-control
560 * parameters.
561 */
562}
563
564void
565re_iff(struct rl_softc *sc)
566{
567 struct ifnet *ifp = &sc->sc_arpcom.ac_if;
568 int h = 0;
569 u_int32_t hashes[2];
570 u_int32_t rxfilt;
571 struct arpcom *ac = &sc->sc_arpcom;
572 struct ether_multi *enm;
573 struct ether_multistep step;
574
575	rxfilt = CSR_READ_4(sc, RL_RXCFG);
576	rxfilt &= ~(RL_RXCFG_RX_ALLPHYS | RL_RXCFG_RX_BROAD |
577	    RL_RXCFG_RX_INDIV | RL_RXCFG_RX_MULTI);
578	ifp->if_flags &= ~IFF_ALLMULTI;
579
580	/*
581	 * Always accept frames destined to our station address.
582	 * Always accept broadcast frames.
583	 */
584	rxfilt |= RL_RXCFG_RX_INDIV | RL_RXCFG_RX_BROAD;
585
586	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
587		ifp->if_flags |= IFF_ALLMULTI;
588		rxfilt |= RL_RXCFG_RX_MULTI;
589		if (ifp->if_flags & IFF_PROMISC)
590			rxfilt |= RL_RXCFG_RX_ALLPHYS;
591		hashes[0] = hashes[1] = 0xFFFFFFFF;
592	} else {
593		rxfilt |= RL_RXCFG_RX_MULTI;
594		/* Program new filter. */
595		bzero(hashes, sizeof(hashes));
596
597		ETHER_FIRST_MULTI(step, ac, enm);
598		while (enm != NULL) {
599			h = ether_crc32_be(enm->enm_addrlo,
600			    ETHER_ADDR_LEN) >> 26;
601
602			if (h < 32)
603				hashes[0] |= (1 << h);
604			else
605				hashes[1] |= (1 << (h - 32));
606
607			ETHER_NEXT_MULTI(step, enm);
608		}
609	}
610
611	/*
612	 * For some unfathomable reason, Realtek decided to reverse
613	 * the order of the multicast hash registers in the PCI Express
614	 * parts. This means we have to write the hash pattern in reverse
615	 * order for those devices.
616	 */
617	if (sc->rl_flags & RL_FLAG_PCIE) {
618		CSR_WRITE_4(sc, RL_MAR0, swap32(hashes[1]));
619		CSR_WRITE_4(sc, RL_MAR4, swap32(hashes[0]));
620	} else {
621		CSR_WRITE_4(sc, RL_MAR0, hashes[0]);
622		CSR_WRITE_4(sc, RL_MAR4, hashes[1]);
623	}
624
625	CSR_WRITE_4(sc, RL_RXCFG, rxfilt);
626}
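To make the bucket math in re_iff() above concrete, here is a standalone sketch (the function name is hypothetical) of how the top six bits of the big-endian CRC select one of 64 filter bits split across the two 32-bit hash registers:

#include <stdint.h>

static void
hash_set_bit(uint32_t hashes[2], uint32_t crc_be)
{
	int h = crc_be >> 26;	/* top 6 bits pick bucket 0..63 */

	if (h < 32)
		hashes[0] |= (1U << h);
	else
		hashes[1] |= (1U << (h - 32));
}

On PCIe parts the driver then writes hashes[1] byte-swapped into RL_MAR0 and hashes[0] into RL_MAR4, exactly the reversed order the comment above warns about.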
627
628void
629re_reset(struct rl_softc *sc)
630{
631 int i;
632
633	CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_RESET);
634
635	for (i = 0; i < RL_TIMEOUT; i++) {
636		DELAY(10);
637		if (!(CSR_READ_1(sc, RL_COMMAND) & RL_CMD_RESET))
638			break;
639	}
640	if (i == RL_TIMEOUT)
641		printf("%s: reset never completed!\n", sc->sc_dev.dv_xname);
642
643	if (sc->rl_flags & RL_FLAG_MACRESET)
644		CSR_WRITE_1(sc, RL_LDPS, 1);
645}
646
647/*
648 * Attach the interface. Allocate softc structures, do ifmedia
649 * setup and ethernet/BPF attach.
650 */
651int
652re_attach(struct rl_softc *sc, const char *intrstr)
653{
654 u_char eaddr[ETHER_ADDR_LEN6];
655 u_int16_t as[ETHER_ADDR_LEN6 / 2];
656 struct ifnet *ifp;
657 u_int16_t re_did = 0;
658 int error = 0, i;
659 const struct re_revision *rr;
660 const char *re_name = NULL((void *)0);
661
662	sc->sc_hwrev = CSR_READ_4(sc, RL_TXCFG) & RL_TXCFG_HWREV;
663
664	switch (sc->sc_hwrev) {
665	case RL_HWREV_8139CPLUS:
666		sc->rl_flags |= RL_FLAG_FASTETHER | RL_FLAG_AUTOPAD;
667		sc->rl_max_mtu = RL_MTU;
668		break;
669	case RL_HWREV_8100E:
670	case RL_HWREV_8100E_SPIN2:
671	case RL_HWREV_8101E:
672		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_FASTETHER;
673		sc->rl_max_mtu = RL_MTU;
674		break;
675	case RL_HWREV_8103E:
676		sc->rl_flags |= RL_FLAG_MACSLEEP;
677		/* FALLTHROUGH */
678	case RL_HWREV_8102E:
679	case RL_HWREV_8102EL:
680	case RL_HWREV_8102EL_SPIN1:
681		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR |
682		    RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_FASTETHER |
683		    RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD;
684		sc->rl_max_mtu = RL_MTU;
685		break;
686	case RL_HWREV_8401E:
687	case RL_HWREV_8105E:
688	case RL_HWREV_8105E_SPIN1:
689	case RL_HWREV_8106E:
690		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM |
691		    RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT |
692		    RL_FLAG_FASTETHER | RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD;
693		sc->rl_max_mtu = RL_MTU;
694		break;
695	case RL_HWREV_8402:
696		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM |
697		    RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT |
698		    RL_FLAG_FASTETHER | RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD |
699		    RL_FLAG_CMDSTOP_WAIT_TXQ;
700		sc->rl_max_mtu = RL_MTU;
701		break;
702	case RL_HWREV_8168B_SPIN1:
703	case RL_HWREV_8168B_SPIN2:
704		sc->rl_flags |= RL_FLAG_WOLRXENB;
705		/* FALLTHROUGH */
706	case RL_HWREV_8168B_SPIN3:
707		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_MACSTAT;
708		sc->rl_max_mtu = RL_MTU;
709		break;
710	case RL_HWREV_8168C_SPIN2:
711		sc->rl_flags |= RL_FLAG_MACSLEEP;
712		/* FALLTHROUGH */
713	case RL_HWREV_8168C:
714	case RL_HWREV_8168CP:
715		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR |
716		    RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP |
717		    RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 | RL_FLAG_WOL_MANLINK;
718		sc->rl_max_mtu = RL_JUMBO_MTU_6K;
719		break;
720	case RL_HWREV_8168D:
721		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM |
722		    RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT |
723		    RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 |
724		    RL_FLAG_WOL_MANLINK;
725		sc->rl_max_mtu = RL_JUMBO_MTU_9K;
726		break;
727	case RL_HWREV_8168DP:
728		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR |
729		    RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_AUTOPAD |
730		    RL_FLAG_JUMBOV2 | RL_FLAG_WAIT_TXPOLL | RL_FLAG_WOL_MANLINK;
731		sc->rl_max_mtu = RL_JUMBO_MTU_9K;
732		break;
733	case RL_HWREV_8168E:
734		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM |
735		    RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT |
736		    RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 |
737		    RL_FLAG_WOL_MANLINK;
738		sc->rl_max_mtu = RL_JUMBO_MTU_9K;
739		break;
740	case RL_HWREV_8168E_VL:
741		sc->rl_flags |= RL_FLAG_EARLYOFF | RL_FLAG_PHYWAKE | RL_FLAG_PAR |
742		    RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP |
743		    RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 | RL_FLAG_CMDSTOP_WAIT_TXQ |
744		    RL_FLAG_WOL_MANLINK;
745		sc->rl_max_mtu = RL_JUMBO_MTU_6K;
746		break;
747	case RL_HWREV_8168F:
748		sc->rl_flags |= RL_FLAG_EARLYOFF;
749		/* FALLTHROUGH */
750	case RL_HWREV_8411:
751		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR |
752		    RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP |
753		    RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 | RL_FLAG_CMDSTOP_WAIT_TXQ |
754		    RL_FLAG_WOL_MANLINK;
755		sc->rl_max_mtu = RL_JUMBO_MTU_9K;
756		break;
757	case RL_HWREV_8168EP:
758	case RL_HWREV_8168FP:
759	case RL_HWREV_8168G:
760	case RL_HWREV_8168GU:
761	case RL_HWREV_8168H:
762	case RL_HWREV_8411B:
763		if (sc->sc_product == PCI_PRODUCT_REALTEK_RT8101E) {
764			/* RTL8106EUS */
765			sc->rl_flags |= RL_FLAG_FASTETHER;
766			sc->rl_max_mtu = RL_MTU;
767		} else {
768			sc->rl_flags |= RL_FLAG_JUMBOV2 | RL_FLAG_WOL_MANLINK;
769			sc->rl_max_mtu = RL_JUMBO_MTU_9K;
770		}
771
772		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR |
773		    RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP |
774		    RL_FLAG_AUTOPAD | RL_FLAG_CMDSTOP_WAIT_TXQ |
775		    RL_FLAG_EARLYOFFV2 | RL_FLAG_RXDV_GATED;
776		break;
777	case RL_HWREV_8169_8110SB:
778	case RL_HWREV_8169_8110SBL:
779	case RL_HWREV_8169_8110SCd:
780	case RL_HWREV_8169_8110SCe:
781		sc->rl_flags |= RL_FLAG_PHYWAKE;
782		/* FALLTHROUGH */
783	case RL_HWREV_8169:
784	case RL_HWREV_8169S:
785	case RL_HWREV_8110S:
786		sc->rl_flags |= RL_FLAG_MACRESET;
787		sc->rl_max_mtu = RL_JUMBO_MTU_7K;
788		break;
789	default:
790		break;
791	}
792
793	if (sc->sc_hwrev == RL_HWREV_8139CPLUS) {
794		sc->rl_cfg0 = RL_8139_CFG0;
795		sc->rl_cfg1 = RL_8139_CFG1;
796		sc->rl_cfg2 = 0;
797		sc->rl_cfg3 = RL_8139_CFG3;
798		sc->rl_cfg4 = RL_8139_CFG4;
799		sc->rl_cfg5 = RL_8139_CFG5;
800	} else {
801		sc->rl_cfg0 = RL_CFG0;
802		sc->rl_cfg1 = RL_CFG1;
803		sc->rl_cfg2 = RL_CFG2;
804		sc->rl_cfg3 = RL_CFG3;
805		sc->rl_cfg4 = RL_CFG4;
806		sc->rl_cfg5 = RL_CFG5;
807	}
808
809	/* Reset the adapter. */
810	re_reset(sc);
811
812	sc->rl_tx_time = 5;		/* 125us */
813	sc->rl_rx_time = 2;		/* 50us */
814	if (sc->rl_flags & RL_FLAG_PCIE)
815		sc->rl_sim_time = 75;	/* 75us */
816	else
817		sc->rl_sim_time = 125;	/* 125us */
818	sc->rl_imtype = RL_IMTYPE_SIM;	/* simulated interrupt moderation */
819
820	if (sc->sc_hwrev == RL_HWREV_8139CPLUS)
821		sc->rl_bus_speed = 33; /* XXX */
822	else if (sc->rl_flags & RL_FLAG_PCIE)
823		sc->rl_bus_speed = 125;
824	else {
825		u_int8_t cfg2;
826
827		cfg2 = CSR_READ_1(sc, sc->rl_cfg2);
828		switch (cfg2 & RL_CFG2_PCI_MASK) {
829		case RL_CFG2_PCI_33MHZ:
830			sc->rl_bus_speed = 33;
831			break;
832		case RL_CFG2_PCI_66MHZ:
833			sc->rl_bus_speed = 66;
834			break;
835		default:
836			printf("%s: unknown bus speed, assume 33MHz\n",
837			    sc->sc_dev.dv_xname);
838			sc->rl_bus_speed = 33;
839			break;
840		}
841
842		if (cfg2 & RL_CFG2_PCI_64BIT)
843			sc->rl_flags |= RL_FLAG_PCI64;
844	}
845
846	re_config_imtype(sc, sc->rl_imtype);
847
848	if (sc->rl_flags & RL_FLAG_PAR) {
849		/*
850		 * XXX Should have a better way to extract station
851		 * address from EEPROM.
852		 */
853		for (i = 0; i < ETHER_ADDR_LEN; i++)
854			eaddr[i] = CSR_READ_1(sc, RL_IDR0 + i);
855	} else {
856		sc->rl_eewidth = RL_9356_ADDR_LEN;
857		re_read_eeprom(sc, (caddr_t)&re_did, 0, 1);
858		if (re_did != 0x8129)
859			sc->rl_eewidth = RL_9346_ADDR_LEN;
860
861		/*
862		 * Get station address from the EEPROM.
863		 */
864		re_read_eeprom(sc, (caddr_t)as, RL_EE_EADDR, 3);
865		for (i = 0; i < ETHER_ADDR_LEN / 2; i++)
866			as[i] = letoh16(as[i]);
867		bcopy(as, eaddr, ETHER_ADDR_LEN);
868	}
869
870	/*
871	 * Set RX length mask, TX poll request register
872	 * and descriptor count.
873	 */
874	if (sc->sc_hwrev == RL_HWREV_8139CPLUS) {
875		sc->rl_rxlenmask = RL_RDESC_STAT_FRAGLEN;
876		sc->rl_txstart = RL_TXSTART;
877		sc->rl_ldata.rl_tx_desc_cnt = RL_8139_TX_DESC_CNT;
878		sc->rl_ldata.rl_rx_desc_cnt = RL_8139_RX_DESC_CNT;
879		sc->rl_ldata.rl_tx_ndescs = RL_8139_NTXSEGS;
880	} else {
881		sc->rl_rxlenmask = RL_RDESC_STAT_GFRAGLEN;
882		sc->rl_txstart = RL_GTXSTART;
883		sc->rl_ldata.rl_tx_desc_cnt = RL_8169_TX_DESC_CNT;
884		sc->rl_ldata.rl_rx_desc_cnt = RL_8169_RX_DESC_CNT;
885		sc->rl_ldata.rl_tx_ndescs = RL_8169_NTXSEGS;
886	}
887
888	bcopy(eaddr, (char *)&sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN);
889
890	for (rr = re_revisions; rr->re_name != NULL; rr++) {
891		if (rr->re_chipid == sc->sc_hwrev)
892			re_name = rr->re_name;
893	}
894
895	if (re_name == NULL)
896		printf(": unknown ASIC (0x%04x)", sc->sc_hwrev >> 16);
897	else
898		printf(": %s (0x%04x)", re_name, sc->sc_hwrev >> 16);
899
900	printf(", %s, address %s\n", intrstr,
901	    ether_sprintf(sc->sc_arpcom.ac_enaddr));
902
903	/* Allocate DMA'able memory for the TX ring */
904	if ((error = bus_dmamem_alloc(sc->sc_dmat, RL_TX_LIST_SZ(sc),
905	    RL_RING_ALIGN, 0, &sc->rl_ldata.rl_tx_listseg, 1,
906	    &sc->rl_ldata.rl_tx_listnseg, BUS_DMA_NOWAIT |
907	    BUS_DMA_ZERO)) != 0) {
908		printf("%s: can't allocate tx listseg, error = %d\n",
909		    sc->sc_dev.dv_xname, error);
910		goto fail_0;
911	}
912
913	/* Load the map for the TX ring. */
914	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->rl_ldata.rl_tx_listseg,
915	    sc->rl_ldata.rl_tx_listnseg, RL_TX_LIST_SZ(sc),
916	    (caddr_t *)&sc->rl_ldata.rl_tx_list,
917	    BUS_DMA_COHERENT | BUS_DMA_NOWAIT)) != 0) {
918		printf("%s: can't map tx list, error = %d\n",
919		    sc->sc_dev.dv_xname, error);
920		goto fail_1;
921	}
922
923	if ((error = bus_dmamap_create(sc->sc_dmat, RL_TX_LIST_SZ(sc), 1,
924	    RL_TX_LIST_SZ(sc), 0, 0,
925	    &sc->rl_ldata.rl_tx_list_map)) != 0) {
926		printf("%s: can't create tx list map, error = %d\n",
927		    sc->sc_dev.dv_xname, error);
928		goto fail_2;
929	}
930
931	if ((error = bus_dmamap_load(sc->sc_dmat,
932	    sc->rl_ldata.rl_tx_list_map, sc->rl_ldata.rl_tx_list,
933	    RL_TX_LIST_SZ(sc), NULL, BUS_DMA_NOWAIT)) != 0) {
934		printf("%s: can't load tx list, error = %d\n",
935		    sc->sc_dev.dv_xname, error);
936		goto fail_3;
937	}
938
939	/* Create DMA maps for TX buffers */
940	for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) {
941		error = bus_dmamap_create(sc->sc_dmat,
942		    RL_JUMBO_FRAMELEN, sc->rl_ldata.rl_tx_ndescs,
943		    RL_JUMBO_FRAMELEN, 0, 0,
944		    &sc->rl_ldata.rl_txq[i].txq_dmamap);
945		if (error) {
946			printf("%s: can't create DMA map for TX\n",
947			    sc->sc_dev.dv_xname);
948			goto fail_4;
949		}
950	}
951
952	/* Allocate DMA'able memory for the RX ring */
953	if ((error = bus_dmamem_alloc(sc->sc_dmat, RL_RX_DMAMEM_SZ(sc),
954	    RL_RING_ALIGN, 0, &sc->rl_ldata.rl_rx_listseg, 1,
955	    &sc->rl_ldata.rl_rx_listnseg, BUS_DMA_NOWAIT |
956	    BUS_DMA_ZERO)) != 0) {
957		printf("%s: can't allocate rx listnseg, error = %d\n",
958		    sc->sc_dev.dv_xname, error);
959		goto fail_4;
960	}
961
962	/* Load the map for the RX ring. */
963	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->rl_ldata.rl_rx_listseg,
964	    sc->rl_ldata.rl_rx_listnseg, RL_RX_DMAMEM_SZ(sc),
965	    (caddr_t *)&sc->rl_ldata.rl_rx_list,
966	    BUS_DMA_COHERENT | BUS_DMA_NOWAIT)) != 0) {
967		printf("%s: can't map rx list, error = %d\n",
968		    sc->sc_dev.dv_xname, error);
969		goto fail_5;
970
971	}
972
973	if ((error = bus_dmamap_create(sc->sc_dmat, RL_RX_DMAMEM_SZ(sc), 1,
974	    RL_RX_DMAMEM_SZ(sc), 0, 0,
975	    &sc->rl_ldata.rl_rx_list_map)) != 0) {
976		printf("%s: can't create rx list map, error = %d\n",
977		    sc->sc_dev.dv_xname, error);
978		goto fail_6;
979	}
980
981	if ((error = bus_dmamap_load(sc->sc_dmat,
982	    sc->rl_ldata.rl_rx_list_map, sc->rl_ldata.rl_rx_list,
983	    RL_RX_DMAMEM_SZ(sc), NULL, BUS_DMA_NOWAIT)) != 0) {
984		printf("%s: can't load rx list, error = %d\n",
985		    sc->sc_dev.dv_xname, error);
986		goto fail_7;
987	}
988
989	/* Create DMA maps for RX buffers */
990	for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
991		error = bus_dmamap_create(sc->sc_dmat,
992		    RL_FRAMELEN(sc->rl_max_mtu), 1,
993		    RL_FRAMELEN(sc->rl_max_mtu), 0, 0,
994		    &sc->rl_ldata.rl_rxsoft[i].rxs_dmamap);
995		if (error) {
996			printf("%s: can't create DMA map for RX\n",
997			    sc->sc_dev.dv_xname);
998			goto fail_8;
999		}
1000	}
1001
1002	ifp = &sc->sc_arpcom.ac_if;
1003	ifp->if_softc = sc;
1004	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);
1005	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1006	ifp->if_xflags = IFXF_MPSAFE;
1007	ifp->if_ioctl = re_ioctl;
1008	ifp->if_qstart = re_start;
1009	ifp->if_watchdog = re_watchdog;
1010	ifp->if_hardmtu = sc->rl_max_mtu;
1011	ifq_set_maxlen(&ifp->if_snd, sc->rl_ldata.rl_tx_desc_cnt);
1012
1013	ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_CSUM_TCPv4 |
1014	    IFCAP_CSUM_UDPv4;
1015
1016	/*
1017	 * RTL8168/8111C generates wrong IP checksummed frame if the
1018	 * packet has IP options so disable TX IP checksum offloading.
1019	 */
1020	switch (sc->sc_hwrev) {
1021	case RL_HWREV_8168C:
1022	case RL_HWREV_8168C_SPIN2:
1023	case RL_HWREV_8168CP:
1024		break;
1025	default:
1026		ifp->if_capabilities |= IFCAP_CSUM_IPv4;
1027	}
1028
1029#if NVLAN > 0
1030	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
1031#endif
1032
1033#ifndef SMALL_KERNEL
1034	ifp->if_capabilities |= IFCAP_WOL;
1035	ifp->if_wol = re_wol;
1036	re_wol(ifp, 0);
1037#endif
1038	timeout_set(&sc->timer_handle, re_tick, sc);
1039	task_set(&sc->rl_start, re_txstart, sc);
1040
1041	/* Take PHY out of power down mode. */
1042	if (sc->rl_flags & RL_FLAG_PHYWAKE_PM) {
1043		CSR_WRITE_1(sc, RL_PMCH, CSR_READ_1(sc, RL_PMCH) | 0x80);
1044		if (sc->sc_hwrev == RL_HWREV_8401E)
1045			CSR_WRITE_1(sc, 0xD1, CSR_READ_1(sc, 0xD1) & ~0x08);
1046	}
1047	if (sc->rl_flags & RL_FLAG_PHYWAKE) {
1048		re_gmii_writereg((struct device *)sc, 1, 0x1f, 0);
1049		re_gmii_writereg((struct device *)sc, 1, 0x0e, 0);
1050	}
1051
1052	/* Do MII setup */
1053	sc->sc_mii.mii_ifp = ifp;
1054	sc->sc_mii.mii_readreg = re_miibus_readreg;
1055	sc->sc_mii.mii_writereg = re_miibus_writereg;
1056	sc->sc_mii.mii_statchg = re_miibus_statchg;
1057	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, re_ifmedia_upd,
1058	    re_ifmedia_sts);
1059	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
1060	    MII_OFFSET_ANY, MIIF_DOPAUSE);
1061	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
1062		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
1063		ifmedia_add(&sc->sc_mii.mii_media,
1064		    IFM_ETHER|IFM_NONE, 0, NULL);
1065		ifmedia_set(&sc->sc_mii.mii_media,
1066		    IFM_ETHER|IFM_NONE);
1067	} else
1068		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
1069
1070	/*
1071	 * Call MI attach routine.
1072	 */
1073	if_attach(ifp);
1074	ether_ifattach(ifp);
1075
1076	return (0);
1077
1078fail_8:
1079	/* Destroy DMA maps for RX buffers. */
1080	for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
1081		if (sc->rl_ldata.rl_rxsoft[i].rxs_dmamap != NULL)
1082			bus_dmamap_destroy(sc->sc_dmat,
1083			    sc->rl_ldata.rl_rxsoft[i].rxs_dmamap);
1084	}
1085
1086	/* Free DMA'able memory for the RX ring. */
1087	bus_dmamap_unload(sc->sc_dmat, sc->rl_ldata.rl_rx_list_map);
1088fail_7:
1089	bus_dmamap_destroy(sc->sc_dmat, sc->rl_ldata.rl_rx_list_map);
1090fail_6:
1091	bus_dmamem_unmap(sc->sc_dmat,
1092	    (caddr_t)sc->rl_ldata.rl_rx_list, RL_RX_DMAMEM_SZ(sc));
1093fail_5:
1094	bus_dmamem_free(sc->sc_dmat,
1095	    &sc->rl_ldata.rl_rx_listseg, sc->rl_ldata.rl_rx_listnseg);
1096
1097fail_4:
1098	/* Destroy DMA maps for TX buffers. */
1099	for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) {
1100		if (sc->rl_ldata.rl_txq[i].txq_dmamap != NULL)
1101			bus_dmamap_destroy(sc->sc_dmat,
1102			    sc->rl_ldata.rl_txq[i].txq_dmamap);
1103	}
1104
1105	/* Free DMA'able memory for the TX ring. */
1106	bus_dmamap_unload(sc->sc_dmat, sc->rl_ldata.rl_tx_list_map);
1107fail_3:
1108	bus_dmamap_destroy(sc->sc_dmat, sc->rl_ldata.rl_tx_list_map);
1109fail_2:
1110	bus_dmamem_unmap(sc->sc_dmat,
1111	    (caddr_t)sc->rl_ldata.rl_tx_list, RL_TX_LIST_SZ(sc));
1112fail_1:
1113	bus_dmamem_free(sc->sc_dmat,
1114	    &sc->rl_ldata.rl_tx_listseg, sc->rl_ldata.rl_tx_listnseg);
1115fail_0:
1116	return (1);
1117}
1118
1119
1120 int
1121 re_newbuf(struct rl_softc *sc)
1122 {
1123 	struct mbuf *m;
1124 	bus_dmamap_t map;
1125 	struct rl_desc *d;
1126 	struct rl_rxsoft *rxs;
1127 	u_int32_t cmdstat;
1128 	int error, idx;
1129
1130 	m = MCLGETL(NULL, M_DONTWAIT, RL_FRAMELEN(sc->rl_max_mtu));
1131 	if (!m)
1132 		return (ENOBUFS);
1133
1134 	/*
1135 	 * Initialize mbuf length fields and fixup
1136 	 * alignment so that the frame payload is
1137 	 * longword aligned on strict alignment archs.
1138 	 */
1139 	m->m_len = m->m_pkthdr.len = RL_FRAMELEN(sc->rl_max_mtu);
1140 	m->m_data += RE_ETHER_ALIGN;
1141
1142 	idx = sc->rl_ldata.rl_rx_prodidx;
1143 	rxs = &sc->rl_ldata.rl_rxsoft[idx];
1144 	map = rxs->rxs_dmamap;
1145 	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
1146 	    BUS_DMA_READ|BUS_DMA_NOWAIT);
1147 	if (error) {
1148 		m_freem(m);
1149 		return (ENOBUFS);
1150 	}
1151
1152 	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1153 	    BUS_DMASYNC_PREREAD);
1154
1155 	d = &sc->rl_ldata.rl_rx_list[idx];
1156 	RL_RXDESCSYNC(sc, idx, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1157 	cmdstat = letoh32(d->rl_cmdstat);
1158 	RL_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD);
1159 	if (cmdstat & RL_RDESC_STAT_OWN) {
1160 		printf("%s: tried to map busy RX descriptor\n",
1161 		    sc->sc_dev.dv_xname);
1162 		m_freem(m);
1163 		return (ENOBUFS);
1164 	}
1165
1166 	rxs->rxs_mbuf = m;
1167
1168 	d->rl_vlanctl = 0;
1169 	cmdstat = map->dm_segs[0].ds_len;
1170 	if (idx == sc->rl_ldata.rl_rx_desc_cnt - 1)
1171 		cmdstat |= RL_RDESC_CMD_EOR;
1172 	re_set_bufaddr(d, map->dm_segs[0].ds_addr);
1173 	d->rl_cmdstat = htole32(cmdstat);
1174 	RL_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1175 	cmdstat |= RL_RDESC_CMD_OWN;
1176 	d->rl_cmdstat = htole32(cmdstat);
1177 	RL_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1178
1179 	sc->rl_ldata.rl_rx_prodidx = RL_NEXT_RX_DESC(sc, idx);
1180
1181 	return (0);
1182 }
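Note the two stores to rl_cmdstat above: the descriptor is first written without the OWN bit and synced, and only then rewritten with RL_RDESC_CMD_OWN set and synced again, so the chip can never see a descriptor it owns before the buffer address and length are visible. The essential ordering, excerpted as a fragment (same identifiers as the function above):

	d->rl_vlanctl = 0;
	re_set_bufaddr(d, map->dm_segs[0].ds_addr);	/* address first */
	d->rl_cmdstat = htole32(cmdstat);		/* no OWN yet */
	RL_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	d->rl_cmdstat = htole32(cmdstat | RL_RDESC_CMD_OWN); /* publish */
	RL_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);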
1183
1184
1185 int
1186 re_tx_list_init(struct rl_softc *sc)
1187 {
1188 	int i;
1189
1190 	memset(sc->rl_ldata.rl_tx_list, 0, RL_TX_LIST_SZ(sc));
1191 	for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) {
1192 		sc->rl_ldata.rl_txq[i].txq_mbuf = NULL;
1193 	}
1194
1195 	bus_dmamap_sync(sc->sc_dmat,
1196 	    sc->rl_ldata.rl_tx_list_map, 0,
1197 	    sc->rl_ldata.rl_tx_list_map->dm_mapsize,
1198 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1199 	sc->rl_ldata.rl_txq_prodidx = 0;
1200 	sc->rl_ldata.rl_txq_considx = 0;
1201 	sc->rl_ldata.rl_tx_free = sc->rl_ldata.rl_tx_desc_cnt;
1202 	sc->rl_ldata.rl_tx_nextfree = 0;
1203
1204 	return (0);
1205 }
1206
1207 int
1208 re_rx_list_init(struct rl_softc *sc)
1209 {
1210 	bzero(sc->rl_ldata.rl_rx_list, RL_RX_LIST_SZ(sc));
1211
1212 	sc->rl_ldata.rl_rx_prodidx = 0;
1213 	sc->rl_ldata.rl_rx_considx = 0;
1214 	sc->rl_head = sc->rl_tail = NULL;
1215
1216 	if_rxr_init(&sc->rl_ldata.rl_rx_ring, 2,
1217 	    sc->rl_ldata.rl_rx_desc_cnt - 1);
1218 	re_rx_list_fill(sc);
1219
1220 	return (0);
1221 }
1222
1223 void
1224 re_rx_list_fill(struct rl_softc *sc)
1225 {
1226 	u_int slots;
1227
1228 	for (slots = if_rxr_get(&sc->rl_ldata.rl_rx_ring,
1229 	    sc->rl_ldata.rl_rx_desc_cnt);
1230 	    slots > 0; slots--) {
1231 		if (re_newbuf(sc) == ENOBUFS)
1232 			break;
1233 	}
1234 	if_rxr_put(&sc->rl_ldata.rl_rx_ring, slots);
1235 }
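The fill loop follows the if_rxr(9) reserve/consume/return pattern: if_rxr_get() grants up to the requested number of slots, one reservation is consumed per buffer actually posted, and if_rxr_put() returns the rest. A kernel-side sketch with a hypothetical post_one_buffer() hook in place of re_newbuf():

	/* Hypothetical poster; returns 0 on success, like re_newbuf(). */
	int post_one_buffer(void);

	void
	ring_fill(struct if_rxring *rxr, u_int max)
	{
		u_int slots;

		for (slots = if_rxr_get(rxr, max); slots > 0; slots--) {
			if (post_one_buffer() != 0)
				break;		/* buffer shortage */
		}
		if_rxr_put(rxr, slots);		/* give back what's unused */
	}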
1236
1237 /*
1238  * RX handler for C+ and 8169. For the gigE chips, we support
1239  * the reception of jumbo frames that have been fragmented
1240  * across multiple 2K mbuf cluster buffers.
1241  */
1242 int
1243 re_rxeof(struct rl_softc *sc)
1244 {
1245 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
1246 	struct mbuf *m;
1247 	struct ifnet *ifp;
1248 	int i, total_len, rx = 0;
1249 	struct rl_desc *cur_rx;
1250 	struct rl_rxsoft *rxs;
1251 	u_int32_t rxstat, rxvlan;
1252
1253 	ifp = &sc->sc_arpcom.ac_if;
1254
1255 	for (i = sc->rl_ldata.rl_rx_considx;
1256 	    if_rxr_inuse(&sc->rl_ldata.rl_rx_ring) > 0;
1257 	    i = RL_NEXT_RX_DESC(sc, i)) {
1258 		cur_rx = &sc->rl_ldata.rl_rx_list[i];
1259 		RL_RXDESCSYNC(sc, i,
1260 		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1261 		rxstat = letoh32(cur_rx->rl_cmdstat);
1262 		rxvlan = letoh32(cur_rx->rl_vlanctl);
1263 		RL_RXDESCSYNC(sc, i, BUS_DMASYNC_PREREAD);
1264 		if ((rxstat & RL_RDESC_STAT_OWN) != 0)
1265 			break;
1266 		total_len = rxstat & sc->rl_rxlenmask;
1267 		rxs = &sc->rl_ldata.rl_rxsoft[i];
1268 		m = rxs->rxs_mbuf;
1269 		rxs->rxs_mbuf = NULL;
1270 		if_rxr_put(&sc->rl_ldata.rl_rx_ring, 1);
1271 		rx = 1;
1272
1273 		/* Invalidate the RX mbuf and unload its map */
1274
1275 		bus_dmamap_sync(sc->sc_dmat,
1276 		    rxs->rxs_dmamap, 0, rxs->rxs_dmamap->dm_mapsize,
1277 		    BUS_DMASYNC_POSTREAD);
1278 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
1279
1280 		if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0 &&
1281 		    (rxstat & (RL_RDESC_STAT_SOF | RL_RDESC_STAT_EOF)) !=
1282 		    (RL_RDESC_STAT_SOF | RL_RDESC_STAT_EOF)) {
1283 			ifp->if_ierrors++;
1284 			m_freem(m);
1285 			continue;
1286 		} else if (!(rxstat & RL_RDESC_STAT_EOF)) {
1287 			m->m_len = RL_FRAMELEN(sc->rl_max_mtu);
1288 			if (sc->rl_head == NULL)
1289 				sc->rl_head = sc->rl_tail = m;
1290 			else {
1291 				m->m_flags &= ~M_PKTHDR;
1292 				sc->rl_tail->m_next = m;
1293 				sc->rl_tail = m;
1294 			}
1295 			continue;
1296 		}
1297
1298 		/*
1299 		 * NOTE: for the 8139C+, the frame length field
1300 		 * is always 12 bits in size, but for the gigE chips,
1301 		 * it is 13 bits (since the max RX frame length is 16K).
1302 		 * Unfortunately, all 32 bits in the status word
1303 		 * were already used, so to make room for the extra
1304 		 * length bit, Realtek took out the 'frame alignment
1305 		 * error' bit and shifted the other status bits
1306 		 * over one slot. The OWN, EOR, FS and LS bits are
1307 		 * still in the same places. We have already extracted
1308 		 * the frame length and checked the OWN bit, so rather
1309 		 * than using an alternate bit mapping, we shift the
1310 		 * status bits one space to the right so we can evaluate
1311 		 * them using the 8169 status as though it was in the
1312 		 * same format as that of the 8139C+.
1313 		 */
1314 		if (sc->sc_hwrev != RL_HWREV_8139CPLUS)
1315 			rxstat >>= 1;
1316
1317 		/*
1318 		 * if total_len > 2^13-1, both _RXERRSUM and _GIANT will be
1319 		 * set, but if CRC is clear, it will still be a valid frame.
1320 		 */
1321 		if ((rxstat & RL_RDESC_STAT_RXERRSUM) != 0 &&
1322 		    !(total_len > 8191 &&
1323 		    (rxstat & RL_RDESC_STAT_ERRS) == RL_RDESC_STAT_GIANT)) {
1324 			ifp->if_ierrors++;
1325 			/*
1326 			 * If this is part of a multi-fragment packet,
1327 			 * discard all the pieces.
1328 			 */
1329 			if (sc->rl_head != NULL) {
1330 				m_freem(sc->rl_head);
1331 				sc->rl_head = sc->rl_tail = NULL;
1332 			}
1333 			m_freem(m);
1334 			continue;
1335 		}
1336
1337 		if (sc->rl_head != NULL) {
1338 			m->m_len = total_len % RL_FRAMELEN(sc->rl_max_mtu);
1339 			if (m->m_len == 0)
1340 				m->m_len = RL_FRAMELEN(sc->rl_max_mtu);
1341 			/*
1342 			 * Special case: if there's 4 bytes or less
1343 			 * in this buffer, the mbuf can be discarded:
1344 			 * the last 4 bytes is the CRC, which we don't
1345 			 * care about anyway.
1346 			 */
1347 			if (m->m_len <= ETHER_CRC_LEN) {
1348 				sc->rl_tail->m_len -=
1349 				    (ETHER_CRC_LEN - m->m_len);
1350 				m_freem(m);
1351 			} else {
1352 				m->m_len -= ETHER_CRC_LEN;
1353 				m->m_flags &= ~M_PKTHDR;
1354 				sc->rl_tail->m_next = m;
1355 			}
1356 			m = sc->rl_head;
1357 			sc->rl_head = sc->rl_tail = NULL;
1358 			m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
1359 		} else
1360 			m->m_pkthdr.len = m->m_len =
1361 			    (total_len - ETHER_CRC_LEN);
1362
1363 		/* Do RX checksumming */
1364
1365 		if (sc->rl_flags & RL_FLAG_DESCV2) {
1366 			/* Check IP header checksum */
1367 			if ((rxvlan & RL_RDESC_IPV4) &&
1368 			    !(rxstat & RL_RDESC_STAT_IPSUMBAD))
1369 				m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
1370
1371 			/* Check TCP/UDP checksum */
1372 			if ((rxvlan & (RL_RDESC_IPV4|RL_RDESC_IPV6)) &&
1373 			    (((rxstat & RL_RDESC_STAT_TCP) &&
1374 			    !(rxstat & RL_RDESC_STAT_TCPSUMBAD)) ||
1375 			    ((rxstat & RL_RDESC_STAT_UDP) &&
1376 			    !(rxstat & RL_RDESC_STAT_UDPSUMBAD))))
1377 				m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK |
1378 				    M_UDP_CSUM_IN_OK;
1379 		} else {
1380 			/* Check IP header checksum */
1381 			if ((rxstat & RL_RDESC_STAT_PROTOID) &&
1382 			    !(rxstat & RL_RDESC_STAT_IPSUMBAD))
1383 				m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
1384
1385 			/* Check TCP/UDP checksum */
1386 			if ((RL_TCPPKT(rxstat) &&
1387 			    !(rxstat & RL_RDESC_STAT_TCPSUMBAD)) ||
1388 			    (RL_UDPPKT(rxstat) &&
1389 			    !(rxstat & RL_RDESC_STAT_UDPSUMBAD)))
1390 				m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK |
1391 				    M_UDP_CSUM_IN_OK;
1392 		}
1393 #if NVLAN > 0
1394 		if (rxvlan & RL_RDESC_VLANCTL_TAG) {
1395 			m->m_pkthdr.ether_vtag =
1396 			    ntohs((rxvlan & RL_RDESC_VLANCTL_DATA));
1397 			m->m_flags |= M_VLANTAG;
1398 		}
1399 #endif
1400
1401 		ml_enqueue(&ml, m);
1402 	}
1403
1404 	if (ifiq_input(&ifp->if_rcv, &ml))
1405 		if_rxr_livelocked(&sc->rl_ldata.rl_rx_ring);
1406
1407 	sc->rl_ldata.rl_rx_considx = i;
1408 	re_rx_list_fill(sc);
1409
1410
1411 	return (rx);
1412 }
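For a fragmented jumbo frame, the tail mbuf's length is total_len modulo the per-cluster buffer size, and the last ETHER_CRC_LEN bytes are trimmed, possibly reaching back into the previous fragment. A standalone worked example of that arithmetic, with an assumed 2048-byte buffer size:

	#include <stdio.h>

	#define FRAMELEN	2048	/* assumed RX buffer size */
	#define ETHER_CRC_LEN	4

	int
	main(void)
	{
		int total_len = 4099;		/* example frame incl. CRC */
		int tail = total_len % FRAMELEN;

		if (tail == 0)
			tail = FRAMELEN;
		if (tail <= ETHER_CRC_LEN)
			/* tail is CRC only: drop it, trim previous mbuf */
			printf("drop tail, trim prev by %d\n",
			    ETHER_CRC_LEN - tail);
		else
			printf("tail payload = %d\n", tail - ETHER_CRC_LEN);
		return (0);
	}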
1413
1414 int
1415 re_txeof(struct rl_softc *sc)
1416 {
1417 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1418 	struct rl_txq *txq;
1419 	uint32_t txstat;
1420 	unsigned int prod, cons;
1421 	unsigned int idx;
1422 	int free = 0;
1423
1424 	prod = sc->rl_ldata.rl_txq_prodidx;
1425 	cons = sc->rl_ldata.rl_txq_considx;
1426
1427 	while (prod != cons) {
1428 		txq = &sc->rl_ldata.rl_txq[cons];
1429
1430 		idx = txq->txq_descidx;
1431 		RL_TXDESCSYNC(sc, idx, BUS_DMASYNC_POSTREAD);
1432 		txstat = letoh32(sc->rl_ldata.rl_tx_list[idx].rl_cmdstat);
1433 		RL_TXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD);
1434 		if (ISSET(txstat, RL_TDESC_CMD_OWN)) {
1435 			free = 2;
1436 			break;
1437 		}
1438
1439 		bus_dmamap_sync(sc->sc_dmat, txq->txq_dmamap,
1440 		    0, txq->txq_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1441 		bus_dmamap_unload(sc->sc_dmat, txq->txq_dmamap);
1442 		m_freem(txq->txq_mbuf);
1443 		txq->txq_mbuf = NULL;
1444
1445 		if (txstat & (RL_TDESC_STAT_EXCESSCOL | RL_TDESC_STAT_COLCNT))
1446 			ifp->if_collisions++;
1447 		if (txstat & RL_TDESC_STAT_TXERRSUM)
1448 			ifp->if_oerrors++;
1449
1450 		cons = RL_NEXT_TX_DESC(sc, idx);
1451 		free = 1;
1452 	}
1453
1454 	if (free == 0)
1455 		return (0);
1456
1457 	sc->rl_ldata.rl_txq_considx = cons;
1458
1459 	/*
1460 	 * Some chips will ignore a second TX request issued while an
1461 	 * existing transmission is in progress. If the transmitter goes
1462 	 * idle but there are still packets waiting to be sent, we need
1463 	 * to restart the channel here to flush them out. This only
1464 	 * seems to be required with the PCIe devices.
1465 	 */
1466 	if (ifq_is_oactive(&ifp->if_snd))
1467 		ifq_restart(&ifp->if_snd);
1468 	else if (free == 2)
1469 		ifq_serialize(&ifp->if_snd, &sc->rl_start);
1470 	else
1471 		ifp->if_timer = 0;
1472
1473 	return (1);
1474 }
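re_txeof() advances the consumer toward the producer and stops at the first descriptor the chip still owns; free records whether anything was reclaimed (1) or the chip is still busy (2), which drives the restart logic below it. The reclaim loop reduced to a sketch over a hypothetical ring:

	struct desc {
		unsigned int owned;	/* set while the device owns it */
	};

	unsigned int
	reclaim(struct desc *ring, unsigned int cnt,
	    unsigned int *cons, unsigned int prod)
	{
		unsigned int n = 0;

		while (*cons != prod) {
			if (ring[*cons].owned)
				break;		/* chip not done yet */
			/* ...sync and unload the DMA map, free the mbuf... */
			*cons = (*cons + 1) % cnt;
			n++;
		}
		return (n);
	}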
1475
1476 void
1477 re_tick(void *xsc)
1478 {
1479 	struct rl_softc *sc = xsc;
1480 	struct mii_data *mii;
1481 	int s;
1482
1483 	mii = &sc->sc_mii;
1484
1485 	s = splnet();
1486
1487 	mii_tick(mii);
1488
1489 	if ((sc->rl_flags & RL_FLAG_LINK) == 0)
1490 		re_miibus_statchg(&sc->sc_dev);
1491
1492 	splx(s);
1493
1494 	timeout_add_sec(&sc->timer_handle, 1);
1495 }
1496
1497 int
1498 re_intr(void *arg)
1499 {
1500 	struct rl_softc *sc = arg;
1501 	struct ifnet *ifp;
1502 	u_int16_t status;
1503 	int claimed = 0, rx, tx;
1504
1505 	ifp = &sc->sc_arpcom.ac_if;
1506
1507 	if (!(ifp->if_flags & IFF_RUNNING))
1508 		return (0);
1509
1510 	/* Disable interrupts. */
1511 	CSR_WRITE_2(sc, RL_IMR, 0);
1512
1513 	rx = tx = 0;
1514 	status = CSR_READ_2(sc, RL_ISR);
1515 	/* If the card has gone away the read returns 0xffff. */
1516 	if (status == 0xffff)
1517 		return (0);
1518 	if (status)
1519 		CSR_WRITE_2(sc, RL_ISR, status);
1520
1521 	if (status & RL_ISR_TIMEOUT_EXPIRED)
1522 		claimed = 1;
1523
1524 	if (status & RL_INTRS_CPLUS) {
1525 		if (status &
1526 		    (sc->rl_rx_ack | RL_ISR_RX_ERR | RL_ISR_FIFO_OFLOW)) {
1527 			rx |= re_rxeof(sc);
1528 			claimed = 1;
1529 		}
1530
1531 		if (status & (sc->rl_tx_ack | RL_ISR_TX_ERR)) {
1532 			tx |= re_txeof(sc);
1533 			claimed = 1;
1534 		}
1535
1536 		if (status & RL_ISR_SYSTEM_ERR) {
1537 			KERNEL_LOCK();
1538 			re_init(ifp);
1539 			KERNEL_UNLOCK();
1540 			claimed = 1;
1541 		}
1542 	}
1543
1544 	if (sc->rl_imtype == RL_IMTYPE_SIM) {
1545 		if (sc->rl_timerintr) {
1546 			if ((tx | rx) == 0) {
1547 				/*
1548 				 * Nothing needs to be processed; fall back
1549 				 * to TX/RX interrupts.
1550 				 */
1551 				re_setup_intr(sc, 1, RL_IMTYPE_NONE);
1552
1553 				/*
1554 				 * Recollect, mainly to avoid the possible
1555 				 * race introduced by changing interrupt
1556 				 * masks.
1557 				 */
1558 				re_rxeof(sc);
1559 				re_txeof(sc);
1560 			} else
1561 				CSR_WRITE_4(sc, RL_TIMERCNT, 1); /* reload */
1562 		} else if (tx | rx) {
1563 			/*
1564 			 * Assume that using simulated interrupt moderation
1565 			 * (hardware timer based) could reduce the interrupt
1566 			 * rate.
1567 			 */
1568 			re_setup_intr(sc, 1, RL_IMTYPE_SIM);
1569 		}
1570 	}
1571
1572 	CSR_WRITE_2(sc, RL_IMR, sc->rl_intrs);
1573
1574 	return (claimed);
1575 }
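The tail of re_intr() implements a small adaptive policy: while simulated moderation is active, an idle timer tick drops back to per-packet interrupts, a busy tick just rearms the timer, and per-packet mode switches to moderation as soon as there is work. The policy distilled into a sketch (hypothetical hook names):

	void switch_to_sim(void), switch_to_per_packet(void), rearm_timer(void);

	void
	moderate(int sim_active, int did_work)
	{
		if (sim_active) {
			if (did_work)
				rearm_timer();		/* keep batching */
			else
				switch_to_per_packet();	/* timer fired idle */
		} else if (did_work)
			switch_to_sim();		/* busy: batch */
	}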
1576
1577 int
1578 re_encap(struct rl_softc *sc, unsigned int idx, struct mbuf *m)
1579 {
1580 	struct rl_txq *txq;
1581 	bus_dmamap_t map;
1582 	int error, seg, nsegs, curidx, lastidx, pad;
1583 	int off;
1584 	struct ip *ip;
1585 	struct rl_desc *d;
	  [11] 'd' declared without an initial value
1586 	u_int32_t cmdstat, vlanctl = 0, csum_flags = 0;
1587
1588 	/*
1589 	 * Set up checksum offload. Note: checksum offload bits must
1590 	 * appear in all descriptors of a multi-descriptor transmit
1591 	 * attempt. This is according to testing done with an 8169
1592 	 * chip. This is a requirement.
1593 	 */
1594
1595 	/*
1596 	 * Set RL_TDESC_CMD_IPCSUM if any checksum offloading
1597 	 * is requested. Otherwise, RL_TDESC_CMD_TCPCSUM/
1598 	 * RL_TDESC_CMD_UDPCSUM does not take effect.
1599 	 */
1600
1601 	if ((sc->rl_flags & RL_FLAG_JUMBOV2) &&
	  [12] Assuming the condition is false
1602 	    m->m_pkthdr.len > RL_MTU &&
1603 	    (m->m_pkthdr.csum_flags &
1604 	    (M_IPV4_CSUM_OUT|M_TCP_CSUM_OUT|M_UDP_CSUM_OUT)) != 0) {
1605 		struct mbuf mh, *mp;
1606
1607 		mp = m_getptr(m, ETHER_HDR_LEN, &off);
1608 		mh.m_flags = 0;
1609 		mh.m_data = mtod(mp, caddr_t) + off;
1610 		mh.m_next = mp->m_next;
1611 		mh.m_pkthdr.len = mp->m_pkthdr.len - ETHER_HDR_LEN;
1612 		mh.m_len = mp->m_len - off;
1613 		ip = (struct ip *)mh.m_data;
1614
1615 		if (m->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
1616 			ip->ip_sum = in_cksum(&mh, sizeof(struct ip));
1617 		if (m->m_pkthdr.csum_flags & (M_TCP_CSUM_OUT|M_UDP_CSUM_OUT))
1618 			in_delayed_cksum(&mh);
1619
1620 		m->m_pkthdr.csum_flags &=
1621 		    ~(M_IPV4_CSUM_OUT|M_TCP_CSUM_OUT|M_UDP_CSUM_OUT);
1622 	}
1623
1624 	if ((m->m_pkthdr.csum_flags &
	  [13] Assuming the condition is false
	  [14] Taking false branch
1625 	    (M_IPV4_CSUM_OUT|M_TCP_CSUM_OUT|M_UDP_CSUM_OUT)) != 0) {
1626 		if (sc->rl_flags & RL_FLAG_DESCV2) {
1627 			vlanctl |= RL_TDESC_CMD_IPCSUMV2;
1628 			if (m->m_pkthdr.csum_flags & M_TCP_CSUM_OUT)
1629 				vlanctl |= RL_TDESC_CMD_TCPCSUMV2;
1630 			if (m->m_pkthdr.csum_flags & M_UDP_CSUM_OUT)
1631 				vlanctl |= RL_TDESC_CMD_UDPCSUMV2;
1632 		} else {
1633 			csum_flags |= RL_TDESC_CMD_IPCSUM;
1634 			if (m->m_pkthdr.csum_flags & M_TCP_CSUM_OUT)
1635 				csum_flags |= RL_TDESC_CMD_TCPCSUM;
1636 			if (m->m_pkthdr.csum_flags & M_UDP_CSUM_OUT)
1637 				csum_flags |= RL_TDESC_CMD_UDPCSUM;
1638 		}
1639 	}
1640
1641 	txq = &sc->rl_ldata.rl_txq[idx];
1642 	map = txq->txq_dmamap;
1643
1644 	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
1645 	    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
1646 	switch (error) {
	  [15] Control jumps to 'case 0:' at line 1647
1647 	case 0:
1648 		break;
	  [16] Execution continues on line 1661
1649
1650 	case EFBIG:
1651 		if (m_defrag(m, M_DONTWAIT) == 0 &&
1652 		    bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
1653 		    BUS_DMA_WRITE|BUS_DMA_NOWAIT) == 0)
1654 			break;
1655
1656 		/* FALLTHROUGH */
1657 	default:
1658 		return (0);
1659 	}
1660
1661 	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1662 	    BUS_DMASYNC_PREWRITE);
1663
1664 	nsegs = map->dm_nsegs;
1665 	pad = 0;
1666
1667 	/*
1668 	 * With some of the Realtek chips, using the checksum offload
1669 	 * support in conjunction with the autopadding feature results
1670 	 * in the transmission of corrupt frames. For example, if we
1671 	 * need to send a really small IP fragment that's less than 60
1672 	 * bytes in size, and IP header checksumming is enabled, the
1673 	 * resulting ethernet frame that appears on the wire will
1674 	 * have garbled payload. To work around this, if TX IP checksum
1675 	 * offload is enabled, we always manually pad short frames out
1676 	 * to the minimum ethernet frame size.
1677 	 */
1678 	if ((sc->rl_flags & RL_FLAG_AUTOPAD) == 0 &&
	  [17] Assuming the condition is false
1679 	    m->m_pkthdr.len < RL_IP4CSUMTX_PADLEN &&
1680 	    (m->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT) != 0) {
1681 		pad = 1;
1682 		nsegs++;
1683 	}
1684
1685 	/*
1686 	 * Set up hardware VLAN tagging. Note: vlan tag info must
1687 	 * appear in all descriptors of a multi-descriptor
1688 	 * transmission attempt.
1689 	 */
1690 #if NVLAN > 0
1691 	if (m->m_flags & M_VLANTAG)
	  [18] Assuming the condition is false
	  [19] Taking false branch
1692 		vlanctl |= swap16(m->m_pkthdr.ether_vtag) |
1693 		    RL_TDESC_VLANCTL_TAG;
1694 #endif
1695
1696 	/*
1697 	 * Map the segment array into descriptors. Note that we set the
1698 	 * start-of-frame and end-of-frame markers for either TX or RX, but
1699 	 * they really only have meaning in the TX case. (In the RX case,
1700 	 * it's the chip that tells us where packets begin and end.)
1701 	 * We also keep track of the end of the ring and set the
1702 	 * end-of-ring bits as needed, and we set the ownership bits
1703 	 * in all except the very first descriptor. (The caller will
1704 	 * set this descriptor later when it starts transmission or
1705 	 * reception.)
1706 	 */
1707 	curidx = idx;
1708 	cmdstat = RL_TDESC_CMD_SOF;
1709
1710 	for (seg = 0; seg < map->dm_nsegs; seg++) {
	  [20] Assuming 'seg' is >= field 'dm_nsegs'
	  [21] Loop condition is false. Execution continues on line 1731
1711 		d = &sc->rl_ldata.rl_tx_list[curidx];
1712
1713 		RL_TXDESCSYNC(sc, curidx, BUS_DMASYNC_POSTWRITE);
1714
1715 		d->rl_vlanctl = htole32(vlanctl);
1716 		re_set_bufaddr(d, map->dm_segs[seg].ds_addr);
1717 		cmdstat |= csum_flags | map->dm_segs[seg].ds_len;
1718
1719 		if (curidx == sc->rl_ldata.rl_tx_desc_cnt - 1)
1720 			cmdstat |= RL_TDESC_CMD_EOR;
1721
1722 		d->rl_cmdstat = htole32(cmdstat);
1723
1724 		RL_TXDESCSYNC(sc, curidx, BUS_DMASYNC_PREWRITE);
1725
1726 		lastidx = curidx;
1727 		cmdstat = RL_TDESC_CMD_OWN;
1728 		curidx = RL_NEXT_TX_DESC(sc, curidx);
1729 	}
1730
1731 	if (pad) {
	  [21.1] 'pad' is 0
	  [22] Taking false branch
1732 		d = &sc->rl_ldata.rl_tx_list[curidx];
1733
1734 		RL_TXDESCSYNC(sc, curidx, BUS_DMASYNC_POSTWRITE);
1735
1736 		d->rl_vlanctl = htole32(vlanctl);
1737 		re_set_bufaddr(d, RL_TXPADDADDR(sc));
1738 		cmdstat = csum_flags |
1739 		    RL_TDESC_CMD_OWN | RL_TDESC_CMD_EOF |
1740 		    (RL_IP4CSUMTX_PADLEN + 1 - m->m_pkthdr.len);
1741
1742 		if (curidx == sc->rl_ldata.rl_tx_desc_cnt - 1)
1743 			cmdstat |= RL_TDESC_CMD_EOR;
1744
1745 		d->rl_cmdstat = htole32(cmdstat);
1746
1747 		RL_TXDESCSYNC(sc, curidx, BUS_DMASYNC_PREWRITE);
1748
1749 		lastidx = curidx;
1750 	}
1751
1752 	/* d is already pointing at the last descriptor */
1753 	d->rl_cmdstat |= htole32(RL_TDESC_CMD_EOF);
	  [23] Access to field 'rl_cmdstat' results in a dereference of an undefined pointer value (loaded from variable 'd')
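The path in steps [11], [20]-[23] needs bus_dmamap_load_mbuf() to return success with dm_nsegs == 0: then the segment loop at line 1710 never executes, pad stays 0, and line 1753 reads d uninitialized. Whether a successful load can really produce zero segments is an assumption in the analyzer's model rather than something this report demonstrates, but a cheap guard after the load would close the path either way. A sketch, not a committed fix, assuming placement after the bus_dmamap_sync() at line 1661:

	if (map->dm_nsegs == 0) {
		/* Nothing mapped: fail the encap so the caller
		 * (re_start) frees the mbuf, and 'd' is never read
		 * uninitialized at line 1753. */
		bus_dmamap_unload(sc->sc_dmat, map);
		return (0);
	}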
1754
1755 	/* Transfer ownership of packet to the chip. */
1756 	d = &sc->rl_ldata.rl_tx_list[idx];
1757
1758 	RL_TXDESCSYNC(sc, curidx, BUS_DMASYNC_POSTWRITE);
1759 	d->rl_cmdstat |= htole32(RL_TDESC_CMD_OWN);
1760 	RL_TXDESCSYNC(sc, curidx, BUS_DMASYNC_PREWRITE);
1761
1762 	/* update info of TX queue and descriptors */
1763 	txq->txq_mbuf = m;
1764 	txq->txq_descidx = lastidx;
1765
1766 	return (nsegs);
1767 }
1768
1769 void
1770 re_txstart(void *xsc)
1771 {
1772 	struct rl_softc *sc = xsc;
1773
1774 	CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START);
1775 }
1776
1777 /*
1778  * Main transmit routine for C+ and gigE NICs.
1779  */
1780
1781 void
1782 re_start(struct ifqueue *ifq)
1783 {
1784 	struct ifnet *ifp = ifq->ifq_if;
1785 	struct rl_softc *sc = ifp->if_softc;
1786 	struct mbuf *m;
1787 	unsigned int idx;
1788 	unsigned int free, used;
1789 	int post = 0;
1790
1791 	if (!ISSET(sc->rl_flags, RL_FLAG_LINK)) {
	  [1] Assuming the condition is false
	  [2] Taking false branch
1792 		ifq_purge(ifq);
1793 		return;
1794 	}
1795
1796 	free = sc->rl_ldata.rl_txq_considx;
1797 	idx = sc->rl_ldata.rl_txq_prodidx;
1798 	if (free <= idx)
	  [3] Assuming 'free' is > 'idx'
	  [4] Taking false branch
1799 		free += sc->rl_ldata.rl_tx_desc_cnt;
1800 	free -= idx;
1801
1802 	for (;;) {
	  [5] Loop condition is true. Entering loop body
1803 		if (sc->rl_ldata.rl_tx_ndescs >= free + 2) {
	  [6] Assuming the condition is false
	  [7] Taking false branch
1804 			ifq_set_oactive(ifq);
1805 			break;
1806 		}
1807
1808 		m = ifq_dequeue(ifq);
1809 		if (m == NULL)
	  [8] Assuming 'm' is not equal to NULL
	  [9] Taking false branch
1810 			break;
1811
1812 		used = re_encap(sc, idx, m);
	  [10] Calling 're_encap'
1813 		if (used == 0) {
1814 			m_freem(m);
1815 			continue;
1816 		}
1817
1818 #if NBPFILTER > 0
1819 		if (ifp->if_bpf)
1820 			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT);
1821 #endif
1822
1823 		KASSERT(used <= free);
1824 		free -= used;
1825
1826 		idx += used;
1827 		if (idx >= sc->rl_ldata.rl_tx_desc_cnt)
1828 			idx -= sc->rl_ldata.rl_tx_desc_cnt;
1829
1830 		post = 1;
1831 	}
1832
1833 	if (post == 0)
1834 		return;
1835
1836 	ifp->if_timer = 5;
1837 	sc->rl_ldata.rl_txq_prodidx = idx;
1838 	ifq_serialize(ifq, &sc->rl_start);
1839 }
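The computation at lines 1796-1800 counts free TX slots with the usual modular unwrap: start from the consumer index, add the ring size if it does not lead the producer, then subtract the producer. A standalone worked example:

	#include <stdio.h>

	unsigned int
	ring_free(unsigned int cons, unsigned int prod, unsigned int cnt)
	{
		unsigned int free = cons;

		if (free <= prod)
			free += cnt;	/* unwrap past the ring boundary */
		return (free - prod);
	}

	int
	main(void)
	{
		/* cons = 10, prod = 60, 64 slots: 10 + 64 - 60 = 14 free */
		printf("%u\n", ring_free(10, 60, 64));
		return (0);
	}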
1840
1841 int
1842 re_init(struct ifnet *ifp)
1843 {
1844 	struct rl_softc *sc = ifp->if_softc;
1845 	u_int16_t cfg;
1846 	uint32_t rxcfg;
1847 	int s;
1848 	union {
1849 		u_int32_t align_dummy;
1850 		u_char eaddr[ETHER_ADDR_LEN];
1851 	} eaddr;
1852
1853 	s = splnet();
1854
1855 	/*
1856 	 * Cancel pending I/O and free all RX/TX buffers.
1857 	 */
1858 	re_stop(ifp);
1859
1860 	/* Put controller into known state. */
1861 	re_reset(sc);
1862
1863 	/*
1864 	 * Enable C+ RX and TX mode, as well as VLAN stripping and
1865 	 * RX checksum offload. We must configure the C+ register
1866 	 * before all others.
1867 	 */
1868 	cfg = RL_CPLUSCMD_TXENB | RL_CPLUSCMD_PCI_MRW |
1869 	    RL_CPLUSCMD_RXCSUM_ENB;
1870
1871 	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
1872 		cfg |= RL_CPLUSCMD_VLANSTRIP;
1873
1874 	if (sc->rl_flags & RL_FLAG_MACSTAT)
1875 		cfg |= RL_CPLUSCMD_MACSTAT_DIS;
1876 	else
1877 		cfg |= RL_CPLUSCMD_RXENB;
1878
1879 	CSR_WRITE_2(sc, RL_CPLUS_CMD, cfg);
1880
1881 	/*
1882 	 * Init our MAC address. Even though the chipset
1883 	 * documentation doesn't mention it, we need to enter "Config
1884 	 * register write enable" mode to modify the ID registers.
1885 	 */
1886 	bcopy(sc->sc_arpcom.ac_enaddr, eaddr.eaddr, ETHER_ADDR_LEN);
1887 	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG);
1888 	CSR_WRITE_4(sc, RL_IDR4,
1889 	    htole32(*(u_int32_t *)(&eaddr.eaddr[4])));
1890 	CSR_WRITE_4(sc, RL_IDR0,
1891 	    htole32(*(u_int32_t *)(&eaddr.eaddr[0])));
1892 	/*
1893 	 * Default on PC Engines APU1 is to have all LEDs off unless
1894 	 * there is network activity. Override to provide a link status
1895 	 * LED.
1896 	 */
1897 	if (sc->sc_hwrev == RL_HWREV_8168E &&
1898 	    hw_vendor != NULL && hw_prod != NULL &&
1899 	    strcmp(hw_vendor, "PC Engines") == 0 &&
1900 	    strcmp(hw_prod, "APU") == 0) {
1901 		CSR_SETBIT_1(sc, RL_CFG4, RL_CFG4_CUSTOM_LED);
1902 		CSR_WRITE_1(sc, RL_LEDSEL, RL_LED_LINK | RL_LED_ACT << 4);
1903 	}
1904 	/*
1905 	 * Protect config register again
1906 	 */
1907 	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
1908
1909 	if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0)
1910 		re_set_jumbo(sc);
1911
1912 	/*
1913 	 * For C+ mode, initialize the RX descriptors and mbufs.
1914 	 */
1915 	re_rx_list_init(sc);
1916 	re_tx_list_init(sc);
1917
1918 	/*
1919 	 * Load the addresses of the RX and TX lists into the chip.
1920 	 */
1921 	CSR_WRITE_4(sc, RL_RXLIST_ADDR_HI,
1922 	    RL_ADDR_HI(sc->rl_ldata.rl_rx_list_map->dm_segs[0].ds_addr));
1923 	CSR_WRITE_4(sc, RL_RXLIST_ADDR_LO,
1924 	    RL_ADDR_LO(sc->rl_ldata.rl_rx_list_map->dm_segs[0].ds_addr));
1925
1926 	CSR_WRITE_4(sc, RL_TXLIST_ADDR_HI,
1927 	    RL_ADDR_HI(sc->rl_ldata.rl_tx_list_map->dm_segs[0].ds_addr));
1928 	CSR_WRITE_4(sc, RL_TXLIST_ADDR_LO,
1929 	    RL_ADDR_LO(sc->rl_ldata.rl_tx_list_map->dm_segs[0].ds_addr));
1930
1931 	if (sc->rl_flags & RL_FLAG_RXDV_GATED)
1932 		CSR_WRITE_4(sc, RL_MISC, CSR_READ_4(sc, RL_MISC) &
1933 		    ~0x00080000);
1934
1935 	/*
1936 	 * Set the initial TX and RX configuration.
1937 	 */
1938 	CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG);
1939
1940 	CSR_WRITE_1(sc, RL_EARLY_TX_THRESH, 16);
1941
1942 	rxcfg = RL_RXCFG_CONFIG;
1943 	if (sc->rl_flags & RL_FLAG_EARLYOFF)
1944 		rxcfg |= RL_RXCFG_EARLYOFF;
1945 	else if (sc->rl_flags & RL_FLAG_EARLYOFFV2)
1946 		rxcfg |= RL_RXCFG_EARLYOFFV2;
1947 	CSR_WRITE_4(sc, RL_RXCFG, rxcfg);
1948
1949 	/*
1950 	 * Enable transmit and receive.
1951 	 */
1952 	CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB | RL_CMD_RX_ENB);
1953
1954 	/* Program promiscuous mode and multicast filters. */
1955 	re_iff(sc);
1956
1957 	/*
1958 	 * Enable interrupts.
1959 	 */
1960 	re_setup_intr(sc, 1, sc->rl_imtype);
1961 	CSR_WRITE_2(sc, RL_ISR, sc->rl_intrs);
1962
1963 	/* Start RX/TX process. */
1964 	CSR_WRITE_4(sc, RL_MISSEDPKT, 0);
1965
1966 	/*
1967 	 * For 8169 gigE NICs, set the max allowed RX packet
1968 	 * size so we can receive jumbo frames.
1969 	 */
1970 	if (sc->sc_hwrev != RL_HWREV_8139CPLUS) {
1971 		if (sc->rl_flags & RL_FLAG_PCIE &&
1972 		    (sc->rl_flags & RL_FLAG_JUMBOV2) == 0)
1973 			CSR_WRITE_2(sc, RL_MAXRXPKTLEN, RE_RX_DESC_BUFLEN);
1974 		else
1975 			CSR_WRITE_2(sc, RL_MAXRXPKTLEN, 16383);
1976 	}
1977
1978 	CSR_WRITE_1(sc, sc->rl_cfg1, CSR_READ_1(sc, sc->rl_cfg1) |
1979 	    RL_CFG1_DRVLOAD);
1980
1981 	ifp->if_flags |= IFF_RUNNING;
1982 	ifq_clr_oactive(&ifp->if_snd);
1983
1984 	splx(s);
1985
1986 	sc->rl_flags &= ~RL_FLAG_LINK;
1987 	mii_mediachg(&sc->sc_mii);
1988
1989 	timeout_add_sec(&sc->timer_handle, 1);
1990
1991 	return (0);
1992 }
1993
1994 /*
1995  * Set media options.
1996  */
1997 int
1998 re_ifmedia_upd(struct ifnet *ifp)
1999 {
2000 	struct rl_softc *sc;
2001
2002 	sc = ifp->if_softc;
2003
2004 	return (mii_mediachg(&sc->sc_mii));
2005 }
2006
2007 /*
2008  * Report current media status.
2009  */
2010 void
2011 re_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2012 {
2013 	struct rl_softc *sc;
2014
2015 	sc = ifp->if_softc;
2016
2017 	mii_pollstat(&sc->sc_mii);
2018 	ifmr->ifm_active = sc->sc_mii.mii_media_active;
2019 	ifmr->ifm_status = sc->sc_mii.mii_media_status;
2020 }
2021
2022 int
2023 re_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
2024 {
2025 	struct rl_softc *sc = ifp->if_softc;
2026 	struct ifreq *ifr = (struct ifreq *) data;
2027 	int s, error = 0;
2028
2029 	s = splnet();
2030
2031 	switch(command) {
2032 	case SIOCSIFADDR:
2033 		ifp->if_flags |= IFF_UP;
2034 		if (!(ifp->if_flags & IFF_RUNNING))
2035 			re_init(ifp);
2036 		break;
2037 	case SIOCSIFFLAGS:
2038 		if (ifp->if_flags & IFF_UP) {
2039 			if (ifp->if_flags & IFF_RUNNING)
2040 				error = ENETRESET;
2041 			else
2042 				re_init(ifp);
2043 		} else {
2044 			if (ifp->if_flags & IFF_RUNNING)
2045 				re_stop(ifp);
2046 		}
2047 		break;
2048 	case SIOCGIFMEDIA:
2049 	case SIOCSIFMEDIA:
2050 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, command);
2051 		break;
2052 	case SIOCGIFRXR:
2053 		error = if_rxr_ioctl((struct if_rxrinfo *)ifr->ifr_data,
2054 		    NULL, RL_FRAMELEN(sc->rl_max_mtu), &sc->rl_ldata.rl_rx_ring);
2055 		break;
2056 	default:
2057 		error = ether_ioctl(ifp, &sc->sc_arpcom, command, data);
2058 	}
2059
2060 	if (error == ENETRESET) {
2061 		if (ifp->if_flags & IFF_RUNNING)
2062 			re_iff(sc);
2063 		error = 0;
2064 	}
2065
2066 	splx(s);
2067 	return (error);
2068 }
2069
2070 void
2071 re_watchdog(struct ifnet *ifp)
2072 {
2073 	struct rl_softc *sc;
2074 	int s;
2075
2076 	sc = ifp->if_softc;
2077 	s = splnet();
2078 	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
2079
2080 	re_init(ifp);
2081
2082 	splx(s);
2083 }
2084
2085 /*
2086  * Stop the adapter and free any mbufs allocated to the
2087  * RX and TX lists.
2088  */
2089 void
2090 re_stop(struct ifnet *ifp)
2091 {
2092 	struct rl_softc *sc;
2093 	int i;
2094
2095 	sc = ifp->if_softc;
2096
2097 	ifp->if_timer = 0;
2098 	sc->rl_flags &= ~RL_FLAG_LINK;
2099 	sc->rl_timerintr = 0;
2100
2101 	timeout_del(&sc->timer_handle);
2102 	ifp->if_flags &= ~IFF_RUNNING;
2103
2104 	/*
2105 	 * Disable accepting frames to put RX MAC into idle state.
2106 	 * Otherwise it's possible to get frames while stop command
2107 	 * execution is in progress and controller can DMA the frame
2108 	 * to already freed RX buffer during that period.
2109 	 */
2110 	CSR_WRITE_4(sc, RL_RXCFG, CSR_READ_4(sc, RL_RXCFG) &
2111 	    ~(RL_RXCFG_RX_ALLPHYS | RL_RXCFG_RX_BROAD | RL_RXCFG_RX_INDIV |
2112 	    RL_RXCFG_RX_MULTI));
2113
2114 	if (sc->rl_flags & RL_FLAG_WAIT_TXPOLL) {
2115 		for (i = RL_TIMEOUT; i > 0; i--) {
2116 			if ((CSR_READ_1(sc, sc->rl_txstart) &
2117 			    RL_TXSTART_START) == 0)
2118 				break;
2119 			DELAY(20);
2120 		}
2121 		if (i == 0)
2122 			printf("%s: stopping TX poll timed out!\n",
2123 			    sc->sc_dev.dv_xname);
2124 		CSR_WRITE_1(sc, RL_COMMAND, 0x00);
2125 	} else if (sc->rl_flags & RL_FLAG_CMDSTOP) {
2126 		CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_STOPREQ | RL_CMD_TX_ENB |
2127 		    RL_CMD_RX_ENB);
2128 		if (sc->rl_flags & RL_FLAG_CMDSTOP_WAIT_TXQ) {
2129 			for (i = RL_TIMEOUT; i > 0; i--) {
2130 				if ((CSR_READ_4(sc, RL_TXCFG) &
2131 				    RL_TXCFG_QUEUE_EMPTY) != 0)
2132 					break;
2133 				DELAY(100);
2134 			}
2135 			if (i == 0)
2136 				printf("%s: stopping TXQ timed out!\n",
2137 				    sc->sc_dev.dv_xname);
2138 		}
2139 	} else
2140 		CSR_WRITE_1(sc, RL_COMMAND, 0x00);
2141 	DELAY(1000);
2142 	CSR_WRITE_2(sc, RL_IMR, 0x0000);
2143 	CSR_WRITE_2(sc, RL_ISR, 0xFFFF);
2144
2145 	intr_barrier(sc->sc_ih);
2146 	ifq_barrier(&ifp->if_snd);
2147
2148 	ifq_clr_oactive(&ifp->if_snd);
2149 	mii_down(&sc->sc_mii);
2150
2151 	if (sc->rl_head != NULL) {
2152 		m_freem(sc->rl_head);
2153 		sc->rl_head = sc->rl_tail = NULL;
2154 	}
2155
2156 	/* Free the TX list buffers. */
2157 	for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) {
2158 		if (sc->rl_ldata.rl_txq[i].txq_mbuf != NULL) {
2159 			bus_dmamap_unload(sc->sc_dmat,
2160 			    sc->rl_ldata.rl_txq[i].txq_dmamap);
2161 			m_freem(sc->rl_ldata.rl_txq[i].txq_mbuf);
2162 			sc->rl_ldata.rl_txq[i].txq_mbuf = NULL;
2163 		}
2164 	}
2165
2166 	/* Free the RX list buffers. */
2167 	for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
2168 		if (sc->rl_ldata.rl_rxsoft[i].rxs_mbuf != NULL) {
2169 			bus_dmamap_unload(sc->sc_dmat,
2170 			    sc->rl_ldata.rl_rxsoft[i].rxs_dmamap);
2171 			m_freem(sc->rl_ldata.rl_rxsoft[i].rxs_mbuf);
2172 			sc->rl_ldata.rl_rxsoft[i].rxs_mbuf = NULL;
2173 		}
2174 	}
2175 }
2176
2177 void
2178 re_setup_hw_im(struct rl_softc *sc)
2179 {
2180 	KASSERT(sc->rl_flags & RL_FLAG_HWIM);
2181
2182 	/*
2183 	 * Interrupt moderation
2184 	 *
2185 	 * 0xABCD
2186 	 * A - unknown (maybe TX related)
2187 	 * B - TX timer (unit: 25us)
2188 	 * C - unknown (maybe RX related)
2189 	 * D - RX timer (unit: 25us)
2190 	 *
2191 	 *
2192 	 * re(4)'s interrupt moderation is actually controlled by
2193 	 * two variables, like most other NICs (bge, bnx etc.)
2194 	 * o timer
2195 	 * o number of packets [P]
2196 	 *
2197 	 * The logic relationship between these two variables is
2198 	 * similar to other NICs too:
2199 	 * if (timer expire || packets > [P])
2200 	 *	Interrupt is delivered
2201 	 *
2202 	 * Currently we only know how to set 'timer', but not
2203 	 * 'number of packets', which should be ~30, as far as I
2204 	 * tested (sink ~900Kpps, interrupt rate is 30KHz)
2205 	 */
2206 	CSR_WRITE_2(sc, RL_IM,
2207 	    RL_IM_RXTIME(sc->rl_rx_time) |
2208 	    RL_IM_TXTIME(sc->rl_tx_time) |
2209 	    RL_IM_MAGIC);
2210 }
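With the encodings visible in this report's macro expansions (RL_IM_RXTIME keeps the low nibble, RL_IM_TXTIME shifts its nibble to bits 8-11, and RL_IM_MAGIC is 0x5050), the register value is easy to compute by hand. A standalone example with assumed timer values of 2 (RX) and 3 (TX):

	#include <stdio.h>

	/* Encodings as shown in the macro expansions above. */
	#define RL_IM_RXTIME(t)	((t) & 0xf)
	#define RL_IM_TXTIME(t)	(((t) & 0xf) << 8)
	#define RL_IM_MAGIC	0x5050

	int
	main(void)
	{
		unsigned int im = RL_IM_RXTIME(2) | RL_IM_TXTIME(3) |
		    RL_IM_MAGIC;

		/* 0xABCD layout: B = 3 (TX timer), D = 2 (RX timer). */
		printf("RL_IM = 0x%04x\n", im);	/* prints 0x5352 */
		return (0);
	}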
2211
2212 void
2213 re_disable_hw_im(struct rl_softc *sc)
2214 {
2215 	if (sc->rl_flags & RL_FLAG_HWIM)
2216 		CSR_WRITE_2(sc, RL_IM, 0);
2217 }
2218
2219 void
2220 re_setup_sim_im(struct rl_softc *sc)
2221 {
2222 	if (sc->sc_hwrev == RL_HWREV_8139CPLUS)
2223 		CSR_WRITE_4(sc, RL_TIMERINT, 0x400); /* XXX */
2224 	else {
2225 		u_int32_t nticks;
2226
2227 		/*
2228 		 * Datasheet says tick decreases at bus speed,
2229 		 * but it seems the clock runs a little bit
2230 		 * faster, so we do some compensation here.
2231 		 */
2232 		nticks = (sc->rl_sim_time * sc->rl_bus_speed * 8) / 5;
2233 		CSR_WRITE_4(sc, RL_TIMERINT_8169, nticks);
2234 	}
2235 	CSR_WRITE_4(sc, RL_TIMERCNT, 1); /* reload */
2236 	sc->rl_timerintr = 1;
2237 }
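The nticks expression converts the moderation interval to bus-clock ticks and scales by 8/5 (1.6x) to compensate for the fast-running timer the comment describes. Assuming rl_sim_time = 150 us and rl_bus_speed = 33 MHz (illustrative values only, the real ones come from chip probing): 150 * 33 = 4950 nominal ticks, scaled to 4950 * 8 / 5 = 7920. The same arithmetic as a standalone check:

	#include <stdio.h>

	int
	main(void)
	{
		unsigned int sim_time = 150;	/* us, assumed */
		unsigned int bus_speed = 33;	/* MHz, assumed */

		printf("nticks = %u\n", (sim_time * bus_speed * 8) / 5);
		return (0);
	}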
2238
2239 void
2240 re_disable_sim_im(struct rl_softc *sc)
2241 {
2242 	if (sc->sc_hwrev == RL_HWREV_8139CPLUS)
2243 		CSR_WRITE_4(sc, RL_TIMERINT, 0);
2244 	else
2245 		CSR_WRITE_4(sc, RL_TIMERINT_8169, 0);
2246 	sc->rl_timerintr = 0;
2247 }
2248
2249 void
2250 re_config_imtype(struct rl_softc *sc, int imtype)
2251 {
2252 	switch (imtype) {
2253 	case RL_IMTYPE_HW:
2254 		KASSERT(sc->rl_flags & RL_FLAG_HWIM);
2255 		/* FALLTHROUGH */
2256 	case RL_IMTYPE_NONE:
2257 		sc->rl_intrs = RL_INTRS_CPLUS;
2258 		sc->rl_rx_ack = RL_ISR_RX_OK | RL_ISR_FIFO_OFLOW |
2259 		    RL_ISR_RX_OVERRUN;
2260 		sc->rl_tx_ack = RL_ISR_TX_OK;
2261 		break;
2262
2263 	case RL_IMTYPE_SIM:
2264 		sc->rl_intrs = RL_INTRS_TIMER;
2265 		sc->rl_rx_ack = RL_ISR_TIMEOUT_EXPIRED;
2266 		sc->rl_tx_ack = RL_ISR_TIMEOUT_EXPIRED;
2267 		break;
2268
2269 	default:
2270 		panic("%s: unknown imtype %d",
2271 		    sc->sc_dev.dv_xname, imtype);
2272 	}
2273 }
2274
2275 void
2276 re_set_jumbo(struct rl_softc *sc)
2277 {
2278 	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG);
2279 	CSR_WRITE_1(sc, RL_CFG3, CSR_READ_1(sc, RL_CFG3) |
2280 	    RL_CFG3_JUMBO_EN0);
2281
2282 	switch (sc->sc_hwrev) {
2283 	case RL_HWREV_8168DP:
2284 		break;
2285 	case RL_HWREV_8168E:
2286 		CSR_WRITE_1(sc, RL_CFG4, CSR_READ_1(sc, RL_CFG4) |
2287 		    RL_CFG4_8168E_JUMBO_EN1);
2288 		break;
2289 	default:
2290 		CSR_WRITE_1(sc, RL_CFG4, CSR_READ_1(sc, RL_CFG4) |
2291 		    RL_CFG4_JUMBO_EN1);
2292 		break;
2293 	}
2294
2295 	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
2296 }
2297
2298 void
2299 re_setup_intr(struct rl_softc *sc, int enable_intrs, int imtype)
2300 {
2301 	re_config_imtype(sc, imtype);
2302
2303 	if (enable_intrs)
2304 		CSR_WRITE_2(sc, RL_IMR, sc->rl_intrs);
2305 	else
2306 		CSR_WRITE_2(sc, RL_IMR, 0);
2307
2308 	switch (imtype) {
2309 	case RL_IMTYPE_NONE:
2310 		re_disable_sim_im(sc);
2311 		re_disable_hw_im(sc);
2312 		break;
2313
2314 	case RL_IMTYPE_HW:
2315 		KASSERT(sc->rl_flags & RL_FLAG_HWIM);
2316 		re_disable_sim_im(sc);
2317 		re_setup_hw_im(sc);
2318 		break;
2319
2320 	case RL_IMTYPE_SIM:
2321 		re_disable_hw_im(sc);
2322 		re_setup_sim_im(sc);
2323 		break;
2324
2325 	default:
2326 		panic("%s: unknown imtype %d",
2327 		    sc->sc_dev.dv_xname, imtype);
2328 	}
2329 }
2330
2331 #ifndef SMALL_KERNEL
2332 int
2333 re_wol(struct ifnet *ifp, int enable)
2334 {
2335 	struct rl_softc *sc = ifp->if_softc;
2336 	u_int8_t val;
2337
2338 	if (enable) {
2339 		if ((CSR_READ_1(sc, sc->rl_cfg1) & RL_CFG1_PME) == 0) {
2340 			printf("%s: power management is disabled, "
2341 			    "cannot do WOL\n", sc->sc_dev.dv_xname);
2342 			return (ENOTSUP);
2343 		}
2344 		if ((CSR_READ_1(sc, sc->rl_cfg2) & RL_CFG2_AUXPWR) == 0)
2345 			printf("%s: no auxiliary power, cannot do WOL from D3 "
2346 			    "(power-off) state\n", sc->sc_dev.dv_xname);
2347 	}
2348
2349 	re_iff(sc);
2350
2351 	/* Temporarily enable write to configuration registers. */
2352 	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG);
2353
2354 	/* Always disable all wake events except magic packet. */
2355 	if (enable) {
2356 		val = CSR_READ_1(sc, sc->rl_cfg5);
2357 		val &= ~(RL_CFG5_WOL_UCAST | RL_CFG5_WOL_MCAST |
2358 		    RL_CFG5_WOL_BCAST);
2359 		CSR_WRITE_1(sc, sc->rl_cfg5, val);
2360
2361 		val = CSR_READ_1(sc, sc->rl_cfg3);
2362 		val |= RL_CFG3_WOL_MAGIC;
2363 		val &= ~RL_CFG3_WOL_LINK;
2364 		CSR_WRITE_1(sc, sc->rl_cfg3, val);
2365 	} else {
2366 		val = CSR_READ_1(sc, sc->rl_cfg5);
2367 		val &= ~(RL_CFG5_WOL_UCAST | RL_CFG5_WOL_MCAST |
2368 		    RL_CFG5_WOL_BCAST);
2369 		CSR_WRITE_1(sc, sc->rl_cfg5, val);
2370
2371 		val = CSR_READ_1(sc, sc->rl_cfg3);
2372 		val &= ~(RL_CFG3_WOL_MAGIC | RL_CFG3_WOL_LINK);
2373 		CSR_WRITE_1(sc, sc->rl_cfg3, val);
2374 	}
2375
2376 	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
2377
2378 	return (0);
2379 }
2380 #endif