Bug Summary

File: dev/pci/if_se.c
Warning: line 527, column 16
Value stored to 'ifp' during its initialization is never read

Annotated Source Code


clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name if_se.c -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/lib/clang/13.0.0 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -D CONFIG_DRM_AMD_DC_DCN3_0 -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant -fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free 
-fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /usr/obj/sys/arch/amd64/compile/GENERIC.MP/scan-build/2022-01-12-131800-47421-1 -x c /usr/src/sys/dev/pci/if_se.c
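
The diagnostic is a dead store: at line 527 below, se_attach() initializes 'ifp' to '&ac->ac_if', but that initial value is never read, because 'ifp' is overwritten at line 687 with '&sc->sc_ac.ac_if' (the same address, since 'ac' was set to '&sc->sc_ac' on the preceding line) before any use. A minimal sketch of one way to address it, assuming the later assignment is the one kept (an illustrative fix, not a committed OpenBSD patch):

    /* sketch: declare without the dead initializer */
    struct ifnet *ifp;              /* was: *ifp = &ac->ac_if; (line 527) */
    ...
    ifp = &sc->sc_ac.ac_if;         /* line 687: first assignment that is read */
    ifp->if_softc = sc;

Deleting the line-687 reassignment and keeping the initializer instead would be equally valid; either variant silences the warning without changing behavior.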
1/* $OpenBSD: if_se.c,v 1.23 2022/01/09 05:42:54 jsg Exp $ */
2
3/*-
4 * Copyright (c) 2009, 2010 Christopher Zimmermann <madroach@zakweb.de>
5 * Copyright (c) 2008, 2009, 2010 Nikolay Denev <ndenev@gmail.com>
6 * Copyright (c) 2007, 2008 Alexander Pohoyda <alexander.pohoyda@gmx.net>
7 * Copyright (c) 1997, 1998, 1999
8 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by Bill Paul.
21 * 4. Neither the name of the author nor the names of any co-contributors
22 * may be used to endorse or promote products derived from this software
23 * without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS''
26 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
28 * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL AUTHORS OR
29 * THE VOICES IN THEIR HEADS BE LIABLE FOR ANY DIRECT, INDIRECT,
30 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
31 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
32 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
34 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
36 * OF THE POSSIBILITY OF SUCH DAMAGE.
37 */
38
39/*
40 * SiS 190/191 PCI Ethernet NIC driver.
41 *
42 * Adapted to SiS 190 NIC by Alexander Pohoyda based on the original
43 * SiS 900 driver by Bill Paul, using SiS 190/191 Solaris driver by
44 * Masayuki Murayama and SiS 190/191 GNU/Linux driver by K.M. Liu
45 * <kmliu@sis.com>. Thanks to Pyun YongHyeon <pyunyh@gmail.com> for
46 * review and very useful comments.
47 *
48 * Ported to OpenBSD by Christopher Zimmermann 2009/10
49 *
50 * Adapted to SiS 191 NIC by Nikolay Denev with further ideas from the
51 * Linux and Solaris drivers.
52 */
53
54#include "bpfilter.h"
55
56#include <sys/param.h>
57#include <sys/systm.h>
58#include <sys/device.h>
59#include <sys/ioctl.h>
60#include <sys/kernel.h>
61#include <sys/mbuf.h>
62#include <sys/socket.h>
63#include <sys/sockio.h>
64#include <sys/timeout.h>
65
66#include <net/if.h>
67#include <net/if_media.h>
68
69#include <netinet/in.h>
70#include <netinet/if_ether.h>
71
72#if NBPFILTER > 0
73#include <net/bpf.h>
74#endif
75
76#include <dev/mii/miivar.h>
77
78#include <dev/pci/pcidevs.h>
79#include <dev/pci/pcireg.h>
80#include <dev/pci/pcivar.h>
81
82#include <dev/pci/if_sereg.h>
83
84#define SE_RX_RING_CNT 256 /* [8, 1024] */
85#define SE_TX_RING_CNT 256 /* [8, 8192] */
86#define SE_RX_BUF_ALIGN sizeof(uint64_t)
87
88#define SE_RX_RING_SZ (SE_RX_RING_CNT * sizeof(struct se_desc))
89#define SE_TX_RING_SZ (SE_TX_RING_CNT * sizeof(struct se_desc))
90
91struct se_list_data {
92 struct se_desc *se_rx_ring;
93 struct se_desc *se_tx_ring;
94 bus_dmamap_t se_rx_dmamap;
95 bus_dmamap_t se_tx_dmamap;
96};
97
98struct se_chain_data {
99 struct mbuf *se_rx_mbuf[SE_RX_RING_CNT];
100 struct mbuf *se_tx_mbuf[SE_TX_RING_CNT];
101 bus_dmamap_t se_rx_map[SE_RX_RING_CNT];
102 bus_dmamap_t se_tx_map[SE_TX_RING_CNT];
103 uint se_rx_prod;
104 uint se_tx_prod;
105 uint se_tx_cons;
106 uint se_tx_cnt;
107};
108
109struct se_softc {
110 struct device sc_dev;
111 void *sc_ih;
112 bus_space_tag_t sc_iot;
113 bus_space_handle_t sc_ioh;
114 bus_dma_tag_t sc_dmat;
115
116 struct mii_data sc_mii;
117 struct arpcom sc_ac;
118
119 struct se_list_data se_ldata;
120 struct se_chain_data se_cdata;
121
122 struct timeout sc_tick_tmo;
123
124 int sc_flags;
125#define SE_FLAG_FASTETHER 0x0001
126#define SE_FLAG_RGMII 0x0010
127#define SE_FLAG_LINK 0x8000
128};
129
130/*
131 * Various supported device vendors/types and their names.
132 */
133const struct pci_matchid se_devices[] = {
134 { PCI_VENDOR_SIS, PCI_PRODUCT_SIS_190 },
135 { PCI_VENDOR_SIS, PCI_PRODUCT_SIS_191 }
136};
137
138int se_match(struct device *, void *, void *);
139void se_attach(struct device *, struct device *, void *);
140int se_activate(struct device *, int);
141
142const struct cfattach se_ca = {
143 sizeof(struct se_softc),
144 se_match, se_attach, NULL, se_activate
145};
146
147struct cfdriver se_cd = {
148 0, "se", DV_IFNET
149};
150
151uint32_t
152 se_miibus_cmd(struct se_softc *, uint32_t);
153int se_miibus_readreg(struct device *, int, int);
154void se_miibus_writereg(struct device *, int, int, int);
155void se_miibus_statchg(struct device *);
156
157int se_newbuf(struct se_softc *, uint);
158void se_discard_rxbuf(struct se_softc *, uint);
159int se_encap(struct se_softc *, struct mbuf *, uint *);
160void se_rxeof(struct se_softc *);
161void se_txeof(struct se_softc *);
162int se_intr(void *);
163void se_tick(void *);
164void se_start(struct ifnet *);
165int se_ioctl(struct ifnet *, u_long, caddr_t);
166int se_init(struct ifnet *);
167void se_stop(struct se_softc *);
168void se_watchdog(struct ifnet *);
169int se_ifmedia_upd(struct ifnet *);
170void se_ifmedia_sts(struct ifnet *, struct ifmediareq *);
171
172int se_pcib_match(struct pci_attach_args *);
173int se_get_mac_addr_apc(struct se_softc *, uint8_t *);
174int se_get_mac_addr_eeprom(struct se_softc *, uint8_t *);
175uint16_t
176 se_read_eeprom(struct se_softc *, int);
177
178void se_iff(struct se_softc *);
179void se_reset(struct se_softc *);
180int se_list_rx_init(struct se_softc *);
181int se_list_rx_free(struct se_softc *);
182int se_list_tx_init(struct se_softc *);
183int se_list_tx_free(struct se_softc *);
184
185/*
186 * Register space access macros.
187 */
188
189#define CSR_WRITE_4(sc, reg, val) \
190 bus_space_write_4((sc)->sc_iot, (sc)->sc_ioh, reg, val)
191#define CSR_WRITE_2(sc, reg, val) \
192 bus_space_write_2((sc)->sc_iot, (sc)->sc_ioh, reg, val)
193#define CSR_WRITE_1(sc, reg, val) \
194 bus_space_write_1((sc)->sc_iot, (sc)->sc_ioh, reg, val)
195
196#define CSR_READ_4(sc, reg) \
197 bus_space_read_4((sc)->sc_iot, (sc)->sc_ioh, reg)
198#define CSR_READ_2(sc, reg) \
199 bus_space_read_2((sc)->sc_iot, (sc)->sc_ioh, reg)
200#define CSR_READ_1(sc, reg) \
201 bus_space_read_1((sc)->sc_iot, (sc)->sc_ioh, reg)
202
203/*
204 * Read a sequence of words from the EEPROM.
205 */
206uint16_t
207se_read_eeprom(struct se_softc *sc, int offset)
208{
209 uint32_t val;
210 int i;
211
212 KASSERT(offset <= EI_OFFSET);
213
214 CSR_WRITE_4(sc, ROMInterface,
215 EI_REQ | EI_OP_RD | (offset << EI_OFFSET_SHIFT));
216 DELAY(500);
217 for (i = 0; i < SE_TIMEOUT; i++) {
218 val = CSR_READ_4(sc, ROMInterface);
219 if ((val & EI_REQ) == 0)
220 break;
221 DELAY(100);
222 }
223 if (i == SE_TIMEOUT) {
224 printf("%s: EEPROM read timeout: 0x%08x\n",
225 sc->sc_dev.dv_xname, val);
226 return 0xffff;
227 }
228
229 return (val & EI_DATA) >> EI_DATA_SHIFT;
230}
231
232int
233se_get_mac_addr_eeprom(struct se_softc *sc, uint8_t *dest)
234{
235 uint16_t val;
236 int i;
237
238 val = se_read_eeprom(sc, EEPROMSignature);
239 if (val == 0xffff || val == 0x0000) {
240 printf("%s: invalid EEPROM signature : 0x%04x\n",
241 sc->sc_dev.dv_xname, val);
242 return (EINVAL);
243 }
244
245 for (i = 0; i < ETHER_ADDR_LEN; i += 2) {
246 val = se_read_eeprom(sc, EEPROMMACAddr + i / 2);
247 dest[i + 0] = (uint8_t)val;
248 dest[i + 1] = (uint8_t)(val >> 8);
249 }
250
251 if ((se_read_eeprom(sc, EEPROMInfo) & 0x80) != 0)
252 sc->sc_flags |= SE_FLAG_RGMII;
253 return (0);
254}
255
256/*
257 * For SiS96x, APC CMOS RAM is used to store Ethernet address.
258 * APC CMOS RAM is accessed through ISA bridge.
259 */
260#if defined(__amd64__) || defined(__i386__)
261int
262se_pcib_match(struct pci_attach_args *pa)
263{
264 const struct pci_matchid apc_devices[] = {
265 { PCI_VENDOR_SIS, PCI_PRODUCT_SIS_965 },
266 { PCI_VENDOR_SIS, PCI_PRODUCT_SIS_966 },
267 { PCI_VENDOR_SIS, PCI_PRODUCT_SIS_968 }
268 };
269
270 return pci_matchbyid(pa, apc_devices, nitems(apc_devices));
271}
272#endif
273
274int
275se_get_mac_addr_apc(struct se_softc *sc, uint8_t *dest)
276{
277#if defined(__amd64__) || defined(__i386__)
278 struct pci_attach_args pa;
279 pcireg_t reg;
280 bus_space_handle_t ioh;
281 int rc, i;
282
283 if (pci_find_device(&pa, se_pcib_match) == 0) {
284 printf("\n%s: couldn't find PCI-ISA bridge\n",
285 sc->sc_dev.dv_xname);
286 return EINVAL;
287 }
288
289 /* Enable port 0x78 and 0x79 to access APC registers. */
290 reg = pci_conf_read(pa.pa_pc, pa.pa_tag, 0x48);
291 pci_conf_write(pa.pa_pc, pa.pa_tag, 0x48, reg & ~0x02);
292 DELAY(50);
293 (void)pci_conf_read(pa.pa_pc, pa.pa_tag, 0x48);
294
295 /* XXX this abuses bus_space implementation knowledge */
296 rc = _bus_space_map(pa.pa_iot, 0x78, 2, 0, &ioh);
297 if (rc == 0) {
298 /* Read stored Ethernet address. */
299 for (i = 0; i < ETHER_ADDR_LEN; i++) {
300 bus_space_write_1(pa.pa_iot, ioh, 0, 0x09 + i);
301 dest[i] = bus_space_read_1(pa.pa_iot, ioh, 1);
302 }
303 bus_space_write_1(pa.pa_iot, ioh, 0, 0x12);
304 if ((bus_space_read_1(pa.pa_iot, ioh, 1) & 0x80) != 0)
305 sc->sc_flags |= SE_FLAG_RGMII;
306 _bus_space_unmap(pa.pa_iot, ioh, 2, NULL);
307 } else
308 rc = EINVAL;
309
310 /* Restore access to APC registers. */
311 pci_conf_write(pa.pa_pc, pa.pa_tag, 0x48, reg);
312
313 return rc;
314#endif
315 return EINVAL;
316}
317
318uint32_t
319se_miibus_cmd(struct se_softc *sc, uint32_t ctrl)
320{
321 int i;
322 uint32_t val;
323
324 CSR_WRITE_4(sc, GMIIControl, ctrl);
325 DELAY(10);
326 for (i = 0; i < SE_TIMEOUT; i++) {
327 val = CSR_READ_4(sc, GMIIControl);
328 if ((val & GMI_REQ) == 0)
329 return val;
330 DELAY(10);
331 }
332
333 return GMI_REQ;
334}
335
336int
337se_miibus_readreg(struct device *self, int phy, int reg)
338{
339 struct se_softc *sc = (struct se_softc *)self;
340 uint32_t ctrl, val;
341
342 ctrl = (phy << GMI_PHY_SHIFT) | (reg << GMI_REG_SHIFT) |
343 GMI_OP_RD | GMI_REQ;
344 val = se_miibus_cmd(sc, ctrl);
345 if ((val & GMI_REQ) != 0) {
346 printf("%s: PHY read timeout : %d\n",
347 sc->sc_dev.dv_xname, reg);
348 return 0;
349 }
350 return (val & GMI_DATA) >> GMI_DATA_SHIFT;
351}
352
353void
354se_miibus_writereg(struct device *self, int phy, int reg, int data)
355{
356 struct se_softc *sc = (struct se_softc *)self;
357 uint32_t ctrl, val;
358
359 ctrl = (phy << GMI_PHY_SHIFT) | (reg << GMI_REG_SHIFT) |
360 GMI_OP_WR | (data << GMI_DATA_SHIFT) | GMI_REQ;
361 val = se_miibus_cmd(sc, ctrl);
362 if ((val & GMI_REQ) != 0) {
363 printf("%s: PHY write timeout : %d\n",
364 sc->sc_dev.dv_xname, reg);
365 }
366}
367
368void
369se_miibus_statchg(struct device *self)
370{
371 struct se_softc *sc = (struct se_softc *)self;
372#ifdef SE_DEBUG
373 struct ifnet *ifp = &sc->sc_ac.ac_if;
374#endif
375 struct mii_data *mii = &sc->sc_mii;
376 uint32_t ctl, speed;
377
378 speed = 0;
379 sc->sc_flags &= ~SE_FLAG_LINK;
380 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
381 (IFM_ACTIVE | IFM_AVALID)) {
382 switch (IFM_SUBTYPE(mii->mii_media_active)) {
383 case IFM_10_T:
384#ifdef SE_DEBUG
385 if (ifp->if_flags & IFF_DEBUG)
386 printf("%s: 10baseT link\n", ifp->if_xname);
387#endif
388 sc->sc_flags |= SE_FLAG_LINK;
389 speed = SC_SPEED_10;
390 break;
391 case IFM_100_TX:
392#ifdef SE_DEBUG
393 if (ifp->if_flags & IFF_DEBUG)
394 printf("%s: 100baseTX link\n", ifp->if_xname);
395#endif
396 sc->sc_flags |= SE_FLAG_LINK;
397 speed = SC_SPEED_100;
398 break;
399 case IFM_1000_T:
400#ifdef SE_DEBUG
401 if (ifp->if_flags & IFF_DEBUG)
402 printf("%s: 1000baseT link\n", ifp->if_xname);
403#endif
404 if ((sc->sc_flags & SE_FLAG_FASTETHER) == 0) {
405 sc->sc_flags |= SE_FLAG_LINK;
406 speed = SC_SPEED_1000;
407 }
408 break;
409 default:
410 break;
411 }
412 }
413 if ((sc->sc_flags & SE_FLAG_LINK) == 0) {
414#ifdef SE_DEBUG
415 if (ifp->if_flags & IFF_DEBUG)
416 printf("%s: no link\n", ifp->if_xname);
417#endif
418 return;
419 }
420 /* Reprogram MAC to resolved speed/duplex/flow-control parameters. */
421 ctl = CSR_READ_4(sc, StationControl);
422 ctl &= ~(0x0f000000 | SC_FDX | SC_SPEED_MASK);
423 if (speed == SC_SPEED_1000)
424 ctl |= 0x07000000;
425 else
426 ctl |= 0x04000000;
427#ifdef notyet
428 if ((sc->sc_flags & SE_FLAG_GMII) != 0)
429 ctl |= 0x03000000;
430#endif
431 ctl |= speed;
432 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
433 ctl |= SC_FDX;
434 CSR_WRITE_4(sc, StationControl, ctl);
435 if ((sc->sc_flags & SE_FLAG_RGMII) != 0) {
436 CSR_WRITE_4(sc, RGMIIDelay, 0x0441);
437 CSR_WRITE_4(sc, RGMIIDelay, 0x0440);
438 }
439}
440
441void
442se_iff(struct se_softc *sc)
443{
444 struct arpcom *ac = &sc->sc_ac;
445 struct ifnet *ifp = &ac->ac_if;
446 struct ether_multi *enm;
447 struct ether_multistep step;
448 uint32_t crc, hashes[2];
449 uint16_t rxfilt;
450
451 rxfilt = CSR_READ_2(sc, RxMacControl);
452 rxfilt &= ~(AcceptAllPhys | AcceptBroadcast | AcceptMulticast);
453 ifp->if_flags &= ~IFF_ALLMULTI;
454
455 /*
456 * Always accept broadcast frames.
457 * Always accept frames destined to our station address.
458 */
459 rxfilt |= AcceptBroadcast | AcceptMyPhys;
460
461 if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
462 ifp->if_flags |= IFF_ALLMULTI;
463 if (ifp->if_flags & IFF_PROMISC)
464 rxfilt |= AcceptAllPhys;
465 rxfilt |= AcceptMulticast;
466 hashes[0] = hashes[1] = 0xffffffff;
467 } else {
468 rxfilt |= AcceptMulticast;
469 hashes[0] = hashes[1] = 0;
470
471 ETHER_FIRST_MULTI(step, ac, enm);
472 while (enm != NULL) {
473 crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);
474
475 hashes[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);
476
477 ETHER_NEXT_MULTI(step, enm);
478 }
479 }
480
481 CSR_WRITE_2(sc, RxMacControl, rxfilt);
482 CSR_WRITE_4(sc, RxHashTable, hashes[0]);
483 CSR_WRITE_4(sc, RxHashTable2, hashes[1]);
484}
485
486void
487se_reset(struct se_softc *sc)
488{
489 CSR_WRITE_4(sc, IntrMask, 0);
490 CSR_WRITE_4(sc, IntrStatus, 0xffffffff);
491
492 /* Soft reset. */
493 CSR_WRITE_4(sc, IntrControl, 0x8000);
494 CSR_READ_4(sc, IntrControl);
495 DELAY(100);
496 CSR_WRITE_4(sc, IntrControl, 0);
497 /* Stop MAC. */
498 CSR_WRITE_4(sc, TX_CTL, 0x1a00);
499 CSR_WRITE_4(sc, RX_CTL, 0x1a00);
500
501 CSR_WRITE_4(sc, IntrMask, 0);
502 CSR_WRITE_4(sc, IntrStatus, 0xffffffff);
503
504 CSR_WRITE_4(sc, GMIIControl, 0);
505}
506
507/*
508 * Probe for an SiS chip. Check the PCI vendor and device
509 * IDs against our list and return a device name if we find a match.
510 */
511int
512se_match(struct device *parent, void *match, void *aux)
513{
514 struct pci_attach_args *pa = (struct pci_attach_args *)aux;
515
516 return pci_matchbyid(pa, se_devices, nitems(se_devices));
517}
518
519/*
520 * Attach the interface. Do ifmedia setup and ethernet/BPF attach.
521 */
522void
523se_attach(struct device *parent, struct device *self, void *aux)
524{
525 struct se_softc *sc = (struct se_softc *)self;
526 struct arpcom *ac = &sc->sc_ac;
527 struct ifnet *ifp = &ac->ac_if;
Value stored to 'ifp' during its initialization is never read
528 struct pci_attach_args *pa = (struct pci_attach_args *)aux;
529 uint8_t eaddr[ETHER_ADDR_LEN];
530 const char *intrstr;
531 pci_intr_handle_t ih;
532 bus_size_t iosize;
533 bus_dma_segment_t seg;
534 struct se_list_data *ld;
535 struct se_chain_data *cd;
536 int nseg;
537 uint i;
538 int rc;
539
540 printf(": ");
541
542 /*
543 * Map control/status registers.
544 */
545
546 rc = pci_mapreg_map(pa, PCI_MAPREG_START, PCI_MAPREG_TYPE_MEM, 0,
547 &sc->sc_iot, &sc->sc_ioh, NULL, &iosize, 0);
548 if (rc != 0) {
549 printf("can't map i/o space\n");
550 return;
551 }
552
553 if (pci_intr_map(pa, &ih)) {
554 printf("can't map interrupt\n");
555 goto fail1;
556 }
557 intrstr = pci_intr_string(pa->pa_pc, ih);
558 sc->sc_ih = pci_intr_establish(pa->pa_pc, ih, IPL_NET, se_intr, sc,
559 self->dv_xname);
560 if (sc->sc_ih == NULL) {
561 printf("can't establish interrupt");
562 if (intrstr != NULL)
563 printf(" at %s", intrstr);
564 printf("\n");
565 goto fail1;
566 }
567
568 printf("%s", intrstr);
569
570 if (pa->pa_id == PCI_ID_CODE(PCI_VENDOR_SIS, PCI_PRODUCT_SIS_190))
571 sc->sc_flags |= SE_FLAG_FASTETHER;
572
573 /* Reset the adapter. */
574 se_reset(sc);
575
576 /* Get MAC address from the EEPROM. */
577 if ((pci_conf_read(pa->pa_pc, pa->pa_tag, 0x70) & (0x01 << 24)) != 0)
578 se_get_mac_addr_apc(sc, eaddr);
579 else
580 se_get_mac_addr_eeprom(sc, eaddr);
581 printf(", address %s\n", ether_sprintf(eaddr));
582 bcopy(eaddr, ac->ac_enaddr, ETHER_ADDR_LEN);
583
584 /*
585 * Now do all the DMA mapping stuff
586 */
587
588 sc->sc_dmat = pa->pa_dmat;
589 ld = &sc->se_ldata;
590 cd = &sc->se_cdata;
591
592 /* First create TX/RX busdma maps. */
593 for (i = 0; i < SE_RX_RING_CNT; i++) {
594 rc = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
595 0, BUS_DMA_NOWAIT, &cd->se_rx_map[i]);
596 if (rc != 0) {
597 printf("%s: cannot init the RX map array\n",
598 self->dv_xname);
599 goto fail2;
600 }
601 }
602
603 for (i = 0; i < SE_TX_RING_CNT; i++) {
604 rc = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
605 0, BUS_DMA_NOWAIT, &cd->se_tx_map[i]);
606 if (rc != 0) {
607 printf("%s: cannot init the TX map array\n",
608 self->dv_xname);
609 goto fail2;
610 }
611 }
612
613 /*
614 * Now allocate a chunk of DMA-able memory for RX and TX ring
615 * descriptors, as a contiguous block of memory.
616 * XXX fix deallocation upon error
617 */
618
619 /* RX */
620 rc = bus_dmamem_alloc(sc->sc_dmat, SE_RX_RING_SZ, PAGE_SIZE, 0,
621 &seg, 1, &nseg, BUS_DMA_NOWAIT);
622 if (rc != 0) {
623 printf("%s: no memory for RX descriptors\n", self->dv_xname);
624 goto fail2;
625 }
626
627 rc = bus_dmamem_map(sc->sc_dmat, &seg, nseg, SE_RX_RING_SZ,
628 (caddr_t *)&ld->se_rx_ring, BUS_DMA_NOWAIT);
629 if (rc != 0) {
630 printf("%s: can't map RX descriptors\n", self->dv_xname);
631 goto fail2;
632 }
633
634 rc = bus_dmamap_create(sc->sc_dmat, SE_RX_RING_SZ, 1,
635 SE_RX_RING_SZ, 0, BUS_DMA_NOWAIT, &ld->se_rx_dmamap);
636 if (rc != 0) {
637 printf("%s: can't alloc RX DMA map\n", self->dv_xname);
638 goto fail2;
639 }
640
641 rc = bus_dmamap_load(sc->sc_dmat, ld->se_rx_dmamap,
642 (caddr_t)ld->se_rx_ring, SE_RX_RING_SZ, NULL, BUS_DMA_NOWAIT);
643 if (rc != 0) {
644 printf("%s: can't load RX DMA map\n", self->dv_xname);
645 bus_dmamem_unmap(sc->sc_dmat,
646 (caddr_t)ld->se_rx_ring, SE_RX_RING_SZ);
647 bus_dmamap_destroy(sc->sc_dmat, ld->se_rx_dmamap);
648 bus_dmamem_free(sc->sc_dmat, &seg, nseg);
649 goto fail2;
650 }
651
652 /* TX */
653 rc = bus_dmamem_alloc(sc->sc_dmat, SE_TX_RING_SZ, PAGE_SIZE, 0,
654 &seg, 1, &nseg, BUS_DMA_NOWAIT);
655 if (rc != 0) {
656 printf("%s: no memory for TX descriptors\n", self->dv_xname);
657 goto fail2;
658 }
659
660 rc = bus_dmamem_map(sc->sc_dmat, &seg, nseg, SE_TX_RING_SZ,
661 (caddr_t *)&ld->se_tx_ring, BUS_DMA_NOWAIT);
662 if (rc != 0) {
663 printf("%s: can't map TX descriptors\n", self->dv_xname);
664 goto fail2;
665 }
666
667 rc = bus_dmamap_create(sc->sc_dmat, SE_TX_RING_SZ, 1,
668 SE_TX_RING_SZ, 0, BUS_DMA_NOWAIT, &ld->se_tx_dmamap);
669 if (rc != 0) {
670 printf("%s: can't alloc TX DMA map\n", self->dv_xname);
671 goto fail2;
672 }
673
674 rc = bus_dmamap_load(sc->sc_dmat, ld->se_tx_dmamap,
675 (caddr_t)ld->se_tx_ring, SE_TX_RING_SZ, NULL, BUS_DMA_NOWAIT);
676 if (rc != 0) {
677 printf("%s: can't load TX DMA map\n", self->dv_xname);
678 bus_dmamem_unmap(sc->sc_dmat,
679 (caddr_t)ld->se_tx_ring, SE_TX_RING_SZ);
680 bus_dmamap_destroy(sc->sc_dmat, ld->se_tx_dmamap);
681 bus_dmamem_free(sc->sc_dmat, &seg, nseg);
682 goto fail2;
683 }
684
685 timeout_set(&sc->sc_tick_tmo, se_tick, sc);
686
687 ifp = &sc->sc_ac.ac_if;
688 ifp->if_softc = sc;
689 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
690 ifp->if_ioctl = se_ioctl;
691 ifp->if_start = se_start;
692 ifp->if_watchdog = se_watchdog;
693 ifq_set_maxlen(&ifp->if_snd, SE_TX_RING_CNT - 1);
694 bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
695
696 ifp->if_capabilities = IFCAP_VLAN_MTU;
697
698 /*
699 * Do MII setup.
700 */
701
702 sc->sc_mii.mii_ifp = ifp;
703 sc->sc_mii.mii_readreg = se_miibus_readreg;
704 sc->sc_mii.mii_writereg = se_miibus_writereg;
705 sc->sc_mii.mii_statchg = se_miibus_statchg;
706 ifmedia_init(&sc->sc_mii.mii_media, 0, se_ifmedia_upd,
707 se_ifmedia_sts);
708 mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
709 MII_OFFSET_ANY, 0);
710
711 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
712 /* No PHY attached */
713 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL,
714 0, NULL);
715 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL);
716 } else
717 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
718
719 /*
720 * Call MI attach routine.
721 */
722 if_attach(ifp);
723 ether_ifattach(ifp);
724
725 return;
726
727fail2:
728 pci_intr_disestablish(pa->pa_pc, sc->sc_ih);
729fail1:
730 bus_space_unmap(sc->sc_iot, sc->sc_ioh, iosize);
731}
732
733int
734se_activate(struct device *self, int act)
735{
736 struct se_softc *sc = (struct se_softc *)self;
737 struct ifnet *ifp = &sc->sc_ac.ac_if;
738 int rv = 0;
739
740 switch (act) {
741 case DVACT_SUSPEND:
742 if (ifp->if_flags & IFF_RUNNING)
743 se_stop(sc);
744 rv = config_activate_children(self, act);
745 break;
746 case DVACT_RESUME:
747 if (ifp->if_flags & IFF_UP)
748 (void)se_init(ifp);
749 break;
750 default:
751 rv = config_activate_children(self, act);
752 break;
753 }
754
755 return (rv);
756}
757
758/*
759 * Initialize the TX descriptors.
760 */
761int
762se_list_tx_init(struct se_softc *sc)
763{
764 struct se_list_data *ld = &sc->se_ldata;
765 struct se_chain_data *cd = &sc->se_cdata;
766
767 bzero(ld->se_tx_ring, SE_TX_RING_SZ);
768 ld->se_tx_ring[SE_TX_RING_CNT - 1].se_flags = htole32(RING_END);
769 bus_dmamap_sync(sc->sc_dmat, ld->se_tx_dmamap, 0, SE_TX_RING_SZ,
770 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
771 cd->se_tx_prod = 0;
772 cd->se_tx_cons = 0;
773 cd->se_tx_cnt = 0;
774
775 return 0;
776}
777
778int
779se_list_tx_free(struct se_softc *sc)
780{
781 struct se_chain_data *cd = &sc->se_cdata;
782 uint i;
783
784 for (i = 0; i < SE_TX_RING_CNT; i++) {
785 if (cd->se_tx_mbuf[i] != NULL) {
786 bus_dmamap_unload(sc->sc_dmat, cd->se_tx_map[i]);
787 m_free(cd->se_tx_mbuf[i]);
788 cd->se_tx_mbuf[i] = NULL;
789 }
790 }
791
792 return 0;
793}
794
795/*
796 * Initialize the RX descriptors and allocate mbufs for them.
797 */
798int
799se_list_rx_init(struct se_softc *sc)
800{
801 struct se_list_data *ld = &sc->se_ldata;
802 struct se_chain_data *cd = &sc->se_cdata;
803 uint i;
804
805 bzero(ld->se_rx_ring, SE_RX_RING_SZ);
806 bus_dmamap_sync(sc->sc_dmat, ld->se_rx_dmamap, 0, SE_RX_RING_SZ,
807 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
808 for (i = 0; i < SE_RX_RING_CNT; i++) {
809 if (se_newbuf(sc, i) != 0)
810 return ENOBUFS;
811 }
812
813 cd->se_rx_prod = 0;
814
815 return 0;
816}
817
818int
819se_list_rx_free(struct se_softc *sc)
820{
821 struct se_chain_data *cd = &sc->se_cdata;
822 uint i;
823
824 for (i = 0; i < SE_RX_RING_CNT; i++) {
825 if (cd->se_rx_mbuf[i] != NULL) {
826 bus_dmamap_unload(sc->sc_dmat, cd->se_rx_map[i]);
827 m_free(cd->se_rx_mbuf[i]);
828 cd->se_rx_mbuf[i] = NULL;
829 }
830 }
831
832 return 0;
833}
834
835/*
836 * Initialize an RX descriptor and attach an MBUF cluster.
837 */
838int
839se_newbuf(struct se_softc *sc, uint i)
840{
841#ifdef SE_DEBUG
842 struct ifnet *ifp = &sc->sc_ac.ac_if;
843#endif
844 struct se_list_data *ld = &sc->se_ldata;
845 struct se_chain_data *cd = &sc->se_cdata;
846 struct se_desc *desc;
847 struct mbuf *m;
848 int rc;
849
850 m = MCLGETL(NULL, M_DONTWAIT, MCLBYTES);
851 if (m == NULL) {
852#ifdef SE_DEBUG
853 if (ifp->if_flags & IFF_DEBUG)
854 printf("%s: MCLGETL failed\n", ifp->if_xname);
855#endif
856 return ENOBUFS;
857 }
858 m->m_len = m->m_pkthdr.len = MCLBYTES;
859 m_adj(m, SE_RX_BUF_ALIGN);
860
861 rc = bus_dmamap_load_mbuf(sc->sc_dmat, cd->se_rx_map[i],
862 m, BUS_DMA_NOWAIT);
863 KASSERT(cd->se_rx_map[i]->dm_nsegs == 1);
864 if (rc != 0) {
865 m_freem(m);
866 return ENOBUFS;
867 }
868 bus_dmamap_sync(sc->sc_dmat, cd->se_rx_map[i], 0,
869 cd->se_rx_map[i]->dm_mapsize, BUS_DMASYNC_PREREAD);
870
871 cd->se_rx_mbuf[i] = m;
872 desc = &ld->se_rx_ring[i];
873 desc->se_sts_size = 0;
874 desc->se_cmdsts = htole32(RDC_OWN | RDC_INTR);
875 desc->se_ptr = htole32((uint32_t)cd->se_rx_map[i]->dm_segs[0].ds_addr);
876 desc->se_flags = htole32(cd->se_rx_map[i]->dm_segs[0].ds_len);
877 if (i == SE_RX_RING_CNT - 1)
878 desc->se_flags |= htole32(RING_END);
879 bus_dmamap_sync(sc->sc_dmat, ld->se_rx_dmamap, i * sizeof(*desc),
880 sizeof(*desc), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
881
882 return 0;
883}
884
885void
886se_discard_rxbuf(struct se_softc *sc, uint i)
887{
888 struct se_list_data *ld = &sc->se_ldata;
889 struct se_desc *desc;
890
891 desc = &ld->se_rx_ring[i];
892 desc->se_sts_size = 0;
893 desc->se_cmdsts = htole32(RDC_OWN | RDC_INTR);
894 desc->se_flags = htole32(MCLBYTES - SE_RX_BUF_ALIGN);
895 if (i == SE_RX_RING_CNT - 1)
896 desc->se_flags |= htole32(RING_END);
897 bus_dmamap_sync(sc->sc_dmat, ld->se_rx_dmamap, i * sizeof(*desc),
898 sizeof(*desc), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
899}
900
901/*
902 * A frame has been uploaded: pass the resulting mbuf chain up to
903 * the higher level protocols.
904 */
905void
906se_rxeof(struct se_softc *sc)
907{
908 struct mbuf *m;
909 struct mbuf_list ml = MBUF_LIST_INITIALIZER();
910 struct ifnet *ifp = &sc->sc_ac.ac_if;
911 struct se_list_data *ld = &sc->se_ldata;
912 struct se_chain_data *cd = &sc->se_cdata;
913 struct se_desc *cur_rx;
914 uint32_t rxinfo, rxstat;
915 uint i;
916
917 bus_dmamap_sync(sc->sc_dmat, ld->se_rx_dmamap, 0, SE_RX_RING_SZ,
918 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
919 for (i = cd->se_rx_prod; ; SE_INC(i, SE_RX_RING_CNT)) {
920 cur_rx = &ld->se_rx_ring[i];
921 rxinfo = letoh32(cur_rx->se_cmdsts);
922 if ((rxinfo & RDC_OWN) != 0)
923 break;
924 rxstat = letoh32(cur_rx->se_sts_size);
925
926 /*
927 * If an error occurs, update stats, clear the
928 * status word and leave the mbuf cluster in place:
929 * it should simply get re-used next time this descriptor
930 * comes up in the ring.
931 */
932 if ((rxstat & RDS_CRCOK) == 0 || SE_RX_ERROR(rxstat) != 0 ||
933 SE_RX_NSEGS(rxstat) != 1) {
934 /* XXX We don't support multi-segment frames yet. */
935 if (ifp->if_flags & IFF_DEBUG)
936 printf("%s: rx error %b\n",
937 ifp->if_xname, rxstat, RX_ERR_BITS);
938 se_discard_rxbuf(sc, i);
939 ifp->if_ierrors++;
940 continue;
941 }
942
943 /* No errors; receive the packet. */
944 bus_dmamap_sync(sc->sc_dmat, cd->se_rx_map[i], 0,
945 cd->se_rx_map[i]->dm_mapsize, BUS_DMASYNC_POSTREAD);
946 m = cd->se_rx_mbuf[i];
947 if (se_newbuf(sc, i) != 0) {
948 se_discard_rxbuf(sc, i);
949 ifp->if_iqdrops++;
950 continue;
951 }
952 /*
953 * Account for 10 bytes auto padding which is used
954 * to align IP header on a 32bit boundary. Also note,
955 * CRC bytes are automatically removed by the hardware.
956 */
957 m->m_data += SE_RX_PAD_BYTES;
958 m->m_pkthdr.len = m->m_len =
959 SE_RX_BYTES(rxstat) - SE_RX_PAD_BYTES;
960
961 ml_enqueue(&ml, m);
962 }
963
964 if_input(ifp, &ml);
965
966 cd->se_rx_prod = i;
967}
968
969/*
970 * A frame was downloaded to the chip. It's safe for us to clean up
971 * the list buffers.
972 */
973
974void
975se_txeof(struct se_softc *sc)
976{
977 struct ifnet *ifp = &sc->sc_ac.ac_if;
978 struct se_list_data *ld = &sc->se_ldata;
979 struct se_chain_data *cd = &sc->se_cdata;
980 struct se_desc *cur_tx;
981 uint32_t txstat;
982 uint i;
983
984 /*
985 * Go through our tx list and free mbufs for those
986 * frames that have been transmitted.
987 */
988 bus_dmamap_sync(sc->sc_dmat, ld->se_tx_dmamap, 0, SE_TX_RING_SZ,
989 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
990 for (i = cd->se_tx_cons; cd->se_tx_cnt > 0;
991 cd->se_tx_cnt--, SE_INC(i, SE_TX_RING_CNT)) {
992 cur_tx = &ld->se_tx_ring[i];
993 txstat = letoh32(cur_tx->se_cmdsts);
994 if ((txstat & TDC_OWN) != 0)
995 break;
996
997 ifq_clr_oactive(&ifp->if_snd);
998
999 if (SE_TX_ERROR(txstat) != 0) {
1000 if (ifp->if_flags & IFF_DEBUG)
1001 printf("%s: tx error %b\n",
1002 ifp->if_xname, txstat, TX_ERR_BITS);
1003 ifp->if_oerrors++;
1004 /* TODO: better error differentiation */
1005 }
1006
1007 if (cd->se_tx_mbuf[i] != NULL) {
1008 bus_dmamap_sync(sc->sc_dmat, cd->se_tx_map[i], 0,
1009 cd->se_tx_map[i]->dm_mapsize,
1010 BUS_DMASYNC_POSTWRITE);
1011 bus_dmamap_unload(sc->sc_dmat, cd->se_tx_map[i]);
1012 m_free(cd->se_tx_mbuf[i]);
1013 cd->se_tx_mbuf[i] = NULL;
1014 }
1015
1016 cur_tx->se_sts_size = 0;
1017 cur_tx->se_cmdsts = 0;
1018 cur_tx->se_ptr = 0;
1019 cur_tx->se_flags &= htole32(RING_END);
1020 bus_dmamap_sync(sc->sc_dmat, ld->se_tx_dmamap,
1021 i * sizeof(*cur_tx), sizeof(*cur_tx),
1022 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1023 }
1024
1025 cd->se_tx_cons = i;
1026 if (cd->se_tx_cnt == 0)
1027 ifp->if_timer = 0;
1028}
1029
1030void
1031se_tick(void *xsc)
1032{
1033 struct se_softc *sc = xsc;
1034 struct mii_data *mii;
1035 struct ifnet *ifp = &sc->sc_ac.ac_if;
1036 int s;
1037
1038 s = splnet();
1039 mii = &sc->sc_mii;
1040 mii_tick(mii);
1041 if ((sc->sc_flags & SE_FLAG_LINK) == 0) {
1042 se_miibus_statchg(&sc->sc_dev);
1043 if ((sc->sc_flags & SE_FLAG_LINK) != 0 &&
1044 !ifq_empty(&ifp->if_snd))
1045 se_start(ifp);
1046 }
1047 splx(s);
1048
1049 timeout_add_sec(&sc->sc_tick_tmo, 1);
1050}
1051
1052int
1053se_intr(void *arg)
1054{
1055 struct se_softc *sc = arg;
1056 struct ifnet *ifp = &sc->sc_ac.ac_if;
1057 uint32_t status;
1058
1059 status = CSR_READ_4(sc, IntrStatus);
1060 if (status == 0xffffffff || (status & SE_INTRS) == 0) {
1061 /* Not ours. */
1062 return 0;
1063 }
1064 /* Ack interrupts. */
1065 CSR_WRITE_4(sc, IntrStatus, status);
1066 /* Disable further interrupts. */
1067 CSR_WRITE_4(sc, IntrMask, 0);
1068
1069 for (;;) {
1070 if ((ifp->if_flags & IFF_RUNNING) == 0)
1071 break;
1072 if ((status & (INTR_RX_DONE | INTR_RX_IDLE)) != 0) {
1073 se_rxeof(sc);
1074 /* Wakeup Rx MAC. */
1075 if ((status & INTR_RX_IDLE) != 0)
1076 CSR_WRITE_4(sc, RX_CTL,
1077 0x1a00 | 0x000c | RX_CTL_POLL | RX_CTL_ENB);
1078 }
1079 if ((status & (INTR_TX_DONE | INTR_TX_IDLE)) != 0)
1080 se_txeof(sc);
1081 status = CSR_READ_4(sc, IntrStatus);
1082 if ((status & SE_INTRS) == 0)
1083 break;
1084 /* Ack interrupts. */
1085 CSR_WRITE_4(sc, IntrStatus, status);
1086 }
1087
1088 if ((ifp->if_flags & IFF_RUNNING) != 0) {
1089 /* Re-enable interrupts */
1090 CSR_WRITE_4(sc, IntrMask, SE_INTRS);
1091 if (!ifq_empty(&ifp->if_snd))
1092 se_start(ifp);
1093 }
1094
1095 return 1;
1096}
1097
1098/*
1099 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
1100 * pointers to the fragment pointers.
1101 */
1102int
1103se_encap(struct se_softc *sc, struct mbuf *m_head, uint32_t *txidx)
1104{
1105#ifdef SE_DEBUG
1106 struct ifnet *ifp = &sc->sc_ac.ac_if;
1107#endif
1108 struct mbuf *m;
1109 struct se_list_data *ld = &sc->se_ldata;
1110 struct se_chain_data *cd = &sc->se_cdata;
1111 struct se_desc *desc;
1112 uint i, cnt = 0;
1113 int rc;
1114
1115 /*
1116 * If there's no way we can send any packets, return now.
1117 */
1118 if (SE_TX_RING_CNT - cd->se_tx_cnt < 2) {
1119#ifdef SE_DEBUG
1120 if (ifp->if_flags & IFF_DEBUG)
1121 printf("%s: encap failed, not enough TX desc\n",
1122 ifp->if_xname);
1123#endif
1124 return ENOBUFS;
1125 }
1126
1127 if (m_defrag(m_head, M_DONTWAIT) != 0) {
1128#ifdef SE_DEBUG
1129 if (ifp->if_flags & IFF_DEBUG)
1130 printf("%s: m_defrag failed\n", ifp->if_xname);
1131#endif
1132 return ENOBUFS; /* XXX should not be fatal */
1133 }
1134
1135 /*
1136 * Start packing the mbufs in this chain into
1137 * the fragment pointers. Stop when we run out
1138 * of fragments or hit the end of the mbuf chain.
1139 */
1140 i = *txidx;
1141
1142 for (m = m_head; m != NULL; m = m->m_next) {
1143 if (m->m_len == 0)
1144 continue;
1145 if ((SE_TX_RING_CNT - (cd->se_tx_cnt + cnt)) < 2) {
1146#ifdef SE_DEBUG
1147 if (ifp->if_flags & IFF_DEBUG)
1148 printf("%s: encap failed, not enough TX desc\n",
1149 ifp->if_xname);
1150#endif
1151 return ENOBUFS;
1152 }
1153 cd->se_tx_mbuf[i] = m;
1154 rc = bus_dmamap_load_mbuf(sc->sc_dmat, cd->se_tx_map[i],
1155 m, BUS_DMA_NOWAIT);
1156 if (rc != 0)
1157 return ENOBUFS;
1158 KASSERT(cd->se_tx_map[i]->dm_nsegs == 1);
1159 bus_dmamap_sync(sc->sc_dmat, cd->se_tx_map[i], 0,
1160 cd->se_tx_map[i]->dm_mapsize, BUS_DMASYNC_PREWRITE);
1161
1162 desc = &ld->se_tx_ring[i];
1163 desc->se_sts_size = htole32(cd->se_tx_map[i]->dm_segs->ds_len);
1164 desc->se_ptr =
1165 htole32((uint32_t)cd->se_tx_map[i]->dm_segs->ds_addr);
1166 desc->se_flags = htole32(cd->se_tx_map[i]->dm_segs->ds_len);
1167 if (i == SE_TX_RING_CNT - 1)
1168 desc->se_flags |= htole32(RING_END);
1169 desc->se_cmdsts = htole32(TDC_OWN | TDC_INTR | TDC_DEF |
1170 TDC_CRC | TDC_PAD | TDC_BST);
1171 bus_dmamap_sync(sc->sc_dmat, ld->se_tx_dmamap,
1172 i * sizeof(*desc), sizeof(*desc),
1173 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1174
1175 SE_INC(i, SE_TX_RING_CNT);
1176 cnt++;
1177 }
1178
1179 /* can't happen */
1180 if (m != NULL)
1181 return ENOBUFS;
1182
1183 cd->se_tx_cnt += cnt;
1184 *txidx = i;
1185
1186 return 0;
1187}
1188
1189/*
1190 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
1191 * to the mbuf data regions directly in the transmit lists. We also save a
1192 * copy of the pointers since the transmit list fragment pointers are
1193 * physical addresses.
1194 */
1195void
1196se_start(struct ifnet *ifp)
1197{
1198 struct se_softc *sc = ifp->if_softc;
1199 struct mbuf *m_head = NULL;
1200 struct se_chain_data *cd = &sc->se_cdata;
1201 uint i, queued = 0;
1202
1203 if ((sc->sc_flags & SE_FLAG_LINK) == 0 ||
1204 !(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd)) {
1205#ifdef SE_DEBUG
1206 if (ifp->if_flags & IFF_DEBUG)
1207 printf("%s: can't tx, flags 0x%x 0x%04x\n",
1208 ifp->if_xname, sc->sc_flags, (uint)ifp->if_flags);
1209#endif
1210 return;
1211 }
1212
1213 i = cd->se_tx_prod;
1214
1215 while (cd->se_tx_mbuf[i] == NULL) {
1216 m_head = ifq_deq_begin(&ifp->if_snd);
1217 if (m_head == NULL)
1218 break;
1219
1220 if (se_encap(sc, m_head, &i) != 0) {
1221 ifq_deq_rollback(&ifp->if_snd, m_head);
1222 ifq_set_oactive(&ifp->if_snd);
1223 break;
1224 }
1225
1226 /* now we are committed to transmit the packet */
1227 ifq_deq_commit(&ifp->if_snd, m_head);
1228 queued++;
1229
1230 /*
1231 * If there's a BPF listener, bounce a copy of this frame
1232 * to him.
1233 */
1234#if NBPFILTER > 0
1235 if (ifp->if_bpf)
1236 bpf_mtap(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
1237#endif
1238 }
1239
1240 if (queued > 0) {
1241 /* Transmit */
1242 cd->se_tx_prod = i;
1243 CSR_WRITE_4(sc, TX_CTL, 0x1a00 | TX_CTL_ENB | TX_CTL_POLL);
1244 ifp->if_timer = 5;
1245 }
1246}
1247
1248int
1249se_init(struct ifnet *ifp)
1250{
1251 struct se_softc *sc = ifp->if_softc;
1252 uint16_t rxfilt;
1253 int i;
1254
1255 splassert(IPL_NET);
1256
1257 /*
1258 * Cancel pending I/O and free all RX/TX buffers.
1259 */
1260 se_stop(sc);
1261 se_reset(sc);
1262
1263 /* Init circular RX list. */
1264 if (se_list_rx_init(sc) == ENOBUFS) {
1265 se_stop(sc); /* XXX necessary? */
1266 return ENOBUFS;
1267 }
1268
1269 /* Init TX descriptors. */
1270 se_list_tx_init(sc);
1271
1272 /*
1273 * Load the address of the RX and TX lists.
1274 */
1275 CSR_WRITE_4(sc, TX_DESC,
1276 (uint32_t)sc->se_ldata.se_tx_dmamap->dm_segs[0].ds_addr);
1277 CSR_WRITE_4(sc, RX_DESC,
1278 (uint32_t)sc->se_ldata.se_rx_dmamap->dm_segs[0].ds_addr);
1279
1280 CSR_WRITE_4(sc, TxMacControl, 0x60);
1281 CSR_WRITE_4(sc, RxWakeOnLan, 0);
1282 CSR_WRITE_4(sc, RxWakeOnLanData, 0);
1283 CSR_WRITE_2(sc, RxMPSControl, ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN +
1284 SE_RX_PAD_BYTES);
1285
1286 for (i = 0; i < ETHER_ADDR_LEN; i++)
1287 CSR_WRITE_1(sc, RxMacAddr + i, sc->sc_ac.ac_enaddr[i]);
1288 /* Configure RX MAC. */
1289 rxfilt = RXMAC_STRIP_FCS | RXMAC_PAD_ENB | RXMAC_CSUM_ENB;
1290 CSR_WRITE_2(sc, RxMacControl, rxfilt);
1291
1292 /* Program promiscuous mode and multicast filters. */
1293 se_iff(sc);
1294
1295 /*
1296 * Clear and enable interrupts.
1297 */
1298 CSR_WRITE_4(sc, IntrStatus, 0xFFFFFFFF);
1299 CSR_WRITE_4(sc, IntrMask, SE_INTRS);
1300
1301 /* Enable receiver and transmitter. */
1302 CSR_WRITE_4(sc, TX_CTL, 0x1a00 | TX_CTL_ENB);
1303 CSR_WRITE_4(sc, RX_CTL, 0x1a00 | 0x000c | RX_CTL_POLL | RX_CTL_ENB);
1304
1305 ifp->if_flags |= IFF_RUNNING;
1306 ifq_clr_oactive(&ifp->if_snd);
1307
1308 sc->sc_flags &= ~SE_FLAG_LINK;
1309 mii_mediachg(&sc->sc_mii);
1310 timeout_add_sec(&sc->sc_tick_tmo, 1);
1311
1312 return 0;
1313}
1314
1315/*
1316 * Set media options.
1317 */
1318int
1319se_ifmedia_upd(struct ifnet *ifp)
1320{
1321 struct se_softc *sc = ifp->if_softc;
1322 struct mii_data *mii;
1323
1324 mii = &sc->sc_mii;
1325 sc->sc_flags &= ~SE_FLAG_LINK;
1326 if (mii->mii_instance) {
1327 struct mii_softc *miisc;
1328 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
1329 mii_phy_reset(miisc);
1330 }
1331 return mii_mediachg(mii);
1332}
1333
1334/*
1335 * Report current media status.
1336 */
1337void
1338se_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1339{
1340 struct se_softc *sc = ifp->if_softc;
1341 struct mii_data *mii;
1342
1343 mii = &sc->sc_mii;
1344 mii_pollstat(mii);
1345 ifmr->ifm_active = mii->mii_media_active;
1346 ifmr->ifm_status = mii->mii_media_status;
1347}
1348
1349int
1350se_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
1351{
1352 struct se_softc *sc = ifp->if_softc;
1353 struct ifreq *ifr = (struct ifreq *) data;
1354 int s, rc = 0;
1355
1356 s = splnet();
1357
1358 switch (command) {
1359 case SIOCSIFADDR:
1360 ifp->if_flags |= IFF_UP;
1361 if ((ifp->if_flags & IFF_RUNNING) == 0)
1362 rc = se_init(ifp);
1363 break;
1364 case SIOCSIFFLAGS:
1365 if (ifp->if_flags & IFF_UP) {
1366 if (ifp->if_flags & IFF_RUNNING)
1367 rc = ENETRESET;
1368 else
1369 rc = se_init(ifp);
1370 } else {
1371 if (ifp->if_flags & IFF_RUNNING)
1372 se_stop(sc);
1373 }
1374 break;
1375 case SIOCGIFMEDIA:
1376 case SIOCSIFMEDIA:
1377 rc = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, command);
1378 break;
1379 default:
1380 rc = ether_ioctl(ifp, &sc->sc_ac, command, data);
1381 break;
1382 }
1383
1384 if (rc == ENETRESET) {
1385 if (ifp->if_flags & IFF_RUNNING)
1386 se_iff(sc);
1387 rc = 0;
1388 }
1389
1390 splx(s);
1391 return rc;
1392}
1393
1394void
1395se_watchdog(struct ifnet *ifp)
1396{
1397 struct se_softc *sc = ifp->if_softc;
1398 int s;
1399
1400 printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
1401 ifp->if_oerrors++;
1402
1403 s = splnet();
1404 se_init(ifp);
1405 if (!ifq_empty(&ifp->if_snd))
1406 se_start(ifp);
1407 splx(s);
1408}
1409
1410/*
1411 * Stop the adapter and free any mbufs allocated to the
1412 * RX and TX lists.
1413 */
1414void
1415se_stop(struct se_softc *sc)
1416{
1417 struct ifnet *ifp = &sc->sc_ac.ac_if;
1418
1419 ifp->if_timer = 0;
1420 ifp->if_flags &= ~IFF_RUNNING;
1421 ifq_clr_oactive(&ifp->if_snd);
1422 timeout_del(&sc->sc_tick_tmo);
1423 mii_down(&sc->sc_mii);
1424
1425 CSR_WRITE_4(sc, IntrMask, 0);
1426 CSR_READ_4(sc, IntrMask);
1427 CSR_WRITE_4(sc, IntrStatus, 0xffffffff);
1428 /* Stop TX/RX MAC. */
1429 CSR_WRITE_4(sc, TX_CTL, 0x1a00);
1430 CSR_WRITE_4(sc, RX_CTL, 0x1a00);
1431 /* XXX Can we assume active DMA cycles gone? */
1432 DELAY(2000);
1433 CSR_WRITE_4(sc, IntrMask, 0);
1434 CSR_WRITE_4(sc, IntrStatus, 0xffffffff);
1435
1436 sc->sc_flags &= ~SE_FLAG_LINK;
1437 se_list_rx_free(sc);
1438 se_list_tx_free(sc);
1439}