Bug Summary

File: dev/pci/if_nfe.c
Warning: line 936, column 17
Access to field 'flags' results in a dereference of an undefined pointer value (loaded from variable 'desc64')
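
The path below boils down to this: in nfe_encap(), 'desc64' is declared without an initial value, the only assignments to it sit inside a loop over map->dm_nsegs that the analyzer assumes can run zero times, and the NFE_40BIT_ADDR branch after the loop then reads the never-written pointer. A minimal sketch of that shape (illustrative names, not the driver's own code; running clang --analyze on it should produce a similar undefined-pointer warning):

	struct desc { unsigned short flags; };

	void
	encap_sketch(struct desc *ring, int nsegs, int use64)
	{
		struct desc *d;			/* like 'desc64': no initial value */
		int i;

		for (i = 0; i < nsegs; i++) {	/* analyzer may assume 0 iterations */
			if (use64)
				d = &ring[i];
		}
		if (use64)
			d->flags = 0;		/* undefined pointer if the loop never ran */
	}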

Annotated Source Code


clang -cc1 -cc1 -triple amd64-unknown-openbsd7.4 -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name if_nfe.c -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -ffp-contract=on -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -target-feature +retpoline-external-thunk -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/llvm16/lib/clang/16 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/legacy-dpm -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu13 -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/inc -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc/pmfw_if -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D SUSPEND -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant 
-fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fcf-protection=branch -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /home/ben/Projects/scan/2024-01-11-110808-61670-1 -x c /usr/src/sys/dev/pci/if_nfe.c
1/* $OpenBSD: if_nfe.c,v 1.125 2023/11/10 15:51:20 bluhm Exp $ */
2
3/*-
4 * Copyright (c) 2006, 2007 Damien Bergamini <damien.bergamini@free.fr>
5 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
6 *
7 * Permission to use, copy, modify, and distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
10 *
11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 */
19
20/* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */
21
22#include "bpfilter.h"
23#include "vlan.h"
24
25#include <sys/param.h>
26#include <sys/endian.h>
27#include <sys/systm.h>
28#include <sys/sockio.h>
29#include <sys/mbuf.h>
30#include <sys/queue.h>
31#include <sys/kernel.h>
32#include <sys/device.h>
33#include <sys/timeout.h>
34#include <sys/socket.h>
35
36#include <machine/bus.h>
37
38#include <net/if.h>
39#include <net/if_media.h>
40
41#include <netinet/in.h>
42#include <netinet/if_ether.h>
43
44#if NBPFILTER > 0
45#include <net/bpf.h>
46#endif
47
48#include <dev/mii/miivar.h>
49
50#include <dev/pci/pcireg.h>
51#include <dev/pci/pcivar.h>
52#include <dev/pci/pcidevs.h>
53
54#include <dev/pci/if_nfereg.h>
55#include <dev/pci/if_nfevar.h>
56
57int nfe_match(struct device *, void *, void *);
58void nfe_attach(struct device *, struct device *, void *);
59int nfe_activate(struct device *, int);
60void nfe_miibus_statchg(struct device *);
61int nfe_miibus_readreg(struct device *, int, int);
62void nfe_miibus_writereg(struct device *, int, int, int);
63int nfe_intr(void *);
64int nfe_ioctl(struct ifnet *, u_long, caddr_t);
65void nfe_txdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
66void nfe_txdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
67void nfe_txdesc32_rsync(struct nfe_softc *, int, int, int);
68void nfe_txdesc64_rsync(struct nfe_softc *, int, int, int);
69void nfe_rxdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
70void nfe_rxdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
71void nfe_rxeof(struct nfe_softc *);
72void nfe_txeof(struct nfe_softc *);
73int nfe_encap(struct nfe_softc *, struct mbuf *);
74void nfe_start(struct ifnet *);
75void nfe_watchdog(struct ifnet *);
76int nfe_init(struct ifnet *);
77void nfe_stop(struct ifnet *, int);
78int nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
79void nfe_reset_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
80void nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
81int nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
82void nfe_reset_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
83void nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
84int nfe_ifmedia_upd(struct ifnet *);
85void nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
86void nfe_iff(struct nfe_softc *);
87void nfe_get_macaddr(struct nfe_softc *, uint8_t *);
88void nfe_set_macaddr(struct nfe_softc *, const uint8_t *);
89void nfe_tick(void *);
90#ifndef SMALL_KERNEL
91int nfe_wol(struct ifnet*, int);
92#endif
93
94const struct cfattach nfe_ca = {
95 sizeof (struct nfe_softc), nfe_match, nfe_attach, NULL,
96 nfe_activate
97};
98
99struct cfdriver nfe_cd = {
100 NULL, "nfe", DV_IFNET
101};
102
103#ifdef NFE_DEBUG
104int nfedebug = 0;
105#define DPRINTF(x) do { if (nfedebug) printf x; } while (0)
106#define DPRINTFN(n,x) do { if (nfedebug >= (n)) printf x; } while (0)
107#else
108#define DPRINTF(x)
109#define DPRINTFN(n,x)
110#endif
111
112const struct pci_matchid nfe_devices[] = {
113 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN },
114 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN },
115 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1 },
116 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN2 },
117 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN3 },
118 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4 },
119 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN5 },
120 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN1 },
121 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN2 },
122 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1 },
123 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2 },
124 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN1 },
125 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN2 },
126 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1 },
127 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2 },
128 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1 },
129 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2 },
130 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3 },
131 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4 },
132 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1 },
133 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2 },
134 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3 },
135 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4 },
136 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN1 },
137 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN2 },
138 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN3 },
139 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN4 },
140 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN1 },
141 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN2 },
142 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN3 },
143 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN4 },
144 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN1 },
145 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN2 },
146 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN3 },
147 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN4 },
148 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN1 },
149 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN2 },
150 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN3 },
151 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN4 },
152 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP89_LAN }
153};
154
155int
156nfe_match(struct device *dev, void *match, void *aux)
157{
158 return pci_matchbyid((struct pci_attach_args *)aux, nfe_devices,
159 sizeof (nfe_devices) / sizeof (nfe_devices[0]));
160}
161
162int
163nfe_activate(struct device *self, int act)
164{
165 struct nfe_softc *sc = (struct nfe_softc *)self;
166 struct ifnet *ifp = &sc->sc_arpcom.ac_if;
167 int rv = 0;
168
169 switch (act) {
170 case DVACT_SUSPEND:
171 if (ifp->if_flags & IFF_RUNNING)
172 nfe_stop(ifp, 0);
173 rv = config_activate_children(self, act);
174 break;
175 case DVACT_RESUME:
176 if (ifp->if_flags & IFF_UP)
177 nfe_init(ifp);
178 break;
179 default:
180 rv = config_activate_children(self, act);
181 break;
182 }
183 return (rv);
184}
185
186
187void
188nfe_attach(struct device *parent, struct device *self, void *aux)
189{
190 struct nfe_softc *sc = (struct nfe_softc *)self;
191 struct pci_attach_args *pa = aux;
192 pci_chipset_tag_t pc = pa->pa_pc;
193 pci_intr_handle_t ih;
194 const char *intrstr;
195 struct ifnet *ifp;
196 bus_size_t memsize;
197 pcireg_t memtype;
198
199 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, NFE_PCI_BA);
200 if (pci_mapreg_map(pa, NFE_PCI_BA, memtype, 0, &sc->sc_memt,
201 &sc->sc_memh, NULL, &memsize, 0)) {
202 printf(": can't map mem space\n");
203 return;
204 }
205
206 if (pci_intr_map(pa, &ih) != 0) {
207 printf(": can't map interrupt\n");
208 return;
209 }
210
211 intrstr = pci_intr_string(pc, ih);
212 sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, nfe_intr, sc,
213 sc->sc_dev.dv_xname);
214 if (sc->sc_ih == NULL) {
215 printf(": could not establish interrupt");
216 if (intrstr != NULL)
217 printf(" at %s", intrstr);
218 printf("\n");
219 return;
220 }
221 printf(": %s", intrstr);
222
223 sc->sc_dmat = pa->pa_dmat;
224 sc->sc_flags = 0;
225
226 switch (PCI_PRODUCT(pa->pa_id)) {
227 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
228 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
229 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
230 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
231 sc->sc_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM;
232 break;
233 case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
234 case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
235 sc->sc_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT;
236 break;
237 case PCI_PRODUCT_NVIDIA_MCP61_LAN1:
238 case PCI_PRODUCT_NVIDIA_MCP61_LAN2:
239 case PCI_PRODUCT_NVIDIA_MCP61_LAN3:
240 case PCI_PRODUCT_NVIDIA_MCP61_LAN4:
241 case PCI_PRODUCT_NVIDIA_MCP67_LAN1:
242 case PCI_PRODUCT_NVIDIA_MCP67_LAN2:
243 case PCI_PRODUCT_NVIDIA_MCP67_LAN3:
244 case PCI_PRODUCT_NVIDIA_MCP67_LAN4:
245 case PCI_PRODUCT_NVIDIA_MCP73_LAN1:
246 case PCI_PRODUCT_NVIDIA_MCP73_LAN2:
247 case PCI_PRODUCT_NVIDIA_MCP73_LAN3:
248 case PCI_PRODUCT_NVIDIA_MCP73_LAN4:
249 sc->sc_flags |= NFE_40BIT_ADDR | NFE_CORRECT_MACADDR |
250 NFE_PWR_MGMT;
251 break;
252 case PCI_PRODUCT_NVIDIA_MCP77_LAN1:
253 case PCI_PRODUCT_NVIDIA_MCP77_LAN2:
254 case PCI_PRODUCT_NVIDIA_MCP77_LAN3:
255 case PCI_PRODUCT_NVIDIA_MCP77_LAN4:
256 sc->sc_flags |= NFE_40BIT_ADDR | NFE_HW_CSUM |
257 NFE_CORRECT_MACADDR | NFE_PWR_MGMT;
258 break;
259 case PCI_PRODUCT_NVIDIA_MCP79_LAN1:
260 case PCI_PRODUCT_NVIDIA_MCP79_LAN2:
261 case PCI_PRODUCT_NVIDIA_MCP79_LAN3:
262 case PCI_PRODUCT_NVIDIA_MCP79_LAN4:
263 case PCI_PRODUCT_NVIDIA_MCP89_LAN:
264 sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
265 NFE_CORRECT_MACADDR | NFE_PWR_MGMT;
266 break;
267 case PCI_PRODUCT_NVIDIA_CK804_LAN1:
268 case PCI_PRODUCT_NVIDIA_CK804_LAN2:
269 case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
270 case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
271 sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM;
272 break;
273 case PCI_PRODUCT_NVIDIA_MCP65_LAN1:
274 case PCI_PRODUCT_NVIDIA_MCP65_LAN2:
275 case PCI_PRODUCT_NVIDIA_MCP65_LAN3:
276 case PCI_PRODUCT_NVIDIA_MCP65_LAN4:
277 sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR |
278 NFE_CORRECT_MACADDR | NFE_PWR_MGMT;
279 break;
280 case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
281 case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
282 sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
283 NFE_HW_VLAN | NFE_PWR_MGMT;
284 break;
285 }
286
287 if (sc->sc_flags & NFE_PWR_MGMT) {
288 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | NFE_RXTX_BIT2);
289 NFE_WRITE(sc, NFE_MAC_RESET, NFE_MAC_RESET_MAGIC);
290 DELAY(100);
291 NFE_WRITE(sc, NFE_MAC_RESET, 0);
292 DELAY(100);
293 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT2);
294 NFE_WRITE(sc, NFE_PWR2_CTL,
295 NFE_READ(sc, NFE_PWR2_CTL) & ~NFE_PWR2_WAKEUP_MASK);
296 }
297
298 nfe_get_macaddr(sc, sc->sc_arpcom.ac_enaddr);
299 printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));
300
301 /*
302 * Allocate Tx and Rx rings.
303 */
304 if (nfe_alloc_tx_ring(sc, &sc->txq) != 0) {
305 printf("%s: could not allocate Tx ring\n",
306 sc->sc_dev.dv_xname);
307 return;
308 }
309
310 if (nfe_alloc_rx_ring(sc, &sc->rxq) != 0) {
311 printf("%s: could not allocate Rx ring\n",
312 sc->sc_dev.dv_xname);
313 nfe_free_tx_ring(sc, &sc->txq);
314 return;
315 }
316
317 ifp = &sc->sc_arpcom.ac_if;
318 ifp->if_softc = sc;
319 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
320 ifp->if_ioctl = nfe_ioctl;
321 ifp->if_start = nfe_start;
322 ifp->if_watchdog = nfe_watchdog;
323 ifq_init_maxlen(&ifp->if_snd, NFE_IFQ_MAXLEN);
324 strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);
325
326 ifp->if_capabilities = IFCAP_VLAN_MTU;
327
328#ifndef SMALL_KERNEL
329 ifp->if_capabilities |= IFCAP_WOL;
330 ifp->if_wol = nfe_wol;
331 nfe_wol(ifp, 0);
332#endif
333
334#if NVLAN > 0
335 if (sc->sc_flags & NFE_HW_VLAN)
336 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
337#endif
338
339 if (sc->sc_flags & NFE_HW_CSUM) {
340 ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
341 IFCAP_CSUM_UDPv4;
342 }
343
344 sc->sc_mii.mii_ifp = ifp;
345 sc->sc_mii.mii_readreg = nfe_miibus_readreg;
346 sc->sc_mii.mii_writereg = nfe_miibus_writereg;
347 sc->sc_mii.mii_statchg = nfe_miibus_statchg;
348
349 ifmedia_init(&sc->sc_mii.mii_media, 0, nfe_ifmedia_upd,
350 nfe_ifmedia_sts);
351 mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, 0, 0);
352 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
353 printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
354 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL,
355 0, NULL);
356 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL);
357 } else
358 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
359
360 if_attach(ifp);
361 ether_ifattach(ifp);
362
363 timeout_set(&sc->sc_tick_ch, nfe_tick, sc);
364}
365
366void
367nfe_miibus_statchg(struct device *dev)
368{
369 struct nfe_softc *sc = (struct nfe_softc *)dev;
370 struct mii_data *mii = &sc->sc_mii;
371 uint32_t phy, seed, misc = NFE_MISC1_MAGIC0x003b0f3c, link = NFE_MEDIA_SET0x10000;
372
373 phy = NFE_READ(sc, NFE_PHY_IFACE);
374 phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);
375
376 seed = NFE_READ(sc, NFE_RNDSEED);
377 seed &= ~NFE_SEED_MASK;
378
379 if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
380 phy |= NFE_PHY_HDX; /* half-duplex */
381 misc |= NFE_MISC1_HDX;
382 }
383
384 switch (IFM_SUBTYPE(mii->mii_media_active)) {
385 case IFM_1000_T: /* full-duplex only */
386 link |= NFE_MEDIA_1000T;
387 seed |= NFE_SEED_1000T;
388 phy |= NFE_PHY_1000T;
389 break;
390 case IFM_100_TX:
391 link |= NFE_MEDIA_100TX;
392 seed |= NFE_SEED_100TX;
393 phy |= NFE_PHY_100TX;
394 break;
395 case IFM_10_T:
396 link |= NFE_MEDIA_10T;
397 seed |= NFE_SEED_10T;
398 break;
399 }
400
401 NFE_WRITE(sc, NFE_RNDSEED, seed); /* XXX: gigabit NICs only? */
402
403 NFE_WRITE(sc, NFE_PHY_IFACE, phy);
404 NFE_WRITE(sc, NFE_MISC1, misc);
405 NFE_WRITE(sc, NFE_LINKSPEED, link);
406}
407
408int
409nfe_miibus_readreg(struct device *dev, int phy, int reg)
410{
411 struct nfe_softc *sc = (struct nfe_softc *)dev;
412 uint32_t val;
413 int ntries;
414
415 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
416
417 if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
418 NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
419 DELAY(100);
420 }
421
422 NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);
423
424 for (ntries = 0; ntries < 1000; ntries++) {
425 DELAY(100);
426 if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
427 break;
428 }
429 if (ntries == 1000) {
430 DPRINTFN(2, ("%s: timeout waiting for PHY\n",
431 sc->sc_dev.dv_xname));
432 return 0;
433 }
434
435 if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
436 DPRINTFN(2, ("%s: could not read PHY\n",
437 sc->sc_dev.dv_xname));
438 return 0;
439 }
440
441 val = NFE_READ(sc, NFE_PHY_DATA);
442 if (val != 0xffffffff && val != 0)
443 sc->mii_phyaddr = phy;
444
445 DPRINTFN(2, ("%s: mii read phy %d reg 0x%x ret 0x%x\n",
446 sc->sc_dev.dv_xname, phy, reg, val));
447
448 return val;
449}
450
451void
452nfe_miibus_writereg(struct device *dev, int phy, int reg, int val)
453{
454 struct nfe_softc *sc = (struct nfe_softc *)dev;
455 uint32_t ctl;
456 int ntries;
457
458 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
459
460 if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
461 NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
462 DELAY(100);
463 }
464
465 NFE_WRITE(sc, NFE_PHY_DATA, val);
466 ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
467 NFE_WRITE(sc, NFE_PHY_CTL, ctl);
468
469 for (ntries = 0; ntries < 1000; ntries++) {
470 DELAY(100);
471 if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
472 break;
473 }
474#ifdef NFE_DEBUG
475 if (nfedebug >= 2 && ntries == 1000)
476 printf("could not write to PHY\n");
477#endif
478}
479
480int
481nfe_intr(void *arg)
482{
483 struct nfe_softc *sc = arg;
484 struct ifnet *ifp = &sc->sc_arpcom.ac_if;
485 uint32_t r;
486
487 if ((r = NFE_READ(sc, NFE_IRQ_STATUS) & NFE_IRQ_WANTED) == 0)
1
Assuming the condition is false
2
Taking false branch
488 return 0; /* not for us */
489 NFE_WRITE(sc, NFE_IRQ_STATUS, r);
490
491 DPRINTFN(5, ("nfe_intr: interrupt register %x\n", r));
492
493 if (r & NFE_IRQ_LINK) {
3
Assuming the condition is false
4
Taking false branch
494 NFE_READ(sc, NFE_PHY_STATUS);
495 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
496 DPRINTF(("%s: link state changed\n", sc->sc_dev.dv_xname));
497 }
498
499 if (ifp->if_flags & IFF_RUNNING) {
5
Assuming the condition is true
6
Taking true branch
500 /* check Rx ring */
501 nfe_rxeof(sc);
502
503 /* check Tx ring */
504 nfe_txeof(sc);
7
Calling 'nfe_txeof'
505 }
506
507 return 1;
508}
509
510int
511nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
512{
513 struct nfe_softc *sc = ifp->if_softc;
514 struct ifreq *ifr = (struct ifreq *)data;
515 int s, error = 0;
516
517 s = splnet();
518
519 switch (cmd) {
520 case SIOCSIFADDR:
521 ifp->if_flags |= IFF_UP;
522 if (!(ifp->if_flags & IFF_RUNNING))
523 nfe_init(ifp);
524 break;
525
526 case SIOCSIFFLAGS:
527 if (ifp->if_flags & IFF_UP) {
528 if (ifp->if_flags & IFF_RUNNING)
529 error = ENETRESET;
530 else
531 nfe_init(ifp);
532 } else {
533 if (ifp->if_flags & IFF_RUNNING)
534 nfe_stop(ifp, 1);
535 }
536 break;
537
538 case SIOCSIFMEDIA:
539 case SIOCGIFMEDIA:
540 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
541 break;
542
543 default:
544 error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
545 }
546
547 if (error == ENETRESET) {
548 if (ifp->if_flags & IFF_RUNNING)
549 nfe_iff(sc);
550 error = 0;
551 }
552
553 splx(s);
554 return error;
555}
556
557void
558nfe_txdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
559{
560 bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
561 (caddr_t)desc32 - (caddr_t)sc->txq.desc32,
562 sizeof (struct nfe_desc32), ops);
563}
564
565void
566nfe_txdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
567{
568 bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
569 (caddr_t)desc64 - (caddr_t)sc->txq.desc64,
570 sizeof (struct nfe_desc64), ops);
571}
572
573void
574nfe_txdesc32_rsync(struct nfe_softc *sc, int start, int end, int ops)
575{
576 if (end > start) {
577 bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
578 (caddr_t)&sc->txq.desc32[start] - (caddr_t)sc->txq.desc32,
579 (caddr_t)&sc->txq.desc32[end] -
580 (caddr_t)&sc->txq.desc32[start], ops);
581 return;
582 }
583 /* sync from 'start' to end of ring */
584 bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
585 (caddr_t)&sc->txq.desc32[start] - (caddr_t)sc->txq.desc32,
586 (caddr_t)&sc->txq.desc32[NFE_TX_RING_COUNT] -
587 (caddr_t)&sc->txq.desc32[start], ops);
588
589 /* sync from start of ring to 'end' */
590 bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 0,
591 (caddr_t)&sc->txq.desc32[end] - (caddr_t)sc->txq.desc32, ops);
592}
593
594void
595nfe_txdesc64_rsync(struct nfe_softc *sc, int start, int end, int ops)
596{
597 if (end > start) {
598 bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
599 (caddr_t)&sc->txq.desc64[start] - (caddr_t)sc->txq.desc64,
600 (caddr_t)&sc->txq.desc64[end] -
601 (caddr_t)&sc->txq.desc64[start], ops);
602 return;
603 }
604 /* sync from 'start' to end of ring */
605 bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
606 (caddr_t)&sc->txq.desc64[start] - (caddr_t)sc->txq.desc64,
607 (caddr_t)&sc->txq.desc64[NFE_TX_RING_COUNT] -
608 (caddr_t)&sc->txq.desc64[start], ops);
609
610 /* sync from start of ring to 'end' */
611 bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 0,
612 (caddr_t)&sc->txq.desc64[end] - (caddr_t)sc->txq.desc64, ops);
613}
614
615void
616nfe_rxdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
617{
618 bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
619 (caddr_t)desc32 - (caddr_t)sc->rxq.desc32,
620 sizeof (struct nfe_desc32), ops);
621}
622
623void
624nfe_rxdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
625{
626 bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
627 (caddr_t)desc64 - (caddr_t)sc->rxq.desc64,
628 sizeof (struct nfe_desc64), ops);
629}
630
631void
632nfe_rxeof(struct nfe_softc *sc)
633{
634 struct ifnet *ifp = &sc->sc_arpcom.ac_if;
635 struct nfe_desc32 *desc32;
636 struct nfe_desc64 *desc64;
637 struct nfe_rx_data *data;
638 struct mbuf_list ml = MBUF_LIST_INITIALIZER();
639 struct mbuf *m, *mnew;
640 bus_addr_t physaddr;
641#if NVLAN > 0
642 uint32_t vtag;
643#endif
644 uint16_t flags;
645 int error, len;
646
647 for (;;) {
648 data = &sc->rxq.data[sc->rxq.cur];
649
650 if (sc->sc_flags & NFE_40BIT_ADDR) {
651 desc64 = &sc->rxq.desc64[sc->rxq.cur];
652 nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);
653
654 flags = letoh16(desc64->flags);
655 len = letoh16(desc64->length) & 0x3fff;
656#if NVLAN > 0
657 vtag = letoh32(desc64->physaddr[1]);
658#endif
659 } else {
660 desc32 = &sc->rxq.desc32[sc->rxq.cur];
661 nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);
662
663 flags = letoh16(desc32->flags);
664 len = letoh16(desc32->length) & 0x3fff;
665 }
666
667 if (flags & NFE_RX_READY)
668 break;
669
670 if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
671 if (!(flags & NFE_RX_VALID_V1))
672 goto skip;
673
674 if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
675 flags &= ~NFE_RX_ERROR;
676 len--; /* fix buffer length */
677 }
678 } else {
679 if (!(flags & NFE_RX_VALID_V2))
680 goto skip;
681
682 if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
683 flags &= ~NFE_RX_ERROR;
684 len--; /* fix buffer length */
685 }
686 }
687
688 if (flags & NFE_RX_ERROR) {
689 ifp->if_ierrors++;
690 goto skip;
691 }
692
693 /*
694 * Try to allocate a new mbuf for this ring element and load
695 * it before processing the current mbuf. If the ring element
696 * cannot be loaded, drop the received packet and reuse the
697 * old mbuf. In the unlikely case that the old mbuf can't be
698 * reloaded either, explicitly panic.
699 */
700 mnew = MCLGETL(NULL, M_DONTWAIT, MCLBYTES);
701 if (mnew == NULL) {
702 ifp->if_ierrors++;
703 goto skip;
704 }
705 mnew->m_pkthdr.len = mnew->m_len = MCLBYTES;
706
707 bus_dmamap_sync(sc->sc_dmat, data->map, 0,
708 data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
709 bus_dmamap_unload(sc->sc_dmat, data->map);
710
711 error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, mnew,
712 BUS_DMA_READ | BUS_DMA_NOWAIT);
713 if (error != 0) {
714 m_freem(mnew);
715
716 /* try to reload the old mbuf */
717 error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map,
718 m, BUS_DMA_READ | BUS_DMA_NOWAIT);
719 if (error != 0) {
720 /* very unlikely that it will fail.. */
721 panic("%s: could not load old rx mbuf",
722 sc->sc_dev.dv_xname);
723 }
724 ifp->if_ierrors++;
725 goto skip;
726 }
727 physaddr = data->map->dm_segs[0].ds_addr;
728
729 /*
730 * New mbuf successfully loaded, update Rx ring and continue
731 * processing.
732 */
733 m = data->m;
734 data->m = mnew;
735
736 /* finalize mbuf */
737 m->m_pkthdr.len = m->m_len = len;
738
739 if ((sc->sc_flags & NFE_HW_CSUM) &&
740 (flags & NFE_RX_IP_CSUMOK)) {
741 m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
742 if (flags & NFE_RX_UDP_CSUMOK)
743 m->m_pkthdr.csum_flags |= M_UDP_CSUM_IN_OK;
744 if (flags & NFE_RX_TCP_CSUMOK)
745 m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK;
746 }
747
748#if NVLAN > 0
749 if ((vtag & NFE_RX_VTAG) && (sc->sc_flags & NFE_HW_VLAN)) {
750 m->m_pkthdr.ether_vtag = vtag & 0xffff;
751 m->m_flags |= M_VLANTAG;
752 }
753#endif
754
755 ml_enqueue(&ml, m);
756
757 /* update mapping address in h/w descriptor */
758 if (sc->sc_flags & NFE_40BIT_ADDR) {
759#if defined(__LP64__)
760 desc64->physaddr[0] = htole32(physaddr >> 32);
761#endif
762 desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
763 } else {
764 desc32->physaddr = htole32(physaddr);
765 }
766
767skip: if (sc->sc_flags & NFE_40BIT_ADDR) {
768 desc64->length = htole16(sc->rxq.bufsz);
769 desc64->flags = htole16(NFE_RX_READY);
770
771 nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_PREWRITE);
772 } else {
773 desc32->length = htole16(sc->rxq.bufsz);
774 desc32->flags = htole16(NFE_RX_READY);
775
776 nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_PREWRITE);
777 }
778
779 sc->rxq.cur = (sc->rxq.cur + 1) % NFE_RX_RING_COUNT;
780 }
781 if_input(ifp, &ml);
782}
783
784void
785nfe_txeof(struct nfe_softc *sc)
786{
787 struct ifnet *ifp = &sc->sc_arpcom.ac_if;
788 struct nfe_desc32 *desc32;
789 struct nfe_desc64 *desc64;
790 struct nfe_tx_data *data = NULL;
791 uint16_t flags;
792
793 while (sc->txq.next != sc->txq.cur) {
8
Assuming field 'next' is not equal to field 'cur'
9
Loop condition is true. Entering loop body
19
Assuming field 'next' is equal to field 'cur'
20
Loop condition is false. Execution continues on line 850
794 if (sc->sc_flags & NFE_40BIT_ADDR) {
10
Assuming the condition is false
11
Taking false branch
795 desc64 = &sc->txq.desc64[sc->txq.next];
796 nfe_txdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);
797
798 flags = letoh16(desc64->flags);
799 } else {
800 desc32 = &sc->txq.desc32[sc->txq.next];
801 nfe_txdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);
802
803 flags = letoh16(desc32->flags);
804 }
805
806 if (flags & NFE_TX_VALID)
12
Assuming the condition is false
13
Taking false branch
807 break;
808
809 data = &sc->txq.data[sc->txq.next];
810
811 if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
14
Assuming the condition is false
812 if (!(flags & NFE_TX_LASTFRAG_V1) && data->m == NULL)
813 goto skip;
814
815 if ((flags & NFE_TX_ERROR_V1) != 0) {
816 printf("%s: tx v1 error %b\n",
817 sc->sc_dev.dv_xname, flags, NFE_V1_TXERR);
818 ifp->if_oerrors++;
819 }
820 } else {
821 if (!(flags & NFE_TX_LASTFRAG_V2) && data->m == NULL)
15
Assuming the condition is true
16
Assuming field 'm' is equal to NULL
17
Taking true branch
822 goto skip;
18
Control jumps to line 846
823
824 if ((flags & NFE_TX_ERROR_V2) != 0) {
825 printf("%s: tx v2 error %b\n",
826 sc->sc_dev.dv_xname, flags, NFE_V2_TXERR);
827 ifp->if_oerrors++;
828 }
829 }
830
831 if (data->m == NULL) { /* should not get there */
832 printf("%s: last fragment bit w/o associated mbuf!\n",
833 sc->sc_dev.dv_xname);
834 goto skip;
835 }
836
837 /* last fragment of the mbuf chain transmitted */
838 bus_dmamap_sync(sc->sc_dmat, data->active, 0,
839 data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
840 bus_dmamap_unload(sc->sc_dmat, data->active);
841 m_freem(data->m);
842 data->m = NULL;
843
844 ifp->if_timer = 0;
845
846skip: sc->txq.queued--;
847 sc->txq.next = (sc->txq.next + 1) % NFE_TX_RING_COUNT;
848 }
849
850 if (data != NULL) { /* at least one slot freed */
20.1
'data' is not equal to NULL
21
Taking true branch
851 ifq_clr_oactive(&ifp->if_snd);
852 nfe_start(ifp);
22
Calling 'nfe_start'
853 }
854}
855
856int
857nfe_encap(struct nfe_softc *sc, struct mbuf *m0)
858{
859 struct nfe_desc32 *desc32;
860 struct nfe_desc64 *desc64;
29
'desc64' declared without an initial value
861 struct nfe_tx_data *data;
862 bus_dmamap_t map;
863 uint16_t flags = 0;
864 uint32_t vtag = 0;
865 int error, i, first = sc->txq.cur;
866
867 map = sc->txq.data[first].map;
868
869 error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0, BUS_DMA_NOWAIT);
870 if (error != 0) {
30
Assuming 'error' is equal to 0
31
Taking false branch
871 printf("%s: can't map mbuf (error %d)\n",
872 sc->sc_dev.dv_xname, error);
873 return error;
874 }
875
876 if (sc->txq.queued + map->dm_nsegs >= NFE_TX_RING_COUNT - 1) {
32
Assuming the condition is false
33
Taking false branch
877 bus_dmamap_unload(sc->sc_dmat, map);
878 return ENOBUFS;
879 }
880
881#if NVLAN > 0
882 /* setup h/w VLAN tagging */
883 if (m0->m_flags & M_VLANTAG)
34
Assuming the condition is false
35
Taking false branch
884 vtag = NFE_TX_VTAG | m0->m_pkthdr.ether_vtag;
885#endif
886 if (m0->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
36
Assuming the condition is false
37
Taking false branch
887 flags |= NFE_TX_IP_CSUM;
888 if (m0->m_pkthdr.csum_flags & (M_TCP_CSUM_OUT | M_UDP_CSUM_OUT))
38
Assuming the condition is false
39
Taking false branch
889 flags |= NFE_TX_TCP_UDP_CSUM;
890
891 for (i = 0; i < map->dm_nsegs; i++) {
40
Assuming 'i' is >= field 'dm_nsegs'
41
Loop condition is false. Execution continues on line 933
892 data = &sc->txq.data[sc->txq.cur];
893
894 if (sc->sc_flags & NFE_40BIT_ADDR) {
895 desc64 = &sc->txq.desc64[sc->txq.cur];
896#if defined(__LP64__)
897 desc64->physaddr[0] =
898 htole32(map->dm_segs[i].ds_addr >> 32);
899#endif
900 desc64->physaddr[1] =
901 htole32(map->dm_segs[i].ds_addr & 0xffffffff);
902 desc64->length = htole16(map->dm_segs[i].ds_len - 1);
903 desc64->flags = htole16(flags);
904 desc64->vtag = htole32(vtag);
905 } else {
906 desc32 = &sc->txq.desc32[sc->txq.cur];
907
908 desc32->physaddr = htole32(map->dm_segs[i].ds_addr);
909 desc32->length = htole16(map->dm_segs[i].ds_len - 1);
910 desc32->flags = htole16(flags);
911 }
912
913 if (map->dm_nsegs > 1) {
914 /*
915 * Checksum flags and vtag belong to the first fragment
916 * only.
917 */
918 flags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_UDP_CSUM);
919 vtag = 0;
920
921 /*
922 * Setting of the valid bit in the first descriptor is
923 * deferred until the whole chain is fully setup.
924 */
925 flags |= NFE_TX_VALID;
926 }
927
928 sc->txq.queued++;
929 sc->txq.cur = (sc->txq.cur + 1) % NFE_TX_RING_COUNT;
930 }
931
932 /* the whole mbuf chain has been setup */
933 if (sc->sc_flags & NFE_40BIT_ADDR) {
42
Assuming the condition is true
43
Taking true branch
934 /* fix last descriptor */
935 flags |= NFE_TX_LASTFRAG_V2;
936 desc64->flags = htole16(flags);
44
Access to field 'flags' results in a dereference of an undefined pointer value (loaded from variable 'desc64')
937
938 /* finally, set the valid bit in the first descriptor */
939 sc->txq.desc64[first].flags |= htole16(NFE_TX_VALID);
940 } else {
941 /* fix last descriptor */
942 if (sc->sc_flags & NFE_JUMBO_SUP)
943 flags |= NFE_TX_LASTFRAG_V2;
944 else
945 flags |= NFE_TX_LASTFRAG_V1;
946 desc32->flags = htole16(flags);
947
948 /* finally, set the valid bit in the first descriptor */
949 sc->txq.desc32[first].flags |= htole16(NFE_TX_VALID);
950 }
951
952 data->m = m0;
953 data->active = map;
954
955 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
956 BUS_DMASYNC_PREWRITE);
957
958 return 0;
959}
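
For the reported path to be feasible, the loop over map->dm_nsegs at line 891 must execute zero times even though bus_dmamap_load_mbuf() returned 0; a successful load normally yields dm_nsegs >= 1, so this looks like a false positive rather than a reachable crash. One possible way to make that invariant explicit and silence the analyzer is the initialize-and-assert idiom sketched below (illustrative names, hosted assert() standing in for the kernel's KASSERT(); this is not the committed upstream fix):

	#include <assert.h>
	#include <stddef.h>

	struct desc { unsigned short flags; };

	void
	encap_hardened_sketch(struct desc *ring, int nsegs, int use64)
	{
		struct desc *d = NULL;		/* defined on every path */
		int i;

		for (i = 0; i < nsegs; i++)
			if (use64)
				d = &ring[i];

		assert(nsegs > 0);		/* successful DMA load maps >= 1 segment */
		if (use64 && d != NULL)
			d->flags = 0;		/* no longer reads an undefined pointer */
	}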
960
961void
962nfe_start(struct ifnet *ifp)
963{
964 struct nfe_softc *sc = ifp->if_softc;
965 int old = sc->txq.cur;
966 struct mbuf *m0;
967
968 if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
23
Assuming the condition is false
24
Taking false branch
969 return;
970
971 for (;;) {
25
Loop condition is true. Entering loop body
972 m0 = ifq_deq_begin(&ifp->if_snd);
973 if (m0 == NULL)
26
Assuming 'm0' is not equal to NULL
27
Taking false branch
974 break;
975
976 if (nfe_encap(sc, m0) != 0) {
28
Calling 'nfe_encap'
977 ifq_deq_rollback(&ifp->if_snd, m0);
978 ifq_set_oactive(&ifp->if_snd);
979 break;
980 }
981
982 /* packet put in h/w queue, remove from s/w queue */
983 ifq_deq_commit(&ifp->if_snd, m0);
984
985#if NBPFILTER > 0
986 if (ifp->if_bpf != NULL)
987 bpf_mtap_ether(ifp->if_bpf, m0, BPF_DIRECTION_OUT);
988#endif
989 }
990 if (sc->txq.cur == old) /* nothing sent */
991 return;
992
993 if (sc->sc_flags & NFE_40BIT_ADDR)
994 nfe_txdesc64_rsync(sc, old, sc->txq.cur, BUS_DMASYNC_PREWRITE);
995 else
996 nfe_txdesc32_rsync(sc, old, sc->txq.cur, BUS_DMASYNC_PREWRITE);
997
998 /* kick Tx */
999 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);
1000
1001 /*
1002 * Set a timeout in case the chip goes out to lunch.
1003 */
1004 ifp->if_timer = 5;
1005}
1006
1007void
1008nfe_watchdog(struct ifnet *ifp)
1009{
1010 struct nfe_softc *sc = ifp->if_softc;
1011
1012 printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
1013
1014 nfe_init(ifp);
1015
1016 ifp->if_oerrors++;
1017}
1018
1019int
1020nfe_init(struct ifnet *ifp)
1021{
1022 struct nfe_softc *sc = ifp->if_softc;
1023 uint32_t tmp;
1024
1025 nfe_stop(ifp, 0);
1026
1027 NFE_WRITE(sc, NFE_TX_UNK, 0);
1028 NFE_WRITE(sc, NFE_STATUS, 0);
1029
1030 sc->rxtxctl = NFE_RXTX_BIT2;
1031 if (sc->sc_flags & NFE_40BIT_ADDR)
1032 sc->rxtxctl |= NFE_RXTX_V3MAGIC;
1033 else if (sc->sc_flags & NFE_JUMBO_SUP)
1034 sc->rxtxctl |= NFE_RXTX_V2MAGIC;
1035
1036 if (sc->sc_flags & NFE_HW_CSUM)
1037 sc->rxtxctl |= NFE_RXTX_RXCSUM;
1038 if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
1039 sc->rxtxctl |= NFE_RXTX_VTAG_INSERT | NFE_RXTX_VTAG_STRIP;
1040
1041 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl);
1042 DELAY(10);
1043 NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
1044
1045 if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
1046 NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE);
1047 else
1048 NFE_WRITE(sc, NFE_VTAG_CTL, 0);
1049
1050 NFE_WRITE(sc, NFE_SETUP_R6, 0);
1051
1052 /* set MAC address */
1053 nfe_set_macaddr(sc, sc->sc_arpcom.ac_enaddr);
1054
1055 /* tell MAC where rings are in memory */
1056#ifdef __LP64__
1057 NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, sc->rxq.physaddr >> 32);
1058#endif
1059 NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, sc->rxq.physaddr & 0xffffffff);
1060#ifdef __LP64__
1061 NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, sc->txq.physaddr >> 32);
1062#endif
1063 NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, sc->txq.physaddr & 0xffffffff);
1064
1065 NFE_WRITE(sc, NFE_RING_SIZE,
1066 (NFE_RX_RING_COUNT - 1) << 16 |
1067 (NFE_TX_RING_COUNT - 1));
1068
1069 NFE_WRITE(sc, NFE_RXBUFSZ, sc->rxq.bufsz);
1070
1071 /* force MAC to wakeup */
1072 tmp = NFE_READ(sc, NFE_PWR_STATE);
1073 NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_WAKEUP);
1074 DELAY(10);
1075 tmp = NFE_READ(sc, NFE_PWR_STATE);
1076 NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_VALID);
1077
1078#if 1
1079 /* configure interrupts coalescing/mitigation */
1080 NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT);
1081#else
1082 /* no interrupt mitigation: one interrupt per packet */
1083 NFE_WRITE(sc, NFE_IMTIMER, 970);
1084#endif
1085
1086 NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC);
1087 NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
1088 NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC);
1089
1090 /* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */
1091 NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC);
1092
1093 NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);
1094
1095 sc->rxtxctl &= ~NFE_RXTX_BIT2;
1096 NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
1097 DELAY(10);
1098 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl);
1099
1100 /* program promiscuous mode and multicast filters */
1101 nfe_iff(sc);
1102
1103 nfe_ifmedia_upd(ifp);
1104
1105 /* enable Rx */
1106 NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);
1107
1108 /* enable Tx */
1109 NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);
1110
1111 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
1112
1113 /* enable interrupts */
1114 NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);
1115
1116 timeout_add_sec(&sc->sc_tick_ch, 1);
1117
1118 ifp->if_flags |= IFF_RUNNING;
1119 ifq_clr_oactive(&ifp->if_snd);
1120
1121 return 0;
1122}
1123
1124void
1125nfe_stop(struct ifnet *ifp, int disable)
1126{
1127 struct nfe_softc *sc = ifp->if_softc;
1128
1129 timeout_del(&sc->sc_tick_ch);
1130
1131 ifp->if_timer = 0;
1132 ifp->if_flags &= ~IFF_RUNNING;
1133 ifq_clr_oactive(&ifp->if_snd);
1134
1135 mii_down(&sc->sc_mii);
1136
1137 /* abort Tx */
1138 NFE_WRITE(sc, NFE_TX_CTL, 0);
1139
1140 if ((sc->sc_flags & NFE_WOL) == 0) {
1141 /* disable Rx */
1142 NFE_WRITE(sc, NFE_RX_CTL, 0);
1143
1144 /* disable interrupts */
1145 NFE_WRITE(sc, NFE_IRQ_MASK, 0);
1146 }
1147
1148 /* reset Tx and Rx rings */
1149 nfe_reset_tx_ring(sc, &sc->txq);
1150 nfe_reset_rx_ring(sc, &sc->rxq);
1151}
1152
1153int
1154nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
1155{
1156 struct nfe_desc32 *desc32;
1157 struct nfe_desc64 *desc64;
1158 struct nfe_rx_data *data;
1159 void **desc;
1160 bus_addr_t physaddr;
1161 int i, nsegs, error, descsize;
1162
1163 if (sc->sc_flags & NFE_40BIT_ADDR) {
1164 desc = (void **)&ring->desc64;
1165 descsize = sizeof (struct nfe_desc64);
1166 } else {
1167 desc = (void **)&ring->desc32;
1168 descsize = sizeof (struct nfe_desc32);
1169 }
1170
1171 ring->cur = ring->next = 0;
1172 ring->bufsz = MCLBYTES;
1173
1174 error = bus_dmamap_create(sc->sc_dmat, NFE_RX_RING_COUNT * descsize, 1,(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), (128
* descsize), (1), (128 * descsize), (0), (0x0001), (&ring
->map))
1175 NFE_RX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map)(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), (128
* descsize), (1), (128 * descsize), (0), (0x0001), (&ring
->map))
;
1176 if (error != 0) {
1177 printf("%s: could not create desc DMA map\n",
1178 sc->sc_dev.dv_xname);
1179 goto fail;
1180 }
1181
1182 error = bus_dmamem_alloc(sc->sc_dmat, NFE_RX_RING_COUNT * descsize,(*(sc->sc_dmat)->_dmamem_alloc)((sc->sc_dmat), (128 *
descsize), ((1 << 12)), (0), (&ring->seg), (1),
(&nsegs), (0x0001 | 0x1000))
1183 PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO)(*(sc->sc_dmat)->_dmamem_alloc)((sc->sc_dmat), (128 *
descsize), ((1 << 12)), (0), (&ring->seg), (1),
(&nsegs), (0x0001 | 0x1000))
;
1184 if (error != 0) {
1185 printf("%s: could not allocate DMA memory\n",
1186 sc->sc_dev.dv_xname);
1187 goto fail;
1188 }
1189
1190 error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
1191 NFE_RX_RING_COUNT * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT);
1192 if (error != 0) {
1193 printf("%s: can't map desc DMA memory\n",
1194 sc->sc_dev.dv_xname);
1195 goto fail;
1196 }
1197
1198 error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
1199 NFE_RX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
1200 if (error != 0) {
1201 printf("%s: could not load desc DMA map\n",
1202 sc->sc_dev.dv_xname);
1203 goto fail;
1204 }
1205 ring->physaddr = ring->map->dm_segs[0].ds_addr;
1206
1207 /*
1208 * Pre-allocate Rx buffers and populate Rx ring.
1209 */
1210 for (i = 0; i < NFE_RX_RING_COUNT; i++) {
1211 data = &sc->rxq.data[i];
1212
1213 data->m = MCLGETL(NULL, M_DONTWAIT, MCLBYTES);
1214 if (data->m == NULL) {
1215 printf("%s: could not allocate rx mbuf\n",
1216 sc->sc_dev.dv_xname);
1217 error = ENOMEM;
1218 goto fail;
1219 }
1220 data->m->m_pkthdr.len = data->m->m_len = MCLBYTES;
1221
1222 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
1223 MCLBYTES, 0, BUS_DMA_NOWAIT, &data->map);
1224 if (error != 0) {
1225 printf("%s: could not create DMA map\n",
1226 sc->sc_dev.dv_xname);
1227 goto fail;
1228 }
1229
1230 error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, data->m,
1231 BUS_DMA_READ | BUS_DMA_NOWAIT);
1232 if (error != 0) {
1233 printf("%s: could not load rx buf DMA map",
1234 sc->sc_dev.dv_xname);
1235 goto fail;
1236 }
1237 physaddr = data->map->dm_segs[0].ds_addr;
1238
1239 if (sc->sc_flags & NFE_40BIT_ADDR) {
1240 desc64 = &sc->rxq.desc64[i];
1241#if defined(__LP64__)
1242 desc64->physaddr[0] = htole32(physaddr >> 32);
1243#endif
1244 desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
1245 desc64->length = htole16(sc->rxq.bufsz);
1246 desc64->flags = htole16(NFE_RX_READY);
1247 } else {
1248 desc32 = &sc->rxq.desc32[i];
1249 desc32->physaddr = htole32(physaddr);
1250 desc32->length = htole16(sc->rxq.bufsz);
1251 desc32->flags = htole16(NFE_RX_READY);
1252 }
1253 }
1254
1255 bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
1256 BUS_DMASYNC_PREWRITE);
1257
1258 return 0;
1259
1260fail: nfe_free_rx_ring(sc, ring);
1261 return error;
1262}
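
When populating the 64-bit Rx descriptors above, the bus address is split
into two little-endian 32-bit words, and the high word is only written under
__LP64__ (presumably because shifting a 32-bit bus_addr_t right by 32 would
be undefined on other platforms). A standalone sketch of the split with an
invented example address; htole32() is modeled as a plain cast, as it would
behave on a little-endian host:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t physaddr = 0x0000001234abcdefULL;	  /* example only */
	uint32_t hi = (uint32_t)(physaddr >> 32);	  /* physaddr[0] */
	uint32_t lo = (uint32_t)(physaddr & 0xffffffff); /* physaddr[1] */

	printf("hi=0x%08x lo=0x%08x\n", hi, lo);
	return 0;
}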
1263
1264void
1265nfe_reset_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
1266{
1267 int i;
1268
1269 for (i = 0; i < NFE_RX_RING_COUNT; i++) {
1270 if (sc->sc_flags & NFE_40BIT_ADDR) {
1271 ring->desc64[i].length = htole16(ring->bufsz);
1272 ring->desc64[i].flags = htole16(NFE_RX_READY);
1273 } else {
1274 ring->desc32[i].length = htole16(ring->bufsz);
1275 ring->desc32[i].flags = htole16(NFE_RX_READY);
1276 }
1277 }
1278
1279 bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
1280 BUS_DMASYNC_PREWRITE);
1281
1282 ring->cur = ring->next = 0;
1283}
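
The warning quoted at the head of this report (a dereference of an undefined
pointer value loaded from 'desc64') concerns exactly this shape of code: a
descriptor pointer assigned only under one test of sc_flags & NFE_40BIT_ADDR
and dereferenced under a later test of the same bit. Whether an analyzer
warns depends on whether it can prove both tests observe the same flags
value. A minimal standalone reproduction of the shape; every type and name
below is invented, only the control flow mirrors the driver:

#include <stdint.h>
#include <stdio.h>

struct d64 { uint16_t flags; };
struct d32 { uint16_t flags; };

static void
reset_one(unsigned int sc_flags, struct d64 *r64, struct d32 *r32)
{
	struct d64 *desc64;	/* stays undefined on the 32-bit path */
	struct d32 *desc32;	/* stays undefined on the 64-bit path */

	if (sc_flags & 0x02)	/* NFE_40BIT_ADDR */
		desc64 = r64;
	else
		desc32 = r32;

	/* If the analyzer treats this second test as independent of the
	 * first, it can reach the dereference with desc64 unset. */
	if (sc_flags & 0x02)
		desc64->flags = 0;
	else
		desc32->flags = 0;
}

int
main(void)
{
	struct d64 a = { 1 };
	struct d32 b = { 1 };

	reset_one(0x02, &a, &b);
	reset_one(0x00, &a, &b);
	printf("%u %u\n", a.flags, b.flags);
	return 0;
}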
1284
1285void
1286nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
1287{
1288 struct nfe_rx_data *data;
1289 void *desc;
1290 int i, descsize;
1291
1292 if (sc->sc_flags & NFE_40BIT_ADDR) {
1293 desc = ring->desc64;
1294 descsize = sizeof (struct nfe_desc64);
1295 } else {
1296 desc = ring->desc32;
1297 descsize = sizeof (struct nfe_desc32);
1298 }
1299
1300 if (desc != NULL) {
1301 bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
1302 ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1303 bus_dmamap_unload(sc->sc_dmat, ring->map);
1304 bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc,
1305 NFE_RX_RING_COUNT * descsize);
1306 bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
1307 }
1308
1309 for (i = 0; i < NFE_RX_RING_COUNT; i++) {
1310 data = &ring->data[i];
1311
1312 if (data->map != NULL) {
1313 bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1314 data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1315 bus_dmamap_unload(sc->sc_dmat, data->map);
1316 bus_dmamap_destroy(sc->sc_dmat, data->map);
1317 }
1318 m_freem(data->m);
1319 }
1320}
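
nfe_free_rx_ring() is also the fail: target of nfe_alloc_rx_ring(), so it
must tolerate whatever had been set up before the failure: the desc != NULL
and data->map != NULL guards skip untouched resources, and m_freem() accepts
a NULL mbuf. A standalone sketch of the same idea with invented types, where
free(NULL) plays the role of m_freem(NULL):

#include <stdio.h>
#include <stdlib.h>

struct slot { void *buf; };

static void
free_slots(struct slot *s, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		free(s[i].buf);		/* free(NULL) is a no-op */
		s[i].buf = NULL;
	}
}

int
main(void)
{
	struct slot ring[4] = {{ NULL }};

	ring[0].buf = malloc(16);	/* setup failed part-way through */
	free_slots(ring, 4);		/* still safe on the empty slots */
	printf("ok\n");
	return 0;
}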
1321
1322int
1323nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
1324{
1325 int i, nsegs, error;
1326 void **desc;
1327 int descsize;
1328
1329 if (sc->sc_flags & NFE_40BIT_ADDR) {
1330 desc = (void **)&ring->desc64;
1331 descsize = sizeof (struct nfe_desc64);
1332 } else {
1333 desc = (void **)&ring->desc32;
1334 descsize = sizeof (struct nfe_desc32);
1335 }
1336
1337 ring->queued = 0;
1338 ring->cur = ring->next = 0;
1339
1340 error = bus_dmamap_create(sc->sc_dmat, NFE_TX_RING_COUNT * descsize, 1,
1341 NFE_TX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);
1342
1343 if (error != 0) {
1344 printf("%s: could not create desc DMA map\n",
1345 sc->sc_dev.dv_xname);
1346 goto fail;
1347 }
1348
1349 error = bus_dmamem_alloc(sc->sc_dmat, NFE_TX_RING_COUNT * descsize,
1350 PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
1351 if (error != 0) {
1352 printf("%s: could not allocate DMA memory\n",
1353 sc->sc_dev.dv_xname);
1354 goto fail;
1355 }
1356
1357 error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
1358 NFE_TX_RING_COUNT * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT);
1359 if (error != 0) {
1360 printf("%s: can't map desc DMA memory\n",
1361 sc->sc_dev.dv_xname);
1362 goto fail;
1363 }
1364
1365 error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
1366 NFE_TX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
1367 if (error != 0) {
1368 printf("%s: could not load desc DMA map\n",
1369 sc->sc_dev.dv_xname);
1370 goto fail;
1371 }
1372 ring->physaddr = ring->map->dm_segs[0].ds_addr;
1373
1374 for (i = 0; i < NFE_TX_RING_COUNT; i++) {
1375 error = bus_dmamap_create(sc->sc_dmat, NFE_JBYTES,
1376 NFE_MAX_SCATTER, NFE_JBYTES, 0, BUS_DMA_NOWAIT,
1377 &ring->data[i].map);
1378 if (error != 0) {
1379 printf("%s: could not create DMA map\n",
1380 sc->sc_dev.dv_xname);
1381 goto fail;
1382 }
1383 }
1384
1385 return 0;
1386
1387fail: nfe_free_tx_ring(sc, ring);
1388 return error;
1389}
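
The expansions above reveal the Tx map geometry: NFE_JBYTES is 9018 + 2 and
NFE_MAX_SCATTER is NFE_TX_RING_COUNT - 2, so each map can carry one jumbo
frame of up to 9020 bytes in at most 254 segments (the two descriptors held
back presumably keep the ring from ever filling completely). A standalone
sketch using just those constants:

#include <stdio.h>

#define NFE_TX_RING_COUNT 256
#define NFE_JBYTES (9018 + 2)
#define NFE_MAX_SCATTER (NFE_TX_RING_COUNT - 2)

int
main(void)
{
	printf("Tx map: %d bytes max, %d segments max\n",
	    NFE_JBYTES, NFE_MAX_SCATTER);
	return 0;
}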
1390
1391void
1392nfe_reset_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
1393{
1394 struct nfe_tx_data *data;
1395 int i;
1396
1397 for (i = 0; i < NFE_TX_RING_COUNT; i++) {
1398 if (sc->sc_flags & NFE_40BIT_ADDR)
1399 ring->desc64[i].flags = 0;
1400 else
1401 ring->desc32[i].flags = 0;
1402
1403 data = &ring->data[i];
1404
1405 if (data->m != NULL) {
1406 bus_dmamap_sync(sc->sc_dmat, data->active, 0,
1407 data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1408 bus_dmamap_unload(sc->sc_dmat, data->active);
1409 m_freem(data->m);
1410 data->m = NULL;
1411 }
1412 }
1413
1414 bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
1415 BUS_DMASYNC_PREWRITE);
1416
1417 ring->queued = 0;
1418 ring->cur = ring->next = 0;
1419}
1420
1421void
1422nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
1423{
1424 struct nfe_tx_data *data;
1425 void *desc;
1426 int i, descsize;
1427
1428 if (sc->sc_flags & NFE_40BIT_ADDR) {
1429 desc = ring->desc64;
1430 descsize = sizeof (struct nfe_desc64);
1431 } else {
1432 desc = ring->desc32;
1433 descsize = sizeof (struct nfe_desc32);
1434 }
1435
1436 if (desc != NULL) {
1437 bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
1438 ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1439 bus_dmamap_unload(sc->sc_dmat, ring->map);
1440 bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc,
1441 NFE_TX_RING_COUNT * descsize);
1442 bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
1443 }
1444
1445 for (i = 0; i < NFE_TX_RING_COUNT; i++) {
1446 data = &ring->data[i];
1447
1448 if (data->m != NULL) {
1449 bus_dmamap_sync(sc->sc_dmat, data->active, 0,
1450 data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1451 bus_dmamap_unload(sc->sc_dmat, data->active);
1452 m_freem(data->m);
1453 }
1454 }
1455
1456 /* ..and now actually destroy the DMA mappings */
1457 for (i = 0; i < NFE_TX_RING_COUNT; i++) {
1458 data = &ring->data[i];
1459 if (data->map == NULL)
1460 continue;
1461 bus_dmamap_destroy(sc->sc_dmat, data->map);
1462 }
1463}
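
The teardown above runs in two passes: loaded maps (data->m != NULL, tracked
through data->active) are synced and unloaded first, and only afterwards is
every created map destroyed, so a map is never destroyed while still loaded.
A standalone sketch of that invariant; the struct and the assert are
illustrative only:

#include <assert.h>
#include <stdio.h>

struct map { int loaded; };

static void
map_destroy(struct map *m)
{
	assert(!m->loaded);	/* destroying a loaded map is a bug */
}

int
main(void)
{
	struct map m = { 1 };

	m.loaded = 0;		/* pass 1: the bus_dmamap_unload() step */
	map_destroy(&m);	/* pass 2: the bus_dmamap_destroy() step */
	printf("ok\n");
	return 0;
}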
1464
1465int
1466nfe_ifmedia_upd(struct ifnet *ifp)
1467{
1468 struct nfe_softc *sc = ifp->if_softc;
1469 struct mii_data *mii = &sc->sc_mii;
1470 struct mii_softc *miisc;
1471
1472 if (mii->mii_instance != 0) {
1473 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
1474 mii_phy_reset(miisc);
1475 }
1476 return mii_mediachg(mii);
1477}
1478
1479void
1480nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1481{
1482 struct nfe_softc *sc = ifp->if_softc;
1483 struct mii_data *mii = &sc->sc_mii;
1484
1485 mii_pollstat(mii);
1486 ifmr->ifm_status = mii->mii_media_status;
1487 ifmr->ifm_active = mii->mii_media_active;
1488}
1489
1490void
1491nfe_iff(struct nfe_softc *sc)
1492{
1493 struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1494 struct arpcom *ac = &sc->sc_arpcom;
1495 struct ether_multi *enm;
1496 struct ether_multistep step;
1497 uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
1498 uint32_t filter;
1499 int i;
1500
1501 filter = NFE_RXFILTER_MAGIC;
1502 ifp->if_flags &= ~IFF_ALLMULTI;
1503
1504 if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
1505 ifp->if_flags |= IFF_ALLMULTI;
1506 if (ifp->if_flags & IFF_PROMISC)
1507 filter |= NFE_PROMISC;
1508 else
1509 filter |= NFE_U2M;
1510 bzero(addr, ETHER_ADDR_LEN);
1511 bzero(mask, ETHER_ADDR_LEN);
1512 } else {
1513 filter |= NFE_U2M;
1514
1515 bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
1516 bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN);
1517
1518 ETHER_FIRST_MULTI(step, ac, enm);
1519 while (enm != NULL) {
1520 for (i = 0; i < ETHER_ADDR_LEN; i++) {
1521 addr[i] &= enm->enm_addrlo[i];
1522 mask[i] &= ~enm->enm_addrlo[i];
1523 }
1524
1525 ETHER_NEXT_MULTI(step, enm);
1526 }
1527
1528 for (i = 0; i < ETHER_ADDR_LEN; i++)
1529 mask[i] |= addr[i];
1530 }
1531
1532 addr[0] |= 0x01; /* make sure multicast bit is set */
1533
1534 NFE_WRITE(sc, NFE_MULTIADDR_HI,
1535 addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
1536 NFE_WRITE(sc, NFE_MULTIADDR_LO,
1537 addr[5] << 8 | addr[4]);
1538 NFE_WRITE(sc, NFE_MULTIMASK_HI,
1539 mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
1540 NFE_WRITE(sc, NFE_MULTIMASK_LO,
1541 mask[5] << 8 | mask[4]);
1542 NFE_WRITE(sc, NFE_RXFILTER, filter);
1543}
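
The loop in nfe_iff() folds every enrolled multicast address into a single
address/mask pair: starting from the broadcast address, addr keeps the bits
all addresses have set, the intermediate mask keeps the bits all addresses
have clear, and the final mask[i] |= addr[i] turns mask into "bit positions
on which every address agrees", so the hardware compares only those bits. A
standalone sketch of the fold with two invented multicast addresses:

#include <stdint.h>
#include <stdio.h>

#define ETHER_ADDR_LEN 6

int
main(void)
{
	uint8_t mc[2][ETHER_ADDR_LEN] = {
		{ 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 },
		{ 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb },
	};
	uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
	int i, j;

	for (i = 0; i < ETHER_ADDR_LEN; i++)
		addr[i] = mask[i] = 0xff;	/* etherbroadcastaddr */

	for (j = 0; j < 2; j++)
		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			addr[i] &= mc[j][i];	/* common 1-bits */
			mask[i] &= ~mc[j][i];	/* common 0-bits */
		}

	for (i = 0; i < ETHER_ADDR_LEN; i++)
		mask[i] |= addr[i];	/* positions that agree */

	/* Last byte prints 01/05: only bits 0 and 2 are compared. */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		printf("%02x/%02x ", addr[i], mask[i]);
	printf("\n");
	return 0;
}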
1544
1545void
1546nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
1547{
1548 uint32_t tmp;
1549
1550 if (sc->sc_flags & NFE_CORRECT_MACADDR) {
1551 tmp = NFE_READ(sc, NFE_MACADDR_HI);
1552 addr[0] = (tmp & 0xff);
1553 addr[1] = (tmp >> 8) & 0xff;
1554 addr[2] = (tmp >> 16) & 0xff;
1555 addr[3] = (tmp >> 24) & 0xff;
1556
1557 tmp = NFE_READ(sc, NFE_MACADDR_LO);
1558 addr[4] = (tmp & 0xff);
1559 addr[5] = (tmp >> 8) & 0xff;
1560
1561 } else {
1562 tmp = NFE_READ(sc, NFE_MACADDR_LO);
1563 addr[0] = (tmp >> 8) & 0xff;
1564 addr[1] = (tmp & 0xff);
1565
1566 tmp = NFE_READ(sc, NFE_MACADDR_HI);
1567 addr[2] = (tmp >> 24) & 0xff;
1568 addr[3] = (tmp >> 16) & 0xff;
1569 addr[4] = (tmp >> 8) & 0xff;
1570 addr[5] = (tmp & 0xff);
1571 }
1572}
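
Chips without NFE_CORRECT_MACADDR store the MAC address byte-reversed across
the two registers, which is why nfe_get_macaddr() has two decoders. A
standalone sketch showing both decodings of the same register pair; the
register values are invented for illustration:

#include <stdint.h>
#include <stdio.h>

static void
decode(uint32_t hi, uint32_t lo, int correct, uint8_t addr[6])
{
	if (correct) {		/* NFE_CORRECT_MACADDR layout */
		addr[0] = hi & 0xff;
		addr[1] = (hi >> 8) & 0xff;
		addr[2] = (hi >> 16) & 0xff;
		addr[3] = (hi >> 24) & 0xff;
		addr[4] = lo & 0xff;
		addr[5] = (lo >> 8) & 0xff;
	} else {		/* older chips: bytes reversed */
		addr[0] = (lo >> 8) & 0xff;
		addr[1] = lo & 0xff;
		addr[2] = (hi >> 24) & 0xff;
		addr[3] = (hi >> 16) & 0xff;
		addr[4] = (hi >> 8) & 0xff;
		addr[5] = hi & 0xff;
	}
}

int
main(void)
{
	uint8_t a[6];
	int i, correct;

	/* Prints 66:55:44:33:22:11, then 11:22:33:44:55:66. */
	for (correct = 0; correct <= 1; correct++) {
		decode(0x44332211, 0x00006655, correct, a);
		for (i = 0; i < 6; i++)
			printf("%02x%s", a[i], i < 5 ? ":" : "\n");
	}
	return 0;
}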
1573
1574void
1575nfe_set_macaddr(struct nfe_softc *sc, const uint8_t *addr)
1576{
1577 NFE_WRITE(sc, NFE_MACADDR_LO,
1578 addr[5] << 8 | addr[4]);
1579 NFE_WRITE(sc, NFE_MACADDR_HI,
1580 addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
1581}
1582
1583void
1584nfe_tick(void *arg)
1585{
1586 struct nfe_softc *sc = arg;
1587 int s;
1588
1589 s = splnet();
1590 mii_tick(&sc->sc_mii);
1591 splx(s);
1592
1593 timeout_add_sec(&sc->sc_tick_ch, 1);
1594}
1595
1596#ifndef SMALL_KERNEL
1597int
1598nfe_wol(struct ifnet *ifp, int enable)
1599{
1600 struct nfe_softc *sc = ifp->if_softc;
1601
1602 if (enable) {
1603 sc->sc_flags |= NFE_WOL;
1604 NFE_WRITE(sc, NFE_WOL_CTL, NFE_WOL_ENABLE);
1605 } else {
1606 sc->sc_flags &= ~NFE_WOL;
1607 NFE_WRITE(sc, NFE_WOL_CTL, 0);
1608 }
1609
1610 return 0;
1611}
1612#endif