Bug Summary

File: dev/pci/if_jme.c
Warning: line 188, column 9
Although the value stored to 'val' is used in the enclosing expression, the value is never actually read from 'val'

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name if_jme.c -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/lib/clang/13.0.0 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I 
/usr/src/sys/dev/pci/drm/amd/pm/powerplay -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -D CONFIG_DRM_AMD_DC_DCN3_0 -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant -fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /usr/obj/sys/arch/amd64/compile/GENERIC.MP/scan-build/2022-01-12-131800-47421-1 -x c /usr/src/sys/dev/pci/if_jme.c
1/* $OpenBSD: if_jme.c,v 1.55 2022/01/09 05:42:54 jsg Exp $ */
2/*-
3 * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice unmodified, this list of conditions, and the following
11 * disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 *
28 * $FreeBSD: src/sys/dev/jme/if_jme.c,v 1.2 2008/07/18 04:20:48 yongari Exp $
29 * $DragonFly: src/sys/dev/netif/jme/if_jme.c,v 1.7 2008/09/13 04:04:39 sephe Exp $
30 */
31
32#include "bpfilter.h"
33#include "vlan.h"
34
35#include <sys/param.h>
36#include <sys/endian.h>
37#include <sys/systm.h>
38#include <sys/sockio.h>
39#include <sys/mbuf.h>
40#include <sys/queue.h>
41#include <sys/kernel.h>
42#include <sys/device.h>
43#include <sys/timeout.h>
44#include <sys/socket.h>
45
46#include <machine/bus.h>
47
48#include <net/if.h>
49#include <net/if_dl.h>
50#include <net/if_media.h>
51
52#include <netinet/in.h>
53#include <netinet/if_ether.h>
54
55#if NBPFILTER1 > 0
56#include <net/bpf.h>
57#endif
58
59#include <dev/mii/miivar.h>
60#include <dev/mii/jmphyreg.h>
61
62#include <dev/pci/pcireg.h>
63#include <dev/pci/pcivar.h>
64#include <dev/pci/pcidevs.h>
65
66#include <dev/pci/if_jmereg.h>
67#include <dev/pci/if_jmevar.h>
68
69/* Define the following to disable printing Rx errors. */
70#undef JME_SHOW_ERRORS
71
72int jme_match(struct device *, void *, void *);
73void jme_map_intr_vector(struct jme_softc *);
74void jme_attach(struct device *, struct device *, void *);
75int jme_detach(struct device *, int);
76
77int jme_miibus_readreg(struct device *, int, int);
78void jme_miibus_writereg(struct device *, int, int, int);
79void jme_miibus_statchg(struct device *);
80
81int jme_init(struct ifnet *);
82int jme_ioctl(struct ifnet *, u_long, caddr_t);
83
84void jme_start(struct ifnet *);
85void jme_watchdog(struct ifnet *);
86void jme_mediastatus(struct ifnet *, struct ifmediareq *);
87int jme_mediachange(struct ifnet *);
88
89int jme_intr(void *);
90void jme_txeof(struct jme_softc *);
91void jme_rxeof(struct jme_softc *);
92
93int jme_dma_alloc(struct jme_softc *);
94void jme_dma_free(struct jme_softc *);
95int jme_init_rx_ring(struct jme_softc *);
96void jme_init_tx_ring(struct jme_softc *);
97void jme_init_ssb(struct jme_softc *);
98int jme_newbuf(struct jme_softc *, struct jme_rxdesc *);
99int jme_encap(struct jme_softc *, struct mbuf *);
100void jme_rxpkt(struct jme_softc *);
101
102void jme_tick(void *);
103void jme_stop(struct jme_softc *);
104void jme_reset(struct jme_softc *);
105void jme_set_vlan(struct jme_softc *);
106void jme_iff(struct jme_softc *);
107void jme_stop_tx(struct jme_softc *);
108void jme_stop_rx(struct jme_softc *);
109void jme_mac_config(struct jme_softc *);
110void jme_reg_macaddr(struct jme_softc *, uint8_t[]);
111int jme_eeprom_macaddr(struct jme_softc *, uint8_t[]);
112int jme_eeprom_read_byte(struct jme_softc *, uint8_t, uint8_t *);
113void jme_discard_rxbufs(struct jme_softc *, int, int);
114#ifdef notyet
115void jme_setwol(struct jme_softc *);
116void jme_setlinkspeed(struct jme_softc *);
117#endif
118
/*
 * Devices supported by this driver.
 */
const struct pci_matchid jme_devices[] = {
	{ PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC250 },
	{ PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC260 }
};

/* Autoconf glue: softc size plus match/attach entry points. */
struct cfattach jme_ca = {
	sizeof (struct jme_softc), jme_match, jme_attach
};

struct cfdriver jme_cd = {
	NULL, "jme", DV_IFNET
};

/* Set to non-zero to enable DPRINTF() diagnostics at runtime. */
int jmedebug = 0;
#define DPRINTF(x)	do { if (jmedebug) printf x; } while (0)
137
138/*
139 * Read a PHY register on the MII of the JMC250.
140 */
141int
142jme_miibus_readreg(struct device *dev, int phy, int reg)
143{
144 struct jme_softc *sc = (struct jme_softc *)dev;
145 uint32_t val;
146 int i;
147
148 /* For FPGA version, PHY address 0 should be ignored. */
149 if ((sc->jme_caps & JME_CAP_FPGA0x0001) && phy == 0)
150 return (0);
151
152 CSR_WRITE_4(sc, JME_SMI, SMI_OP_READ | SMI_OP_EXECUTE |(((sc)->jme_mem_bt)->write_4(((sc)->jme_mem_bh), ((0x0050
)), ((0x00000000 | 0x00000010 | (((phy) << 6) & 0x000007C0
) | (((reg) << 11) & 0x0000F800)))))
153 SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg))(((sc)->jme_mem_bt)->write_4(((sc)->jme_mem_bh), ((0x0050
)), ((0x00000000 | 0x00000010 | (((phy) << 6) & 0x000007C0
) | (((reg) << 11) & 0x0000F800)))))
;
154
155 for (i = JME_PHY_TIMEOUT1000; i > 0; i--) {
156 DELAY(1)(*delay_func)(1);
157 if (((val = CSR_READ_4(sc, JME_SMI)(((sc)->jme_mem_bt)->read_4(((sc)->jme_mem_bh), ((0x0050
))))
) & SMI_OP_EXECUTE0x00000010) == 0)
158 break;
159 }
160 if (i == 0) {
161 printf("%s: phy read timeout: phy %d, reg %d\n",
162 sc->sc_dev.dv_xname, phy, reg);
163 return (0);
164 }
165
166 return ((val & SMI_DATA_MASK0xFFFF0000) >> SMI_DATA_SHIFT16);
167}
168
169/*
170 * Write a PHY register on the MII of the JMC250.
171 */
172void
173jme_miibus_writereg(struct device *dev, int phy, int reg, int val)
174{
175 struct jme_softc *sc = (struct jme_softc *)dev;
176 int i;
177
178 /* For FPGA version, PHY address 0 should be ignored. */
179 if ((sc->jme_caps & JME_CAP_FPGA0x0001) && phy == 0)
180 return;
181
182 CSR_WRITE_4(sc, JME_SMI, SMI_OP_WRITE | SMI_OP_EXECUTE |(((sc)->jme_mem_bt)->write_4(((sc)->jme_mem_bh), ((0x0050
)), ((0x00000020 | 0x00000010 | ((val << 16) & 0xFFFF0000
) | (((phy) << 6) & 0x000007C0) | (((reg) << 11
) & 0x0000F800)))))
183 ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |(((sc)->jme_mem_bt)->write_4(((sc)->jme_mem_bh), ((0x0050
)), ((0x00000020 | 0x00000010 | ((val << 16) & 0xFFFF0000
) | (((phy) << 6) & 0x000007C0) | (((reg) << 11
) & 0x0000F800)))))
184 SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg))(((sc)->jme_mem_bt)->write_4(((sc)->jme_mem_bh), ((0x0050
)), ((0x00000020 | 0x00000010 | ((val << 16) & 0xFFFF0000
) | (((phy) << 6) & 0x000007C0) | (((reg) << 11
) & 0x0000F800)))))
;
185
186 for (i = JME_PHY_TIMEOUT1000; i > 0; i--) {
187 DELAY(1)(*delay_func)(1);
188 if (((val = CSR_READ_4(sc, JME_SMI)(((sc)->jme_mem_bt)->read_4(((sc)->jme_mem_bh), ((0x0050
))))
) & SMI_OP_EXECUTE0x00000010) == 0)
Although the value stored to 'val' is used in the enclosing expression, the value is never actually read from 'val'
189 break;
190 }
191 if (i == 0) {
192 printf("%s: phy write timeout: phy %d, reg %d\n",
193 sc->sc_dev.dv_xname, phy, reg);
194 }
195}
196
/*
 * Callback from MII layer when media changes.
 *
 * Re-resolves link state, then performs a full stop/reconfigure/restart
 * of the MAC: the statement order below is deliberate and must not be
 * rearranged (see the comment about JME_TXNDA/JME_RXNDA below).
 */
void
jme_miibus_statchg(struct device *dev)
{
	struct jme_softc *sc = (struct jme_softc *)dev;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mii_data *mii;
	struct jme_txdesc *txd;
	bus_addr_t paddr;
	int i;

	/* Nothing to do unless the interface is up and running. */
	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	mii = &sc->sc_miibus;

	/* Recompute JME_FLAG_LINK from the freshly resolved media. */
	sc->jme_flags &= ~JME_FLAG_LINK;
	if ((mii->mii_media_status & IFM_AVALID) != 0) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->jme_flags |= JME_FLAG_LINK;
			break;
		case IFM_1000_T:
			/* Fast-ethernet-only parts cannot do 1000baseT. */
			if (sc->jme_caps & JME_CAP_FASTETH)
				break;
			sc->jme_flags |= JME_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	/*
	 * Disabling Rx/Tx MACs have a side-effect of resetting
	 * JME_TXNDA/JME_RXNDA register to the first address of
	 * Tx/Rx descriptor address. So driver should reset its
	 * internal producer/consumer pointer and reclaim any
	 * allocated resources. Note, just saving the value of
	 * JME_TXNDA and JME_RXNDA registers before stopping MAC
	 * and restoring JME_TXNDA/JME_RXNDA register is not
	 * sufficient to make sure correct MAC state because
	 * stopping MAC operation can take a while and hardware
	 * might have updated JME_TXNDA/JME_RXNDA registers
	 * during the stop operation.
	 */

	/* Disable interrupts */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);

	/* Stop driver */
	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_timer = 0;
	timeout_del(&sc->jme_tick_ch);

	/* Stop receiver/transmitter. */
	jme_stop_rx(sc);
	jme_stop_tx(sc);

	/* Drain any received packets, then drop the partial rx chain. */
	jme_rxeof(sc);
	m_freem(sc->jme_cdata.jme_rxhead);
	JME_RXCHAIN_RESET(sc);

	/* Reclaim completed transmits; free anything still queued. */
	jme_txeof(sc);
	if (sc->jme_cdata.jme_tx_cnt != 0) {
		/* Remove queued packets for transmit. */
		for (i = 0; i < JME_TX_RING_CNT; i++) {
			txd = &sc->jme_cdata.jme_txdesc[i];
			if (txd->tx_m != NULL) {
				bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
				m_freem(txd->tx_m);
				txd->tx_m = NULL;
				txd->tx_ndesc = 0;
				ifp->if_oerrors++;
			}
		}
	}

	/*
	 * Reuse configured Rx descriptors and reset
	 * producer/consumer index.
	 */
	sc->jme_cdata.jme_rx_cons = 0;

	jme_init_tx_ring(sc);

	/* Initialize shadow status block. */
	jme_init_ssb(sc);

	/* Program MAC with resolved speed/duplex/flow-control. */
	if (sc->jme_flags & JME_FLAG_LINK) {
		jme_mac_config(sc);

		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr);
		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);

		/* Set Tx ring address to the hardware. */
		paddr = JME_TX_RING_ADDR(sc, 0);
		CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
		CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));

		/* Set Rx ring address to the hardware. */
		paddr = JME_RX_RING_ADDR(sc, 0);
		CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
		CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));

		/* Restart receiver/transmitter. */
		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RX_ENB |
		    RXCSR_RXQ_START);
		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB);
	}

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	timeout_add_sec(&sc->jme_tick_ch, 1);

	/* Reenable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
}
319
320/*
321 * Get the current interface media status.
322 */
323void
324jme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
325{
326 struct jme_softc *sc = ifp->if_softc;
327 struct mii_data *mii = &sc->sc_miibus;
328
329 mii_pollstat(mii);
330 ifmr->ifm_status = mii->mii_media_status;
331 ifmr->ifm_active = mii->mii_media_active;
332}
333
334/*
335 * Set hardware to newly-selected media.
336 */
337int
338jme_mediachange(struct ifnet *ifp)
339{
340 struct jme_softc *sc = ifp->if_softc;
341 struct mii_data *mii = &sc->sc_miibus;
342 int error;
343
344 if (mii->mii_instance != 0) {
345 struct mii_softc *miisc;
346
347 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)for((miisc) = ((&mii->mii_phys)->lh_first); (miisc)
!= ((void *)0); (miisc) = ((miisc)->mii_list.le_next))
348 mii_phy_reset(miisc);
349 }
350 error = mii_mediachg(mii);
351
352 return (error);
353}
354
355int
356jme_match(struct device *dev, void *match, void *aux)
357{
358 return pci_matchbyid((struct pci_attach_args *)aux, jme_devices,
359 sizeof (jme_devices) / sizeof (jme_devices[0]));
360}
361
362int
363jme_eeprom_read_byte(struct jme_softc *sc, uint8_t addr, uint8_t *val)
364{
365 uint32_t reg;
366 int i;
367
368 *val = 0;
369 for (i = JME_TIMEOUT1000; i > 0; i--) {
370 reg = CSR_READ_4(sc, JME_SMBCSR)(((sc)->jme_mem_bt)->read_4(((sc)->jme_mem_bh), ((0x0440
))))
;
371 if ((reg & SMBCSR_HW_BUSY_MASK0x0000000F) == SMBCSR_HW_IDLE0x00000000)
372 break;
373 DELAY(1)(*delay_func)(1);
374 }
375
376 if (i == 0) {
377 printf("%s: EEPROM idle timeout!\n", sc->sc_dev.dv_xname);
378 return (ETIMEDOUT60);
379 }
380
381 reg = ((uint32_t)addr << SMBINTF_ADDR_SHIFT8) & SMBINTF_ADDR_MASK0x0000FF00;
382 CSR_WRITE_4(sc, JME_SMBINTF, reg | SMBINTF_RD | SMBINTF_CMD_TRIGGER)(((sc)->jme_mem_bt)->write_4(((sc)->jme_mem_bh), ((0x0444
)), ((reg | 0x00000020 | 0x00000010))))
;
383 for (i = JME_TIMEOUT1000; i > 0; i--) {
384 DELAY(1)(*delay_func)(1);
385 reg = CSR_READ_4(sc, JME_SMBINTF)(((sc)->jme_mem_bt)->read_4(((sc)->jme_mem_bh), ((0x0444
))))
;
386 if ((reg & SMBINTF_CMD_TRIGGER0x00000010) == 0)
387 break;
388 }
389
390 if (i == 0) {
391 printf("%s: EEPROM read timeout!\n", sc->sc_dev.dv_xname);
392 return (ETIMEDOUT60);
393 }
394
395 reg = CSR_READ_4(sc, JME_SMBINTF)(((sc)->jme_mem_bt)->read_4(((sc)->jme_mem_bh), ((0x0444
))))
;
396 *val = (reg & SMBINTF_RD_DATA_MASK0xFF000000) >> SMBINTF_RD_DATA_SHIFT24;
397
398 return (0);
399}
400
/*
 * Extract the station address from the EEPROM descriptor chain.
 *
 * The EEPROM starts with a two-byte signature, followed by 3-byte
 * descriptors of (flags/page, register, value).  Bytes written to the
 * JME_PAR0..JME_PAR0+5 registers form the MAC address.  Returns 0 when
 * all ETHER_ADDR_LEN bytes were found, ENOENT otherwise.
 */
int
jme_eeprom_macaddr(struct jme_softc *sc, uint8_t eaddr[])
{
	uint8_t fup, reg, val;
	uint32_t offset;
	int match;

	/* Verify the two-byte EEPROM signature first. */
	offset = 0;
	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
	    fup != JME_EEPROM_SIG0)
		return (ENOENT);
	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
	    fup != JME_EEPROM_SIG1)
		return (ENOENT);
	match = 0;
	do {
		if (jme_eeprom_read_byte(sc, offset, &fup) != 0)
			break;
		/* Only descriptors for function 0 / BAR1 page hold PAR bytes. */
		if (JME_EEPROM_MKDESC(JME_EEPROM_FUNC0, JME_EEPROM_PAGE_BAR1) ==
		    (fup & (JME_EEPROM_FUNC_MASK | JME_EEPROM_PAGE_MASK))) {
			if (jme_eeprom_read_byte(sc, offset + 1, &reg) != 0)
				break;
			if (reg >= JME_PAR0 &&
			    reg < JME_PAR0 + ETHER_ADDR_LEN) {
				if (jme_eeprom_read_byte(sc, offset + 2,
				    &val) != 0)
					break;
				eaddr[reg - JME_PAR0] = val;
				match++;
			}
		}
		/* Check for the end of EEPROM descriptor. */
		if ((fup & JME_EEPROM_DESC_END) == JME_EEPROM_DESC_END)
			break;
		/* Try next eeprom descriptor. */
		offset += JME_EEPROM_DESC_BYTES;
	} while (match != ETHER_ADDR_LEN && offset < JME_EEPROM_END);

	if (match == ETHER_ADDR_LEN)
		return (0);

	return (ENOENT);
}
444
445void
446jme_reg_macaddr(struct jme_softc *sc, uint8_t eaddr[])
447{
448 uint32_t par0, par1;
449
450 /* Read station address. */
451 par0 = CSR_READ_4(sc, JME_PAR0)(((sc)->jme_mem_bt)->read_4(((sc)->jme_mem_bh), ((0x0038
))))
;
452 par1 = CSR_READ_4(sc, JME_PAR1)(((sc)->jme_mem_bt)->read_4(((sc)->jme_mem_bh), ((0x003C
))))
;
453 par1 &= 0xFFFF;
454
455 eaddr[0] = (par0 >> 0) & 0xFF;
456 eaddr[1] = (par0 >> 8) & 0xFF;
457 eaddr[2] = (par0 >> 16) & 0xFF;
458 eaddr[3] = (par0 >> 24) & 0xFF;
459 eaddr[4] = (par1 >> 0) & 0xFF;
460 eaddr[5] = (par1 >> 8) & 0xFF;
461}
462
463void
464jme_map_intr_vector(struct jme_softc *sc)
465{
466 uint32_t map[MSINUM_NUM_INTR_SOURCE32 / JME_MSI_MESSAGES8];
467
468 bzero(map, sizeof(map))__builtin_bzero((map), (sizeof(map)));
469
470 /* Map Tx interrupts source to MSI/MSIX vector 2. */
471 map[MSINUM_REG_INDEX(N_INTR_TXQ0_COMP)((0) / 8)] =
472 MSINUM_INTR_SOURCE(2, N_INTR_TXQ0_COMP)(((2) & 7) << (((0) & 7) * 4));
473 map[MSINUM_REG_INDEX(N_INTR_TXQ1_COMP)((1) / 8)] |=
474 MSINUM_INTR_SOURCE(2, N_INTR_TXQ1_COMP)(((2) & 7) << (((1) & 7) * 4));
475 map[MSINUM_REG_INDEX(N_INTR_TXQ2_COMP)((2) / 8)] |=
476 MSINUM_INTR_SOURCE(2, N_INTR_TXQ2_COMP)(((2) & 7) << (((2) & 7) * 4));
477 map[MSINUM_REG_INDEX(N_INTR_TXQ3_COMP)((3) / 8)] |=
478 MSINUM_INTR_SOURCE(2, N_INTR_TXQ3_COMP)(((2) & 7) << (((3) & 7) * 4));
479 map[MSINUM_REG_INDEX(N_INTR_TXQ4_COMP)((4) / 8)] |=
480 MSINUM_INTR_SOURCE(2, N_INTR_TXQ4_COMP)(((2) & 7) << (((4) & 7) * 4));
481 map[MSINUM_REG_INDEX(N_INTR_TXQ4_COMP)((4) / 8)] |=
482 MSINUM_INTR_SOURCE(2, N_INTR_TXQ5_COMP)(((2) & 7) << (((5) & 7) * 4));
483 map[MSINUM_REG_INDEX(N_INTR_TXQ6_COMP)((6) / 8)] |=
484 MSINUM_INTR_SOURCE(2, N_INTR_TXQ6_COMP)(((2) & 7) << (((6) & 7) * 4));
485 map[MSINUM_REG_INDEX(N_INTR_TXQ7_COMP)((7) / 8)] |=
486 MSINUM_INTR_SOURCE(2, N_INTR_TXQ7_COMP)(((2) & 7) << (((7) & 7) * 4));
487 map[MSINUM_REG_INDEX(N_INTR_TXQ_COAL)((16) / 8)] |=
488 MSINUM_INTR_SOURCE(2, N_INTR_TXQ_COAL)(((2) & 7) << (((16) & 7) * 4));
489 map[MSINUM_REG_INDEX(N_INTR_TXQ_COAL_TO)((21) / 8)] |=
490 MSINUM_INTR_SOURCE(2, N_INTR_TXQ_COAL_TO)(((2) & 7) << (((21) & 7) * 4));
491
492 /* Map Rx interrupts source to MSI/MSIX vector 1. */
493 map[MSINUM_REG_INDEX(N_INTR_RXQ0_COMP)((8) / 8)] =
494 MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_COMP)(((1) & 7) << (((8) & 7) * 4));
495 map[MSINUM_REG_INDEX(N_INTR_RXQ1_COMP)((9) / 8)] =
496 MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_COMP)(((1) & 7) << (((9) & 7) * 4));
497 map[MSINUM_REG_INDEX(N_INTR_RXQ2_COMP)((10) / 8)] =
498 MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_COMP)(((1) & 7) << (((10) & 7) * 4));
499 map[MSINUM_REG_INDEX(N_INTR_RXQ3_COMP)((11) / 8)] =
500 MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_COMP)(((1) & 7) << (((11) & 7) * 4));
501 map[MSINUM_REG_INDEX(N_INTR_RXQ0_DESC_EMPTY)((12) / 8)] =
502 MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_DESC_EMPTY)(((1) & 7) << (((12) & 7) * 4));
503 map[MSINUM_REG_INDEX(N_INTR_RXQ1_DESC_EMPTY)((13) / 8)] =
504 MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_DESC_EMPTY)(((1) & 7) << (((13) & 7) * 4));
505 map[MSINUM_REG_INDEX(N_INTR_RXQ2_DESC_EMPTY)((14) / 8)] =
506 MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_DESC_EMPTY)(((1) & 7) << (((14) & 7) * 4));
507 map[MSINUM_REG_INDEX(N_INTR_RXQ3_DESC_EMPTY)((15) / 8)] =
508 MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_DESC_EMPTY)(((1) & 7) << (((15) & 7) * 4));
509 map[MSINUM_REG_INDEX(N_INTR_RXQ0_COAL)((20) / 8)] =
510 MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_COAL)(((1) & 7) << (((20) & 7) * 4));
511 map[MSINUM_REG_INDEX(N_INTR_RXQ1_COAL)((19) / 8)] =
512 MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_COAL)(((1) & 7) << (((19) & 7) * 4));
513 map[MSINUM_REG_INDEX(N_INTR_RXQ2_COAL)((18) / 8)] =
514 MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_COAL)(((1) & 7) << (((18) & 7) * 4));
515 map[MSINUM_REG_INDEX(N_INTR_RXQ3_COAL)((17) / 8)] =
516 MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_COAL)(((1) & 7) << (((17) & 7) * 4));
517 map[MSINUM_REG_INDEX(N_INTR_RXQ0_COAL_TO)((25) / 8)] =
518 MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_COAL_TO)(((1) & 7) << (((25) & 7) * 4));
519 map[MSINUM_REG_INDEX(N_INTR_RXQ1_COAL_TO)((24) / 8)] =
520 MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_COAL_TO)(((1) & 7) << (((24) & 7) * 4));
521 map[MSINUM_REG_INDEX(N_INTR_RXQ2_COAL_TO)((23) / 8)] =
522 MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_COAL_TO)(((1) & 7) << (((23) & 7) * 4));
523 map[MSINUM_REG_INDEX(N_INTR_RXQ3_COAL_TO)((22) / 8)] =
524 MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_COAL_TO)(((1) & 7) << (((22) & 7) * 4));
525
526 /* Map all other interrupts source to MSI/MSIX vector 0. */
527 CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 0, map[0])(((sc)->jme_mem_bt)->write_4(((sc)->jme_mem_bh), ((0x0810
+ sizeof(uint32_t) * 0)), ((map[0]))))
;
528 CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 1, map[1])(((sc)->jme_mem_bt)->write_4(((sc)->jme_mem_bh), ((0x0810
+ sizeof(uint32_t) * 1)), ((map[1]))))
;
529 CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 2, map[2])(((sc)->jme_mem_bt)->write_4(((sc)->jme_mem_bh), ((0x0810
+ sizeof(uint32_t) * 2)), ((map[2]))))
;
530 CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 3, map[3])(((sc)->jme_mem_bt)->write_4(((sc)->jme_mem_bh), ((0x0810
+ sizeof(uint32_t) * 3)), ((map[3]))))
;
531}
532
/*
 * Attach: map the BAR, hook up the interrupt, read chip revision and
 * station address, allocate DMA resources, then register the interface
 * with the network stack and MII layer.
 */
void
jme_attach(struct device *parent, struct device *self, void *aux)
{
	struct jme_softc *sc = (struct jme_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr;
	pcireg_t memtype;

	struct ifnet *ifp;
	uint32_t reg;
	int error = 0;

	/*
	 * Allocate IO memory
	 *
	 * JMC250 supports both memory mapped and I/O register space
	 * access. Because I/O register access should use different
	 * BARs to access registers it's waste of time to use I/O
	 * register space access. JMC250 uses 16K to map entire memory
	 * space.
	 */

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, JME_PCIR_BAR);
	if (pci_mapreg_map(pa, JME_PCIR_BAR, memtype, 0, &sc->jme_mem_bt,
	    &sc->jme_mem_bh, NULL, &sc->jme_mem_size, 0)) {
		printf(": can't map mem space\n");
		return;
	}

	/* Prefer MSI; program the vector map only when MSI is in use. */
	if (pci_intr_map_msi(pa, &ih) == 0)
		jme_map_intr_vector(sc);
	else if (pci_intr_map(pa, &ih) != 0) {
		printf(": can't map interrupt\n");
		return;
	}

	/*
	 * Allocate IRQ
	 */
	intrstr = pci_intr_string(pc, ih);
	sc->sc_irq_handle = pci_intr_establish(pc, ih, IPL_NET, jme_intr, sc,
	    sc->sc_dev.dv_xname);
	if (sc->sc_irq_handle == NULL) {
		printf(": could not establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf(": %s", intrstr);

	sc->sc_dmat = pa->pa_dmat;
	sc->jme_pct = pa->pa_pc;
	sc->jme_pcitag = pa->pa_tag;

	/*
	 * Extract FPGA revision
	 */
	reg = CSR_READ_4(sc, JME_CHIPMODE);
	if (((reg & CHIPMODE_FPGA_REV_MASK) >> CHIPMODE_FPGA_REV_SHIFT) !=
	    CHIPMODE_NOT_FPGA) {
		sc->jme_caps |= JME_CAP_FPGA;

		if (jmedebug) {
			printf("%s: FPGA revision : 0x%04x\n",
			    sc->sc_dev.dv_xname,
			    (reg & CHIPMODE_FPGA_REV_MASK) >>
			    CHIPMODE_FPGA_REV_SHIFT);
		}
	}

	sc->jme_revfm = (reg & CHIPMODE_REVFM_MASK) >> CHIPMODE_REVFM_SHIFT;

	/* Early JMC250 A2 silicon needs CRC/packet-loss workarounds. */
	if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_JMICRON_JMC250 &&
	    PCI_REVISION(pa->pa_class) == JME_REV_JMC250_A2)
		sc->jme_workaround |= JME_WA_CRCERRORS | JME_WA_PACKETLOSS;

	/* Reset the ethernet controller. */
	jme_reset(sc);

	/* Get station address: try EEPROM first, then the PAR registers. */
	reg = CSR_READ_4(sc, JME_SMBCSR);
	if (reg & SMBCSR_EEPROM_PRESENT)
		error = jme_eeprom_macaddr(sc, sc->sc_arpcom.ac_enaddr);
	if (error != 0 || (reg & SMBCSR_EEPROM_PRESENT) == 0) {
		if (error != 0 && (jmedebug)) {
			printf("%s: ethernet hardware address "
			    "not found in EEPROM.\n", sc->sc_dev.dv_xname);
		}
		jme_reg_macaddr(sc, sc->sc_arpcom.ac_enaddr);
	}

	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	/*
	 * Save PHY address.
	 * Integrated JR0211 has fixed PHY address whereas FPGA version
	 * requires PHY probing to get correct PHY address.
	 */
	if ((sc->jme_caps & JME_CAP_FPGA) == 0) {
		sc->jme_phyaddr = CSR_READ_4(sc, JME_GPREG0) &
		    GPREG0_PHY_ADDR_MASK;
		if (jmedebug) {
			printf("%s: PHY is at address %d.\n",
			    sc->sc_dev.dv_xname, sc->jme_phyaddr);
		}
	} else {
		sc->jme_phyaddr = 0;
	}

	/* Set max allowable DMA size. */
	sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
	sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;

#ifdef notyet
	if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0)
		sc->jme_caps |= JME_CAP_PMCAP;
#endif

	/* Allocate DMA stuffs */
	error = jme_dma_alloc(sc);
	if (error)
		goto fail;

	/* Wire up the ifnet entry points and capabilities. */
	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = jme_ioctl;
	ifp->if_start = jme_start;
	ifp->if_watchdog = jme_watchdog;
	ifq_set_maxlen(&ifp->if_snd, JME_TX_RING_CNT - 1);
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_CSUM_IPv4 |
	    IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4 | IFCAP_CSUM_TCPv6 |
	    IFCAP_CSUM_UDPv6;

#if NVLAN > 0
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif

	/* Set up MII bus. */
	sc->sc_miibus.mii_ifp = ifp;
	sc->sc_miibus.mii_readreg = jme_miibus_readreg;
	sc->sc_miibus.mii_writereg = jme_miibus_writereg;
	sc->sc_miibus.mii_statchg = jme_miibus_statchg;

	ifmedia_init(&sc->sc_miibus.mii_media, 0, jme_mediachange,
	    jme_mediastatus);
	mii_attach(self, &sc->sc_miibus, 0xffffffff,
	    sc->jme_caps & JME_CAP_FPGA ? MII_PHY_ANY : sc->jme_phyaddr,
	    MII_OFFSET_ANY, MIIF_DOPAUSE);

	if (LIST_FIRST(&sc->sc_miibus.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_AUTO);

	/*
	 * Save PHYADDR for FPGA mode PHY not handled, not production hw
	 */

	if_attach(ifp);
	ether_ifattach(ifp);

	timeout_set(&sc->jme_tick_ch, jme_tick, sc);

	return;
fail:
	jme_detach(&sc->sc_dev, 0);
}
709
/*
 * Detach: stop the hardware, tear down MII/media state, detach the
 * interface, and release DMA and interrupt resources.  Also used as
 * the error-unwind path from jme_attach(), so every step must tolerate
 * partially-initialized state.  Always returns 0.
 */
int
jme_detach(struct device *self, int flags)
{
	struct jme_softc *sc = (struct jme_softc *)self;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int s;

	/* Quiesce the chip at network interrupt priority. */
	s = splnet();
	jme_stop(sc);
	splx(s);

	mii_detach(&sc->sc_miibus, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Delete all remaining media. */
	ifmedia_delete_instance(&sc->sc_miibus.mii_media, IFM_INST_ANY);

	ether_ifdetach(ifp);
	if_detach(ifp);
	jme_dma_free(sc);

	/* Release the interrupt last, after nothing can fire it. */
	if (sc->sc_irq_handle != NULL) {
		pci_intr_disestablish(sc->jme_pct, sc->sc_irq_handle);
		sc->sc_irq_handle = NULL;
	}

	return (0);
}
737
738int
739jme_dma_alloc(struct jme_softc *sc)
740{
741 struct jme_txdesc *txd;
742 struct jme_rxdesc *rxd;
743 int error, i, nsegs;
744
745 /*
746 * Create DMA stuffs for TX ring
747 */
748
749 error = bus_dmamap_create(sc->sc_dmat, JME_TX_RING_SIZE, 1,(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), ((sizeof
(struct jme_desc) * 384)), (1), ((sizeof(struct jme_desc) * 384
)), (0), (0x0001), (&sc->jme_cdata.jme_tx_ring_map))
750 JME_TX_RING_SIZE, 0, BUS_DMA_NOWAIT,(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), ((sizeof
(struct jme_desc) * 384)), (1), ((sizeof(struct jme_desc) * 384
)), (0), (0x0001), (&sc->jme_cdata.jme_tx_ring_map))
751 &sc->jme_cdata.jme_tx_ring_map)(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), ((sizeof
(struct jme_desc) * 384)), (1), ((sizeof(struct jme_desc) * 384
)), (0), (0x0001), (&sc->jme_cdata.jme_tx_ring_map))
;
752 if (error)
753 return (ENOBUFS55);
754
755 /* Allocate DMA'able memory for TX ring */
756 error = bus_dmamem_alloc(sc->sc_dmat, JME_TX_RING_SIZE, ETHER_ALIGN, 0,(*(sc->sc_dmat)->_dmamem_alloc)((sc->sc_dmat), ((sizeof
(struct jme_desc) * 384)), (2), (0), (&sc->jme_rdata.jme_tx_ring_seg
), (1), (&nsegs), (0x0000))
757 &sc->jme_rdata.jme_tx_ring_seg, 1, &nsegs,(*(sc->sc_dmat)->_dmamem_alloc)((sc->sc_dmat), ((sizeof
(struct jme_desc) * 384)), (2), (0), (&sc->jme_rdata.jme_tx_ring_seg
), (1), (&nsegs), (0x0000))
758 BUS_DMA_WAITOK)(*(sc->sc_dmat)->_dmamem_alloc)((sc->sc_dmat), ((sizeof
(struct jme_desc) * 384)), (2), (0), (&sc->jme_rdata.jme_tx_ring_seg
), (1), (&nsegs), (0x0000))
;
759/* XXX zero */
760 if (error) {
761 printf("%s: could not allocate DMA'able memory for Tx ring.\n",
762 sc->sc_dev.dv_xname);
763 return error;
764 }
765
766 error = bus_dmamem_map(sc->sc_dmat, &sc->jme_rdata.jme_tx_ring_seg,(*(sc->sc_dmat)->_dmamem_map)((sc->sc_dmat), (&sc
->jme_rdata.jme_tx_ring_seg), (nsegs), ((sizeof(struct jme_desc
) * 384)), ((caddr_t *)&sc->jme_rdata.jme_tx_ring), (0x0001
))
767 nsegs, JME_TX_RING_SIZE, (caddr_t *)&sc->jme_rdata.jme_tx_ring,(*(sc->sc_dmat)->_dmamem_map)((sc->sc_dmat), (&sc
->jme_rdata.jme_tx_ring_seg), (nsegs), ((sizeof(struct jme_desc
) * 384)), ((caddr_t *)&sc->jme_rdata.jme_tx_ring), (0x0001
))
768 BUS_DMA_NOWAIT)(*(sc->sc_dmat)->_dmamem_map)((sc->sc_dmat), (&sc
->jme_rdata.jme_tx_ring_seg), (nsegs), ((sizeof(struct jme_desc
) * 384)), ((caddr_t *)&sc->jme_rdata.jme_tx_ring), (0x0001
))
;
769 if (error)
770 return (ENOBUFS55);
771
772 /* Load the DMA map for Tx ring. */
773 error = bus_dmamap_load(sc->sc_dmat,(*(sc->sc_dmat)->_dmamap_load)((sc->sc_dmat), (sc->
jme_cdata.jme_tx_ring_map), (sc->jme_rdata.jme_tx_ring), (
(sizeof(struct jme_desc) * 384)), (((void *)0)), (0x0001))
774 sc->jme_cdata.jme_tx_ring_map, sc->jme_rdata.jme_tx_ring,(*(sc->sc_dmat)->_dmamap_load)((sc->sc_dmat), (sc->
jme_cdata.jme_tx_ring_map), (sc->jme_rdata.jme_tx_ring), (
(sizeof(struct jme_desc) * 384)), (((void *)0)), (0x0001))
775 JME_TX_RING_SIZE, NULL, BUS_DMA_NOWAIT)(*(sc->sc_dmat)->_dmamap_load)((sc->sc_dmat), (sc->
jme_cdata.jme_tx_ring_map), (sc->jme_rdata.jme_tx_ring), (
(sizeof(struct jme_desc) * 384)), (((void *)0)), (0x0001))
;
776 if (error) {
777 printf("%s: could not load DMA'able memory for Tx ring.\n",
778 sc->sc_dev.dv_xname);
779 bus_dmamem_free(sc->sc_dmat,(*(sc->sc_dmat)->_dmamem_free)((sc->sc_dmat), ((bus_dma_segment_t
*)&sc->jme_rdata.jme_tx_ring), (1))
780 (bus_dma_segment_t *)&sc->jme_rdata.jme_tx_ring, 1)(*(sc->sc_dmat)->_dmamem_free)((sc->sc_dmat), ((bus_dma_segment_t
*)&sc->jme_rdata.jme_tx_ring), (1))
;
781 return error;
782 }
783 sc->jme_rdata.jme_tx_ring_paddr =
784 sc->jme_cdata.jme_tx_ring_map->dm_segs[0].ds_addr;
785
786 /*
787 * Create DMA stuffs for RX ring
788 */
789
790 error = bus_dmamap_create(sc->sc_dmat, JME_RX_RING_SIZE, 1,(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), ((sizeof
(struct jme_desc) * 256)), (1), ((sizeof(struct jme_desc) * 256
)), (0), (0x0001), (&sc->jme_cdata.jme_rx_ring_map))
791 JME_RX_RING_SIZE, 0, BUS_DMA_NOWAIT,(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), ((sizeof
(struct jme_desc) * 256)), (1), ((sizeof(struct jme_desc) * 256
)), (0), (0x0001), (&sc->jme_cdata.jme_rx_ring_map))
792 &sc->jme_cdata.jme_rx_ring_map)(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), ((sizeof
(struct jme_desc) * 256)), (1), ((sizeof(struct jme_desc) * 256
)), (0), (0x0001), (&sc->jme_cdata.jme_rx_ring_map))
;
793 if (error)
794 return (ENOBUFS55);
795
796 /* Allocate DMA'able memory for RX ring */
797 error = bus_dmamem_alloc(sc->sc_dmat, JME_RX_RING_SIZE, ETHER_ALIGN, 0,(*(sc->sc_dmat)->_dmamem_alloc)((sc->sc_dmat), ((sizeof
(struct jme_desc) * 256)), (2), (0), (&sc->jme_rdata.jme_rx_ring_seg
), (1), (&nsegs), (0x0000 | 0x1000))
798 &sc->jme_rdata.jme_rx_ring_seg, 1, &nsegs,(*(sc->sc_dmat)->_dmamem_alloc)((sc->sc_dmat), ((sizeof
(struct jme_desc) * 256)), (2), (0), (&sc->jme_rdata.jme_rx_ring_seg
), (1), (&nsegs), (0x0000 | 0x1000))
799 BUS_DMA_WAITOK | BUS_DMA_ZERO)(*(sc->sc_dmat)->_dmamem_alloc)((sc->sc_dmat), ((sizeof
(struct jme_desc) * 256)), (2), (0), (&sc->jme_rdata.jme_rx_ring_seg
), (1), (&nsegs), (0x0000 | 0x1000))
;
800/* XXX zero */
801 if (error) {
802 printf("%s: could not allocate DMA'able memory for Rx ring.\n",
803 sc->sc_dev.dv_xname);
804 return error;
805 }
806
807 error = bus_dmamem_map(sc->sc_dmat, &sc->jme_rdata.jme_rx_ring_seg,(*(sc->sc_dmat)->_dmamem_map)((sc->sc_dmat), (&sc
->jme_rdata.jme_rx_ring_seg), (nsegs), ((sizeof(struct jme_desc
) * 256)), ((caddr_t *)&sc->jme_rdata.jme_rx_ring), (0x0001
))
808 nsegs, JME_RX_RING_SIZE, (caddr_t *)&sc->jme_rdata.jme_rx_ring,(*(sc->sc_dmat)->_dmamem_map)((sc->sc_dmat), (&sc
->jme_rdata.jme_rx_ring_seg), (nsegs), ((sizeof(struct jme_desc
) * 256)), ((caddr_t *)&sc->jme_rdata.jme_rx_ring), (0x0001
))
809 BUS_DMA_NOWAIT)(*(sc->sc_dmat)->_dmamem_map)((sc->sc_dmat), (&sc
->jme_rdata.jme_rx_ring_seg), (nsegs), ((sizeof(struct jme_desc
) * 256)), ((caddr_t *)&sc->jme_rdata.jme_rx_ring), (0x0001
))
;
810 if (error)
811 return (ENOBUFS55);
812
813 /* Load the DMA map for Rx ring. */
814 error = bus_dmamap_load(sc->sc_dmat,(*(sc->sc_dmat)->_dmamap_load)((sc->sc_dmat), (sc->
jme_cdata.jme_rx_ring_map), (sc->jme_rdata.jme_rx_ring), (
(sizeof(struct jme_desc) * 256)), (((void *)0)), (0x0001))
815 sc->jme_cdata.jme_rx_ring_map, sc->jme_rdata.jme_rx_ring,(*(sc->sc_dmat)->_dmamap_load)((sc->sc_dmat), (sc->
jme_cdata.jme_rx_ring_map), (sc->jme_rdata.jme_rx_ring), (
(sizeof(struct jme_desc) * 256)), (((void *)0)), (0x0001))
816 JME_RX_RING_SIZE, NULL, BUS_DMA_NOWAIT)(*(sc->sc_dmat)->_dmamap_load)((sc->sc_dmat), (sc->
jme_cdata.jme_rx_ring_map), (sc->jme_rdata.jme_rx_ring), (
(sizeof(struct jme_desc) * 256)), (((void *)0)), (0x0001))
;
817 if (error) {
818 printf("%s: could not load DMA'able memory for Rx ring.\n",
819 sc->sc_dev.dv_xname);
820 bus_dmamem_free(sc->sc_dmat,(*(sc->sc_dmat)->_dmamem_free)((sc->sc_dmat), ((bus_dma_segment_t
*)sc->jme_rdata.jme_rx_ring), (1))
821 (bus_dma_segment_t *)sc->jme_rdata.jme_rx_ring, 1)(*(sc->sc_dmat)->_dmamem_free)((sc->sc_dmat), ((bus_dma_segment_t
*)sc->jme_rdata.jme_rx_ring), (1))
;
822 return error;
823 }
824 sc->jme_rdata.jme_rx_ring_paddr =
825 sc->jme_cdata.jme_rx_ring_map->dm_segs[0].ds_addr;
826
827#if 0
828 /* Tx/Rx descriptor queue should reside within 4GB boundary. */
829 tx_ring_end = sc->jme_rdata.jme_tx_ring_paddr + JME_TX_RING_SIZE(sizeof(struct jme_desc) * 384);
830 rx_ring_end = sc->jme_rdata.jme_rx_ring_paddr + JME_RX_RING_SIZE(sizeof(struct jme_desc) * 256);
831 if ((JME_ADDR_HI(tx_ring_end)((uint64_t) (tx_ring_end) >> 32) !=
832 JME_ADDR_HI(sc->jme_rdata.jme_tx_ring_paddr)((uint64_t) (sc->jme_rdata.jme_tx_ring_paddr) >> 32)) ||
833 (JME_ADDR_HI(rx_ring_end)((uint64_t) (rx_ring_end) >> 32) !=
834 JME_ADDR_HI(sc->jme_rdata.jme_rx_ring_paddr)((uint64_t) (sc->jme_rdata.jme_rx_ring_paddr) >> 32))) {
835 printf("%s: 4GB boundary crossed, switching to 32bit "
836 "DMA address mode.\n", sc->sc_dev.dv_xname);
837 jme_dma_free(sc);
838 /* Limit DMA address space to 32bit and try again. */
839 lowaddr = BUS_SPACE_MAXADDR_32BIT;
840 goto again;
841 }
842#endif
843
844 /*
845 * Create DMA stuffs for shadow status block
846 */
847
848 error = bus_dmamap_create(sc->sc_dmat, JME_SSB_SIZE, 1,(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), (sizeof
(struct jme_ssb)), (1), (sizeof(struct jme_ssb)), (0), (0x0001
), (&sc->jme_cdata.jme_ssb_map))
849 JME_SSB_SIZE, 0, BUS_DMA_NOWAIT, &sc->jme_cdata.jme_ssb_map)(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), (sizeof
(struct jme_ssb)), (1), (sizeof(struct jme_ssb)), (0), (0x0001
), (&sc->jme_cdata.jme_ssb_map))
;
850 if (error)
851 return (ENOBUFS55);
852
853 /* Allocate DMA'able memory for shared status block. */
854 error = bus_dmamem_alloc(sc->sc_dmat, JME_SSB_SIZE, 1, 0,(*(sc->sc_dmat)->_dmamem_alloc)((sc->sc_dmat), (sizeof
(struct jme_ssb)), (1), (0), (&sc->jme_rdata.jme_ssb_block_seg
), (1), (&nsegs), (0x0000))
855 &sc->jme_rdata.jme_ssb_block_seg, 1, &nsegs, BUS_DMA_WAITOK)(*(sc->sc_dmat)->_dmamem_alloc)((sc->sc_dmat), (sizeof
(struct jme_ssb)), (1), (0), (&sc->jme_rdata.jme_ssb_block_seg
), (1), (&nsegs), (0x0000))
;
856 if (error) {
857 printf("%s: could not allocate DMA'able "
858 "memory for shared status block.\n", sc->sc_dev.dv_xname);
859 return error;
860 }
861
862 error = bus_dmamem_map(sc->sc_dmat, &sc->jme_rdata.jme_ssb_block_seg,(*(sc->sc_dmat)->_dmamem_map)((sc->sc_dmat), (&sc
->jme_rdata.jme_ssb_block_seg), (nsegs), (sizeof(struct jme_ssb
)), ((caddr_t *)&sc->jme_rdata.jme_ssb_block), (0x0001
))
863 nsegs, JME_SSB_SIZE, (caddr_t *)&sc->jme_rdata.jme_ssb_block,(*(sc->sc_dmat)->_dmamem_map)((sc->sc_dmat), (&sc
->jme_rdata.jme_ssb_block_seg), (nsegs), (sizeof(struct jme_ssb
)), ((caddr_t *)&sc->jme_rdata.jme_ssb_block), (0x0001
))
864 BUS_DMA_NOWAIT)(*(sc->sc_dmat)->_dmamem_map)((sc->sc_dmat), (&sc
->jme_rdata.jme_ssb_block_seg), (nsegs), (sizeof(struct jme_ssb
)), ((caddr_t *)&sc->jme_rdata.jme_ssb_block), (0x0001
))
;
865 if (error)
866 return (ENOBUFS55);
867
868 /* Load the DMA map for shared status block */
869 error = bus_dmamap_load(sc->sc_dmat,(*(sc->sc_dmat)->_dmamap_load)((sc->sc_dmat), (sc->
jme_cdata.jme_ssb_map), (sc->jme_rdata.jme_ssb_block), (sizeof
(struct jme_ssb)), (((void *)0)), (0x0001))
870 sc->jme_cdata.jme_ssb_map, sc->jme_rdata.jme_ssb_block,(*(sc->sc_dmat)->_dmamap_load)((sc->sc_dmat), (sc->
jme_cdata.jme_ssb_map), (sc->jme_rdata.jme_ssb_block), (sizeof
(struct jme_ssb)), (((void *)0)), (0x0001))
871 JME_SSB_SIZE, NULL, BUS_DMA_NOWAIT)(*(sc->sc_dmat)->_dmamap_load)((sc->sc_dmat), (sc->
jme_cdata.jme_ssb_map), (sc->jme_rdata.jme_ssb_block), (sizeof
(struct jme_ssb)), (((void *)0)), (0x0001))
;
872 if (error) {
873 printf("%s: could not load DMA'able memory "
874 "for shared status block.\n", sc->sc_dev.dv_xname);
875 bus_dmamem_free(sc->sc_dmat,(*(sc->sc_dmat)->_dmamem_free)((sc->sc_dmat), ((bus_dma_segment_t
*)sc->jme_rdata.jme_ssb_block), (1))
876 (bus_dma_segment_t *)sc->jme_rdata.jme_ssb_block, 1)(*(sc->sc_dmat)->_dmamem_free)((sc->sc_dmat), ((bus_dma_segment_t
*)sc->jme_rdata.jme_ssb_block), (1))
;
877 return error;
878 }
879 sc->jme_rdata.jme_ssb_block_paddr =
880 sc->jme_cdata.jme_ssb_map->dm_segs[0].ds_addr;
881
882 /*
883 * Create DMA stuffs for TX buffers
884 */
885
886 /* Create DMA maps for Tx buffers. */
887 for (i = 0; i < JME_TX_RING_CNT384; i++) {
888 txd = &sc->jme_cdata.jme_txdesc[i];
889 error = bus_dmamap_create(sc->sc_dmat, JME_TSO_MAXSIZE,(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), ((65535
+ sizeof(struct ether_vlan_header))), (32), (4096), (0), (0x0001
), (&txd->tx_dmamap))
890 JME_MAXTXSEGS, JME_TSO_MAXSEGSIZE, 0, BUS_DMA_NOWAIT,(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), ((65535
+ sizeof(struct ether_vlan_header))), (32), (4096), (0), (0x0001
), (&txd->tx_dmamap))
891 &txd->tx_dmamap)(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), ((65535
+ sizeof(struct ether_vlan_header))), (32), (4096), (0), (0x0001
), (&txd->tx_dmamap))
;
892 if (error) {
893 int j;
894
895 printf("%s: could not create %dth Tx dmamap.\n",
896 sc->sc_dev.dv_xname, i);
897
898 for (j = 0; j < i; ++j) {
899 txd = &sc->jme_cdata.jme_txdesc[j];
900 bus_dmamap_destroy(sc->sc_dmat, txd->tx_dmamap)(*(sc->sc_dmat)->_dmamap_destroy)((sc->sc_dmat), (txd
->tx_dmamap))
;
901 }
902 return error;
903 }
904
905 }
906
907 /*
908 * Create DMA stuffs for RX buffers
909 */
910
911 /* Create DMA maps for Rx buffers. */
912 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), ((1 <<
11)), (1), ((1 << 11)), (0), (0x0001), (&sc->jme_cdata
.jme_rx_sparemap))
913 0, BUS_DMA_NOWAIT, &sc->jme_cdata.jme_rx_sparemap)(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), ((1 <<
11)), (1), ((1 << 11)), (0), (0x0001), (&sc->jme_cdata
.jme_rx_sparemap))
;
914 if (error) {
915 printf("%s: could not create spare Rx dmamap.\n",
916 sc->sc_dev.dv_xname);
917 return error;
918 }
919 for (i = 0; i < JME_RX_RING_CNT256; i++) {
920 rxd = &sc->jme_cdata.jme_rxdesc[i];
921 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), ((1 <<
11)), (1), ((1 << 11)), (0), (0x0001), (&rxd->rx_dmamap
))
922 0, BUS_DMA_NOWAIT, &rxd->rx_dmamap)(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), ((1 <<
11)), (1), ((1 << 11)), (0), (0x0001), (&rxd->rx_dmamap
))
;
923 if (error) {
924 int j;
925
926 printf("%s: could not create %dth Rx dmamap.\n",
927 sc->sc_dev.dv_xname, i);
928
929 for (j = 0; j < i; ++j) {
930 rxd = &sc->jme_cdata.jme_rxdesc[j];
931 bus_dmamap_destroy(sc->sc_dmat, rxd->rx_dmamap)(*(sc->sc_dmat)->_dmamap_destroy)((sc->sc_dmat), (rxd
->rx_dmamap))
;
932 }
933 bus_dmamap_destroy(sc->sc_dmat,(*(sc->sc_dmat)->_dmamap_destroy)((sc->sc_dmat), (sc
->jme_cdata.jme_rx_sparemap))
934 sc->jme_cdata.jme_rx_sparemap)(*(sc->sc_dmat)->_dmamap_destroy)((sc->sc_dmat), (sc
->jme_cdata.jme_rx_sparemap))
;
935 sc->jme_cdata.jme_rx_tag = NULL((void *)0);
936 return error;
937 }
938 }
939
940 return 0;
941}
942
943void
944jme_dma_free(struct jme_softc *sc)
945{
946 struct jme_txdesc *txd;
947 struct jme_rxdesc *rxd;
948 int i;
949
950 /* Tx ring */
951 bus_dmamap_unload(sc->sc_dmat,(*(sc->sc_dmat)->_dmamap_unload)((sc->sc_dmat), (sc->
jme_cdata.jme_tx_ring_map))
952 sc->jme_cdata.jme_tx_ring_map)(*(sc->sc_dmat)->_dmamap_unload)((sc->sc_dmat), (sc->
jme_cdata.jme_tx_ring_map))
;
953 bus_dmamem_free(sc->sc_dmat,(*(sc->sc_dmat)->_dmamem_free)((sc->sc_dmat), ((bus_dma_segment_t
*)sc->jme_rdata.jme_tx_ring), (1))
954 (bus_dma_segment_t *)sc->jme_rdata.jme_tx_ring, 1)(*(sc->sc_dmat)->_dmamem_free)((sc->sc_dmat), ((bus_dma_segment_t
*)sc->jme_rdata.jme_tx_ring), (1))
;
955
956 /* Rx ring */
957 bus_dmamap_unload(sc->sc_dmat,(*(sc->sc_dmat)->_dmamap_unload)((sc->sc_dmat), (sc->
jme_cdata.jme_rx_ring_map))
958 sc->jme_cdata.jme_rx_ring_map)(*(sc->sc_dmat)->_dmamap_unload)((sc->sc_dmat), (sc->
jme_cdata.jme_rx_ring_map))
;
959 bus_dmamem_free(sc->sc_dmat,(*(sc->sc_dmat)->_dmamem_free)((sc->sc_dmat), ((bus_dma_segment_t
*)sc->jme_rdata.jme_rx_ring), (1))
960 (bus_dma_segment_t *)sc->jme_rdata.jme_rx_ring, 1)(*(sc->sc_dmat)->_dmamem_free)((sc->sc_dmat), ((bus_dma_segment_t
*)sc->jme_rdata.jme_rx_ring), (1))
;
961
962 /* Tx buffers */
963 for (i = 0; i < JME_TX_RING_CNT384; i++) {
964 txd = &sc->jme_cdata.jme_txdesc[i];
965 bus_dmamap_destroy(sc->sc_dmat, txd->tx_dmamap)(*(sc->sc_dmat)->_dmamap_destroy)((sc->sc_dmat), (txd
->tx_dmamap))
;
966 }
967
968 /* Rx buffers */
969 for (i = 0; i < JME_RX_RING_CNT256; i++) {
970 rxd = &sc->jme_cdata.jme_rxdesc[i];
971 bus_dmamap_destroy(sc->sc_dmat, rxd->rx_dmamap)(*(sc->sc_dmat)->_dmamap_destroy)((sc->sc_dmat), (rxd
->rx_dmamap))
;
972 }
973 bus_dmamap_destroy(sc->sc_dmat,(*(sc->sc_dmat)->_dmamap_destroy)((sc->sc_dmat), (sc
->jme_cdata.jme_rx_sparemap))
974 sc->jme_cdata.jme_rx_sparemap)(*(sc->sc_dmat)->_dmamap_destroy)((sc->sc_dmat), (sc
->jme_cdata.jme_rx_sparemap))
;
975
976 /* Shadow status block. */
977 bus_dmamap_unload(sc->sc_dmat,(*(sc->sc_dmat)->_dmamap_unload)((sc->sc_dmat), (sc->
jme_cdata.jme_ssb_map))
978 sc->jme_cdata.jme_ssb_map)(*(sc->sc_dmat)->_dmamap_unload)((sc->sc_dmat), (sc->
jme_cdata.jme_ssb_map))
;
979 bus_dmamem_free(sc->sc_dmat,(*(sc->sc_dmat)->_dmamem_free)((sc->sc_dmat), ((bus_dma_segment_t
*)sc->jme_rdata.jme_ssb_block), (1))
980 (bus_dma_segment_t *)sc->jme_rdata.jme_ssb_block, 1)(*(sc->sc_dmat)->_dmamem_free)((sc->sc_dmat), ((bus_dma_segment_t
*)sc->jme_rdata.jme_ssb_block), (1))
;
981}
982
983#ifdef notyet
984/*
985 * Unlike other ethernet controllers, JMC250 requires
986 * explicit resetting link speed to 10/100Mbps as gigabit
987 * link will consume more power than 375mA.
988 * Note, we reset the link speed to 10/100Mbps with
989 * auto-negotiation but we don't know whether that operation
990 * would succeed or not as we have no control after powering
991 * off. If the renegotiation fail WOL may not work. Running
992 * at 1Gbps draws more power than 375mA at 3.3V which is
993 * specified in PCI specification and that would result in
994 * a complete shutdown of power to the ethernet controller.
995 *
996 * TODO
997 * Save current negotiated media speed/duplex/flow-control
998 * to softc and restore the same link again after resuming.
999 * PHY handling such as power down/resetting to 100Mbps
1000 * may be better handled in suspend method in phy driver.
1001 */
1002void
1003jme_setlinkspeed(struct jme_softc *sc)
1004{
1005 struct mii_data *mii;
1006 int aneg, i;
1007
1008 JME_LOCK_ASSERT(sc);
1009
1010 mii = &sc->sc_miibus;
1011 mii_pollstat(mii);
1012 aneg = 0;
1013 if ((mii->mii_media_status & IFM_AVALID0x0000000000000001ULL) != 0) {
1014 switch IFM_SUBTYPE(mii->mii_media_active)((mii->mii_media_active) & 0x00000000000000ffULL) {
1015 case IFM_10_T3:
1016 case IFM_100_TX6:
1017 return;
1018 case IFM_1000_T16:
1019 aneg++;
1020 default:
1021 break;
1022 }
1023 }
1024 jme_miibus_writereg(&sc->sc_dev, sc->jme_phyaddr, MII_100T2CR, 0);
1025 jme_miibus_writereg(&sc->sc_dev, sc->jme_phyaddr, MII_ANAR,
1026 ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
1027 jme_miibus_writereg(&sc->sc_dev, sc->jme_phyaddr, MII_BMCR,
1028 BMCR_AUTOEN | BMCR_STARTNEG);
1029 DELAY(1000)(*delay_func)(1000);
1030 if (aneg != 0) {
1031 /* Poll link state until jme(4) get a 10/100 link. */
1032 for (i = 0; i < MII_ANEGTICKS_GIGE10; i++) {
1033 mii_pollstat(mii);
1034 if ((mii->mii_media_status & IFM_AVALID0x0000000000000001ULL) != 0) {
1035 switch (IFM_SUBTYPE(mii->mii_media_active)((mii->mii_media_active) & 0x00000000000000ffULL)) {
1036 case IFM_10_T3:
1037 case IFM_100_TX6:
1038 jme_mac_config(sc);
1039 return;
1040 default:
1041 break;
1042 }
1043 }
1044 JME_UNLOCK(sc);
1045 pause("jmelnk", hz);
1046 JME_LOCK(sc);
1047 }
1048 if (i == MII_ANEGTICKS_GIGE10)
1049 printf("%s: establishing link failed, "
1050 "WOL may not work!\n", sc->sc_dev.dv_xname);
1051 }
1052 /*
1053 * No link, force MAC to have 100Mbps, full-duplex link.
1054 * This is the last resort and may/may not work.
1055 */
1056 mii->mii_media_status = IFM_AVALID0x0000000000000001ULL | IFM_ACTIVE0x0000000000000002ULL;
1057 mii->mii_media_active = IFM_ETHER0x0000000000000100ULL | IFM_100_TX6 | IFM_FDX0x0000010000000000ULL;
1058 jme_mac_config(sc);
1059}
1060
1061void
1062jme_setwol(struct jme_softc *sc)
1063{
1064 struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1065 uint32_t gpr, pmcs;
1066 uint16_t pmstat;
1067 int pmc;
1068
1069 if (pci_find_extcap(sc->sc_dev, PCIY_PMG, &pmc) != 0) {
1070 /* No PME capability, PHY power down. */
1071 jme_miibus_writereg(&sc->sc_dev, sc->jme_phyaddr,
1072 MII_BMCR, BMCR_PDOWN);
1073 return;
1074 }
1075
1076 gpr = CSR_READ_4(sc, JME_GPREG0)(((sc)->jme_mem_bt)->read_4(((sc)->jme_mem_bh), ((0x0808
))))
& ~GPREG0_PME_ENB0x00000020;
1077 pmcs = CSR_READ_4(sc, JME_PMCS)(((sc)->jme_mem_bt)->read_4(((sc)->jme_mem_bh), ((0x0060
))))
;
1078 pmcs &= ~PMCS_WOL_ENB_MASK0x0000FFFF;
1079 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) {
1080 pmcs |= PMCS_MAGIC_FRAME0x00010000 | PMCS_MAGIC_FRAME_ENB0x00000001;
1081 /* Enable PME message. */
1082 gpr |= GPREG0_PME_ENB0x00000020;
1083 /* For gigabit controllers, reset link speed to 10/100. */
1084 if ((sc->jme_caps & JME_CAP_FASTETH0x0008) == 0)
1085 jme_setlinkspeed(sc);
1086 }
1087
1088 CSR_WRITE_4(sc, JME_PMCS, pmcs)(((sc)->jme_mem_bt)->write_4(((sc)->jme_mem_bh), ((0x0060
)), ((pmcs))))
;
1089 CSR_WRITE_4(sc, JME_GPREG0, gpr)(((sc)->jme_mem_bt)->write_4(((sc)->jme_mem_bh), ((0x0808
)), ((gpr))))
;
1090
1091 /* Request PME. */
1092 pmstat = pci_read_config(sc->sc_dev, pmc + PCIR_POWER_STATUS, 2);
1093 pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
1094 if ((ifp->if_capenable & IFCAP_WOL0x00008000) != 0)
1095 pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
1096 pci_write_config(sc->sc_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
1097 if ((ifp->if_capenable & IFCAP_WOL0x00008000) == 0) {
1098 /* No WOL, PHY power down. */
1099 jme_miibus_writereg(&sc->sc_dev, sc->jme_phyaddr,
1100 MII_BMCR, BMCR_PDOWN);
1101 }
1102}
1103#endif
1104
1105int
1106jme_encap(struct jme_softc *sc, struct mbuf *m)
1107{
1108 struct jme_txdesc *txd;
1109 struct jme_desc *desc;
1110 int error, i, prod;
1111 uint32_t cflags;
1112
1113 prod = sc->jme_cdata.jme_tx_prod;
1114 txd = &sc->jme_cdata.jme_txdesc[prod];
1115
1116 error = bus_dmamap_load_mbuf(sc->sc_dmat, txd->tx_dmamap,(*(sc->sc_dmat)->_dmamap_load_mbuf)((sc->sc_dmat), (
txd->tx_dmamap), (m), (0x0001))
1117 m, BUS_DMA_NOWAIT)(*(sc->sc_dmat)->_dmamap_load_mbuf)((sc->sc_dmat), (
txd->tx_dmamap), (m), (0x0001))
;
1118 if (error != 0 && error != EFBIG27)
1119 goto drop;
1120 if (error != 0) {
1121 if (m_defrag(m, M_DONTWAIT0x0002)) {
1122 error = ENOBUFS55;
1123 goto drop;
1124 }
1125 error = bus_dmamap_load_mbuf(sc->sc_dmat, txd->tx_dmamap,(*(sc->sc_dmat)->_dmamap_load_mbuf)((sc->sc_dmat), (
txd->tx_dmamap), (m), (0x0001))
1126 m, BUS_DMA_NOWAIT)(*(sc->sc_dmat)->_dmamap_load_mbuf)((sc->sc_dmat), (
txd->tx_dmamap), (m), (0x0001))
;
1127 if (error != 0)
1128 goto drop;
1129 }
1130
1131 cflags = 0;
1132
1133 /* Configure checksum offload. */
1134 if (m->m_pkthdrM_dat.MH.MH_pkthdr.csum_flags & M_IPV4_CSUM_OUT0x0001)
1135 cflags |= JME_TD_IPCSUM0x04000000;
1136 if (m->m_pkthdrM_dat.MH.MH_pkthdr.csum_flags & M_TCP_CSUM_OUT0x0002)
1137 cflags |= JME_TD_TCPCSUM0x10000000;
1138 if (m->m_pkthdrM_dat.MH.MH_pkthdr.csum_flags & M_UDP_CSUM_OUT0x0004)
1139 cflags |= JME_TD_UDPCSUM0x08000000;
1140
1141#if NVLAN1 > 0
1142 /* Configure VLAN. */
1143 if (m->m_flagsm_hdr.mh_flags & M_VLANTAG0x0020) {
1144 cflags |= (m->m_pkthdrM_dat.MH.MH_pkthdr.ether_vtag & JME_TD_VLAN_MASK0x0000FFFF);
1145 cflags |= JME_TD_VLAN_TAG0x01000000;
1146 }
1147#endif
1148
1149 desc = &sc->jme_rdata.jme_tx_ring[prod];
1150 desc->flags = htole32(cflags)((__uint32_t)(cflags));
1151 desc->buflen = 0;
1152 desc->addr_hi = htole32(m->m_pkthdr.len)((__uint32_t)(m->M_dat.MH.MH_pkthdr.len));
1153 desc->addr_lo = 0;
1154 sc->jme_cdata.jme_tx_cnt++;
1155 JME_DESC_INC(prod, JME_TX_RING_CNT)((prod) = ((prod) + 1) % (384));
1156 for (i = 0; i < txd->tx_dmamap->dm_nsegs; i++) {
1157 desc = &sc->jme_rdata.jme_tx_ring[prod];
1158 desc->flags = htole32(JME_TD_OWN | JME_TD_64BIT)((__uint32_t)(0x80000000 | 0x20000000));
1159 desc->buflen = htole32(txd->tx_dmamap->dm_segs[i].ds_len)((__uint32_t)(txd->tx_dmamap->dm_segs[i].ds_len));
1160 desc->addr_hi =
1161 htole32(JME_ADDR_HI(txd->tx_dmamap->dm_segs[i].ds_addr))((__uint32_t)(((uint64_t) (txd->tx_dmamap->dm_segs[i].ds_addr
) >> 32)))
;
1162 desc->addr_lo =
1163 htole32(JME_ADDR_LO(txd->tx_dmamap->dm_segs[i].ds_addr))((__uint32_t)(((uint64_t) (txd->tx_dmamap->dm_segs[i].ds_addr
) & 0xFFFFFFFF)))
;
1164 sc->jme_cdata.jme_tx_cnt++;
1165 JME_DESC_INC(prod, JME_TX_RING_CNT)((prod) = ((prod) + 1) % (384));
1166 }
1167
1168 /* Update producer index. */
1169 sc->jme_cdata.jme_tx_prod = prod;
1170 /*
1171 * Finally request interrupt and give the first descriptor
1172 * ownership to hardware.
1173 */
1174 desc = txd->tx_desc;
1175 desc->flags |= htole32(JME_TD_OWN | JME_TD_INTR)((__uint32_t)(0x80000000 | 0x40000000));
1176
1177 txd->tx_m = m;
1178 txd->tx_ndesc = txd->tx_dmamap->dm_nsegs + JME_TXD_RSVD1;
1179
1180 /* Sync descriptors. */
1181 bus_dmamap_sync(sc->sc_dmat, txd->tx_dmamap, 0,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (txd->
tx_dmamap), (0), (txd->tx_dmamap->dm_mapsize), (0x04))
1182 txd->tx_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (txd->
tx_dmamap), (0), (txd->tx_dmamap->dm_mapsize), (0x04))
;
1183 bus_dmamap_sync(sc->sc_dmat, sc->jme_cdata.jme_tx_ring_map, 0,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (sc->
jme_cdata.jme_tx_ring_map), (0), (sc->jme_cdata.jme_tx_ring_map
->dm_mapsize), (0x04))
1184 sc->jme_cdata.jme_tx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (sc->
jme_cdata.jme_tx_ring_map), (0), (sc->jme_cdata.jme_tx_ring_map
->dm_mapsize), (0x04))
;
1185
1186 return (0);
1187
1188 drop:
1189 m_freem(m);
1190 return (error);
1191}
1192
1193void
1194jme_start(struct ifnet *ifp)
1195{
1196 struct jme_softc *sc = ifp->if_softc;
1197 struct mbuf *m;
1198 int enq = 0;
1199
1200 /* Reclaim transmitted frames. */
1201 if (sc->jme_cdata.jme_tx_cnt >= JME_TX_DESC_HIWAT(384 - (((384) * 3) / 10)))
1202 jme_txeof(sc);
1203
1204 if (!(ifp->if_flags & IFF_RUNNING0x40) || ifq_is_oactive(&ifp->if_snd))
1205 return;
1206 if ((sc->jme_flags & JME_FLAG_LINK0x0008) == 0)
1207 return;
1208 if (ifq_empty(&ifp->if_snd)(((&ifp->if_snd)->ifq_len) == 0))
1209 return;
1210
1211 for (;;) {
1212 /*
1213 * Check number of available TX descs, always
1214 * leave JME_TXD_RSVD free TX descs.
1215 */
1216 if (sc->jme_cdata.jme_tx_cnt + JME_TXD_RSVD1 >
1217 JME_TX_RING_CNT384 - JME_TXD_RSVD1) {
1218 ifq_set_oactive(&ifp->if_snd);
1219 break;
1220 }
1221
1222 m = ifq_dequeue(&ifp->if_snd);
1223 if (m == NULL((void *)0))
1224 break;
1225
1226 /*
1227 * Pack the data into the transmit ring. If we
1228 * don't have room, set the OACTIVE flag and wait
1229 * for the NIC to drain the ring.
1230 */
1231 if (jme_encap(sc, m) != 0) {
1232 ifp->if_oerrorsif_data.ifi_oerrors++;
1233 continue;
1234 }
1235
1236 enq++;
1237
1238#if NBPFILTER1 > 0
1239 /*
1240 * If there's a BPF listener, bounce a copy of this frame
1241 * to him.
1242 */
1243 if (ifp->if_bpf != NULL((void *)0))
1244 bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT(1 << 1));
1245#endif
1246 }
1247
1248 if (enq > 0) {
1249 /*
1250 * Reading TXCSR takes very long time under heavy load
1251 * so cache TXCSR value and writes the ORed value with
1252 * the kick command to the TXCSR. This saves one register
1253 * access cycle.
1254 */
1255 CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB |(((sc)->jme_mem_bt)->write_4(((sc)->jme_mem_bh), ((0x0000
)), ((sc->jme_txcsr | 0x00000001 | (0x00000001 << (8
+ (0)))))))
1256 TXCSR_TXQ_N_START(TXCSR_TXQ0))(((sc)->jme_mem_bt)->write_4(((sc)->jme_mem_bh), ((0x0000
)), ((sc->jme_txcsr | 0x00000001 | (0x00000001 << (8
+ (0)))))))
;
1257 /* Set a timeout in case the chip goes out to lunch. */
1258 ifp->if_timer = JME_TX_TIMEOUT5;
1259 }
1260}
1261
1262void
1263jme_watchdog(struct ifnet *ifp)
1264{
1265 struct jme_softc *sc = ifp->if_softc;
1266
1267 if ((sc->jme_flags & JME_FLAG_LINK0x0008) == 0) {
1268 printf("%s: watchdog timeout (missed link)\n",
1269 sc->sc_dev.dv_xname);
1270 ifp->if_oerrorsif_data.ifi_oerrors++;
1271 jme_init(ifp);
1272 return;
1273 }
1274
1275 jme_txeof(sc);
1276 if (sc->jme_cdata.jme_tx_cnt == 0) {
1277 printf("%s: watchdog timeout (missed Tx interrupts) "
1278 "-- recovering\n", sc->sc_dev.dv_xname);
1279 jme_start(ifp);
1280 return;
1281 }
1282
1283 printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
1284 ifp->if_oerrorsif_data.ifi_oerrors++;
1285 jme_init(ifp);
1286 jme_start(ifp);
1287}
1288
1289int
1290jme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1291{
1292 struct jme_softc *sc = ifp->if_softc;
1293 struct mii_data *mii = &sc->sc_miibus;
1294 struct ifreq *ifr = (struct ifreq *)data;
1295 int error = 0, s;
1296
1297 s = splnet()splraise(0x7);
1298
1299 switch (cmd) {
1300 case SIOCSIFADDR((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((12)))
:
1301 ifp->if_flags |= IFF_UP0x1;
1302 if (!(ifp->if_flags & IFF_RUNNING0x40))
1303 jme_init(ifp);
1304 break;
1305
1306 case SIOCSIFFLAGS((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((16)))
:
1307 if (ifp->if_flags & IFF_UP0x1) {
1308 if (ifp->if_flags & IFF_RUNNING0x40)
1309 error = ENETRESET52;
1310 else
1311 jme_init(ifp);
1312 } else {
1313 if (ifp->if_flags & IFF_RUNNING0x40)
1314 jme_stop(sc);
1315 }
1316 break;
1317
1318 case SIOCSIFMEDIA(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((55)))
:
1319 case SIOCGIFMEDIA(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifmediareq) & 0x1fff) << 16) | ((('i')) <<
8) | ((56)))
:
1320 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
1321 break;
1322
1323 default:
1324 error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
1325 }
1326
1327 if (error == ENETRESET52) {
1328 if (ifp->if_flags & IFF_RUNNING0x40)
1329 jme_iff(sc);
1330 error = 0;
1331 }
1332
1333 splx(s)spllower(s);
1334 return (error);
1335}
1336
1337void
1338jme_mac_config(struct jme_softc *sc)
1339{
1340 struct mii_data *mii;
1341 uint32_t ghc, rxmac, txmac, txpause, gp1;
1342 int phyconf = JMPHY_CONF_DEFFIFO0x0004, hdx = 0;
1343
1344 mii = &sc->sc_miibus;
1345
1346 CSR_WRITE_4(sc, JME_GHC, GHC_RESET)(((sc)->jme_mem_bt)->write_4(((sc)->jme_mem_bh), ((0x0054
)), ((0x40000000))))
;
1347 DELAY(10)(*delay_func)(10);
1348 CSR_WRITE_4(sc, JME_GHC, 0)(((sc)->jme_mem_bt)->write_4(((sc)->jme_mem_bh), ((0x0054
)), ((0))))
;
1349 ghc = 0;
1350 rxmac = CSR_READ_4(sc, JME_RXMAC)(((sc)->jme_mem_bt)->read_4(((sc)->jme_mem_bh), ((0x0034
))))
;
1351 rxmac &= ~RXMAC_FC_ENB0x00000008;
1352 txmac = CSR_READ_4(sc, JME_TXMAC)(((sc)->jme_mem_bt)->read_4(((sc)->jme_mem_bh), ((0x0014
))))
;
1353 txmac &= ~(TXMAC_CARRIER_EXT0x00000040 | TXMAC_FRAME_BURST0x00000080);
1354 txpause = CSR_READ_4(sc, JME_TXPFC)(((sc)->jme_mem_bt)->read_4(((sc)->jme_mem_bh), ((0x0018
))))
;
1355 txpause &= ~TXPFC_PAUSE_ENB0x00000001;
1356 if ((IFM_OPTIONS(mii->mii_media_active)((mii->mii_media_active) & (0x00000000ffff0000ULL|0x00ffff0000000000ULL
))
& IFM_FDX0x0000010000000000ULL) != 0) {
1357 ghc |= GHC_FULL_DUPLEX0x00000040;
1358 rxmac &= ~RXMAC_COLL_DET_ENB0x00000020;
1359 txmac &= ~(TXMAC_COLL_ENB0x00000004 | TXMAC_CARRIER_SENSE0x00000008 |
1360 TXMAC_BACKOFF0x00000010 | TXMAC_CARRIER_EXT0x00000040 |
1361 TXMAC_FRAME_BURST0x00000080);
1362 if ((IFM_OPTIONS(mii->mii_media_active)((mii->mii_media_active) & (0x00000000ffff0000ULL|0x00ffff0000000000ULL
))
& IFM_ETH_TXPAUSE0x0000000000040000ULL) != 0)
1363 txpause |= TXPFC_PAUSE_ENB0x00000001;
1364 if ((IFM_OPTIONS(mii->mii_media_active)((mii->mii_media_active) & (0x00000000ffff0000ULL|0x00ffff0000000000ULL
))
& IFM_ETH_RXPAUSE0x0000000000020000ULL) != 0)
1365 rxmac |= RXMAC_FC_ENB0x00000008;
1366 /* Disable retry transmit timer/retry limit. */
1367 CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) &(((sc)->jme_mem_bt)->write_4(((sc)->jme_mem_bh), ((0x001C
)), (((((sc)->jme_mem_bt)->read_4(((sc)->jme_mem_bh)
, ((0x001C)))) & ~(0x80000000 | 0x00000080)))))
1368 ~(TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB))(((sc)->jme_mem_bt)->write_4(((sc)->jme_mem_bh), ((0x001C
)), (((((sc)->jme_mem_bt)->read_4(((sc)->jme_mem_bh)
, ((0x001C)))) & ~(0x80000000 | 0x00000080)))))
;
1369 } else {
1370 rxmac |= RXMAC_COLL_DET_ENB0x00000020;
1371 txmac |= TXMAC_COLL_ENB0x00000004 | TXMAC_CARRIER_SENSE0x00000008 | TXMAC_BACKOFF0x00000010;
1372 /* Enable retry transmit timer/retry limit. */
1373 CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) |(((sc)->jme_mem_bt)->write_4(((sc)->jme_mem_bh), ((0x001C
)), (((((sc)->jme_mem_bt)->read_4(((sc)->jme_mem_bh)
, ((0x001C)))) | 0x80000000 | 0x00000080))))
1374 TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB)(((sc)->jme_mem_bt)->write_4(((sc)->jme_mem_bh), ((0x001C
)), (((((sc)->jme_mem_bt)->read_4(((sc)->jme_mem_bh)
, ((0x001C)))) | 0x80000000 | 0x00000080))))
;
1375 }
1376
1377 /*
1378 * Reprogram Tx/Rx MACs with resolved speed/duplex.
1379 */
1380 gp1 = CSR_READ_4(sc, JME_GPREG1)(((sc)->jme_mem_bt)->read_4(((sc)->jme_mem_bh), ((0x080C
))))
;
1381 gp1 &= ~GPREG1_HALF_PATCH0x00000020;
1382
1383 if ((IFM_OPTIONS(mii->mii_media_active)((mii->mii_media_active) & (0x00000000ffff0000ULL|0x00ffff0000000000ULL
))
& IFM_FDX0x0000010000000000ULL) == 0)
1384 hdx = 1;
1385
1386 switch (IFM_SUBTYPE(mii->mii_media_active)((mii->mii_media_active) & 0x00000000000000ffULL)) {
1387 case IFM_10_T3:
1388 ghc |= GHC_SPEED_100x00000010;
1389 if (hdx)
1390 gp1 |= GPREG1_HALF_PATCH0x00000020;
1391 break;
1392
1393 case IFM_100_TX6:
1394 ghc |= GHC_SPEED_1000x00000020;
1395 if (hdx)
1396 gp1 |= GPREG1_HALF_PATCH0x00000020;
1397
1398 /*
1399 * Use extended FIFO depth to workaround CRC errors
1400 * emitted by chips before JMC250B
1401 */
1402 phyconf = JMPHY_CONF_EXTFIFO0x0000;
1403 break;
1404
1405 case IFM_1000_T16:
1406 if (sc->jme_caps & JME_CAP_FASTETH0x0008)
1407 break;
1408
1409 ghc |= GHC_SPEED_10000x00000030;
1410 if (hdx)
1411 txmac |= TXMAC_CARRIER_EXT0x00000040 | TXMAC_FRAME_BURST0x00000080;
1412 break;
1413
1414 default:
1415 break;
1416 }
1417
1418 if (sc->jme_revfm >= 2) {
1419 /* set clock sources for tx mac and offload engine */
1420 if (IFM_SUBTYPE(mii->mii_media_active)((mii->mii_media_active) & 0x00000000000000ffULL) == IFM_1000_T16)
1421 ghc |= GHC_TCPCK_10000x00400000 | GHC_TXCK_10000x00100000;
1422 else
1423 ghc |= GHC_TCPCK_10_1000x00800000 | GHC_TXCK_10_1000x00200000;
1424 }
1425
1426 CSR_WRITE_4(sc, JME_GHC, ghc)(((sc)->jme_mem_bt)->write_4(((sc)->jme_mem_bh), ((0x0054
)), ((ghc))))
;
1427 CSR_WRITE_4(sc, JME_RXMAC, rxmac)(((sc)->jme_mem_bt)->write_4(((sc)->jme_mem_bh), ((0x0034
)), ((rxmac))))
;
1428 CSR_WRITE_4(sc, JME_TXMAC, txmac)(((sc)->jme_mem_bt)->write_4(((sc)->jme_mem_bh), ((0x0014
)), ((txmac))))
;
1429 CSR_WRITE_4(sc, JME_TXPFC, txpause)(((sc)->jme_mem_bt)->write_4(((sc)->jme_mem_bh), ((0x0018
)), ((txpause))))
;
1430
1431 if (sc->jme_workaround & JME_WA_CRCERRORS0x0001) {
1432 jme_miibus_writereg(&sc->sc_dev, sc->jme_phyaddr,
1433 JMPHY_CONF0x1B, phyconf);
1434 }
1435 if (sc->jme_workaround & JME_WA_PACKETLOSS0x0002)
1436 CSR_WRITE_4(sc, JME_GPREG1, gp1)(((sc)->jme_mem_bt)->write_4(((sc)->jme_mem_bh), ((0x080C
)), ((gp1))))
;
1437}
1438
/*
 * jme_intr: PCI interrupt service routine.
 * Returns 1 when the interrupt was for this device (claimed), 0 otherwise.
 * Reads the request-status register first (0x0824); 0 or 0xFFFFFFFF means
 * "not ours" / device gone. Interrupts are masked for the duration of the
 * handler and re-enabled at "back:".
 * (Analyzer-annotated view: each macro is followed by its expansion.)
 */
1439 int
1440jme_intr(void *xsc)
1441{
1442 struct jme_softc *sc = xsc;
1443 struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1444 uint32_t status;
1445 int claimed = 0;
1446
1447 status = CSR_READ_4(sc, JME_INTR_REQ_STATUS)(((sc)->jme_mem_bt)->read_4(((sc)->jme_mem_bh), ((0x0824
))))
;
1448 if (status == 0 || status == 0xFFFFFFFF)
1449 return (0);
1450
1451 /* Disable interrupts. */
1452 CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS)(((sc)->jme_mem_bt)->write_4(((sc)->jme_mem_bh), ((0x082C
)), ((((0x02000000 | 0x01000000 | 0x00800000 | 0x00400000) | 0x00200000
| (0x00100000 | 0x00080000 | 0x00040000 | 0x00020000) | 0x00010000
| (0x00001000 | 0x00002000 | 0x00004000 | 0x00008000))))))
;
1453
1454 status = CSR_READ_4(sc, JME_INTR_STATUS)(((sc)->jme_mem_bt)->read_4(((sc)->jme_mem_bh), ((0x0820
))))
;
1455 if ((status & JME_INTRS((0x02000000 | 0x01000000 | 0x00800000 | 0x00400000) | 0x00200000
| (0x00100000 | 0x00080000 | 0x00040000 | 0x00020000) | 0x00010000
| (0x00001000 | 0x00002000 | 0x00004000 | 0x00008000))
) == 0 || status == 0xFFFFFFFF)
1456 goto back;
1457
1458 /* Reset PCC counter/timer and Ack interrupts. */
/*
 * COMP (completion) bits are cleared from the ack value, then re-added
 * only when the corresponding COAL/COAL_TO bits fired, so the write
 * below acknowledges exactly the coalescing events being serviced.
 */
1459 status &= ~(INTR_TXQ_COMP(0x00000001 | 0x00000002 | 0x00000004 | 0x00000008 | 0x00000010
| 0x00000020 | 0x00000040 | 0x00000080)
| INTR_RXQ_COMP(0x00000100 | 0x00000200 | 0x00000400 | 0x00000800));
1460 if (status & (INTR_TXQ_COAL0x00010000 | INTR_TXQ_COAL_TO0x00200000))
1461 status |= INTR_TXQ_COAL0x00010000 | INTR_TXQ_COAL_TO0x00200000 | INTR_TXQ_COMP(0x00000001 | 0x00000002 | 0x00000004 | 0x00000008 | 0x00000010
| 0x00000020 | 0x00000040 | 0x00000080)
;
1462 if (status & (INTR_RXQ_COAL(0x00100000 | 0x00080000 | 0x00040000 | 0x00020000) | INTR_RXQ_COAL_TO(0x02000000 | 0x01000000 | 0x00800000 | 0x00400000)))
1463 status |= INTR_RXQ_COAL(0x00100000 | 0x00080000 | 0x00040000 | 0x00020000) | INTR_RXQ_COAL_TO(0x02000000 | 0x01000000 | 0x00800000 | 0x00400000) | INTR_RXQ_COMP(0x00000100 | 0x00000200 | 0x00000400 | 0x00000800);
1464 CSR_WRITE_4(sc, JME_INTR_STATUS, status)(((sc)->jme_mem_bt)->write_4(((sc)->jme_mem_bh), ((0x0820
)), ((status))))
;
1465
1466 if (ifp->if_flags & IFF_RUNNING0x40) {
1467 if (status & (INTR_RXQ_COAL(0x00100000 | 0x00080000 | 0x00040000 | 0x00020000) | INTR_RXQ_COAL_TO(0x02000000 | 0x01000000 | 0x00800000 | 0x00400000)))
1468 jme_rxeof(sc);
1469
1470 if (status & INTR_RXQ_DESC_EMPTY(0x00001000 | 0x00002000 | 0x00004000 | 0x00008000)) {
1471 /*
1472 * Notify hardware availability of new Rx buffers.
1473 * Reading RXCSR takes very long time under heavy
1474 * load so cache RXCSR value and writes the ORed
1475 * value with the kick command to the RXCSR. This
1476 * saves one register access cycle.
1477 */
1478 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |(((sc)->jme_mem_bt)->write_4(((sc)->jme_mem_bh), ((0x0020
)), ((sc->jme_rxcsr | 0x00000001 | 0x00000004))))
1479 RXCSR_RX_ENB | RXCSR_RXQ_START)(((sc)->jme_mem_bt)->write_4(((sc)->jme_mem_bh), ((0x0020
)), ((sc->jme_rxcsr | 0x00000001 | 0x00000004))))
;
1480 }
1481
1482 if (status & (INTR_TXQ_COAL0x00010000 | INTR_TXQ_COAL_TO0x00200000)) {
1483 jme_txeof(sc);
1484 jme_start(ifp);
1485 }
1486 }
1487 claimed = 1;
1488back:
1489 /* Reenable interrupts. */
1490 CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS)(((sc)->jme_mem_bt)->write_4(((sc)->jme_mem_bh), ((0x0828
)), ((((0x02000000 | 0x01000000 | 0x00800000 | 0x00400000) | 0x00200000
| (0x00100000 | 0x00080000 | 0x00040000 | 0x00020000) | 0x00010000
| (0x00001000 | 0x00002000 | 0x00004000 | 0x00008000))))))
;
1491
1492 return (claimed);
1493}
1494
/*
 * jme_txeof: reclaim completed Tx descriptors.
 * Walks the Tx ring from jme_tx_cons toward jme_tx_prod, freeing the mbuf
 * and unloading the DMA map for each fully-transmitted frame, and updates
 * error/collision interface counters from the descriptor status bits.
 * Stops at the first descriptor still owned by the hardware (JME_TD_OWN).
 * Clears the watchdog timer when the ring drains and un-throttles the send
 * queue when enough descriptors are free.
 * (Analyzer-annotated view: each macro is followed by its expansion.)
 */
1495 void
1496jme_txeof(struct jme_softc *sc)
1497{
1498 struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1499 struct jme_txdesc *txd;
1500 uint32_t status;
1501 int cons, nsegs;
1502
1503 cons = sc->jme_cdata.jme_tx_cons;
1504 if (cons == sc->jme_cdata.jme_tx_prod)
1505 return;
1506
/* POSTREAD sync before inspecting descriptors the device may have written. */
1507 bus_dmamap_sync(sc->sc_dmat, sc->jme_cdata.jme_tx_ring_map, 0,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (sc->
jme_cdata.jme_tx_ring_map), (0), (sc->jme_cdata.jme_tx_ring_map
->dm_mapsize), (0x02))
1508 sc->jme_cdata.jme_tx_ring_map->dm_mapsize, BUS_DMASYNC_POSTREAD)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (sc->
jme_cdata.jme_tx_ring_map), (0), (sc->jme_cdata.jme_tx_ring_map
->dm_mapsize), (0x02))
;
1509
1510 /*
1511 * Go through our Tx list and free mbufs for those
1512 * frames which have been transmitted.
1513 */
1514 while (cons != sc->jme_cdata.jme_tx_prod) {
1515 txd = &sc->jme_cdata.jme_txdesc[cons];
1516
1517 if (txd->tx_m == NULL((void *)0))
1518 panic("%s: freeing NULL mbuf!", sc->sc_dev.dv_xname);
1519
1520 status = letoh32(txd->tx_desc->flags)((__uint32_t)(txd->tx_desc->flags));
1521 if ((status & JME_TD_OWN0x80000000) == JME_TD_OWN0x80000000)
1522 break;
1523
1524 if (status & (JME_TD_TMOUT0x20000000 | JME_TD_RETRY_EXP0x10000000)) {
1525 ifp->if_oerrorsif_data.ifi_oerrors++;
1526 } else {
1527 if (status & JME_TD_COLLISION0x08000000) {
/* Collision count is carried in the low 16 bits of buflen. */
1528 ifp->if_collisionsif_data.ifi_collisions +=
1529 letoh32(txd->tx_desc->buflen)((__uint32_t)(txd->tx_desc->buflen)) &
1530 JME_TD_BUF_LEN_MASK0x0000FFFF;
1531 }
1532 }
1533
1534 /*
1535 * Only the first descriptor of multi-descriptor
1536 * transmission is updated so driver have to skip entire
1537 * chained buffers for the transmitted frame. In other
1538 * words, JME_TD_OWN bit is valid only at the first
1539 * descriptor of a multi-descriptor transmission.
1540 */
1541 for (nsegs = 0; nsegs < txd->tx_ndesc; nsegs++) {
1542 sc->jme_rdata.jme_tx_ring[cons].flags = 0;
1543 JME_DESC_INC(cons, JME_TX_RING_CNT)((cons) = ((cons) + 1) % (384));
1544 }
1545
1546 /* Reclaim transferred mbufs. */
1547 bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap)(*(sc->sc_dmat)->_dmamap_unload)((sc->sc_dmat), (txd
->tx_dmamap))
;
1548 m_freem(txd->tx_m);
1549 txd->tx_m = NULL((void *)0);
1550 sc->jme_cdata.jme_tx_cnt -= txd->tx_ndesc;
1551 if (sc->jme_cdata.jme_tx_cnt < 0)
1552 panic("%s: Active Tx desc counter was garbled",
1553 sc->sc_dev.dv_xname);
1554 txd->tx_ndesc = 0;
1555 }
1556 sc->jme_cdata.jme_tx_cons = cons;
1557
1558 if (sc->jme_cdata.jme_tx_cnt == 0)
1559 ifp->if_timer = 0;
1560
1561 if (sc->jme_cdata.jme_tx_cnt + JME_TXD_RSVD1 <=
1562 JME_TX_RING_CNT384 - JME_TXD_RSVD1)
1563 ifq_clr_oactive(&ifp->if_snd);
1564
/* PREWRITE sync: publish the cleared descriptor flags back to the device. */
1565 bus_dmamap_sync(sc->sc_dmat, sc->jme_cdata.jme_tx_ring_map, 0,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (sc->
jme_cdata.jme_tx_ring_map), (0), (sc->jme_cdata.jme_tx_ring_map
->dm_mapsize), (0x04))
1566 sc->jme_cdata.jme_tx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (sc->
jme_cdata.jme_tx_ring_map), (0), (sc->jme_cdata.jme_tx_ring_map
->dm_mapsize), (0x04))
;
1567}
1568
/*
 * jme_discard_rxbufs: hand `count` Rx descriptors starting at ring index
 * `cons` back to the hardware unchanged (buffer reuse). Each descriptor is
 * re-armed with OWN|INTR|64BIT flags and a full MCLBYTES buffer length;
 * the index wraps modulo JME_RX_RING_CNT (256).
 */
1569 void
1570jme_discard_rxbufs(struct jme_softc *sc, int cons, int count)
1571{
1572 int i;
1573
1574 for (i = 0; i < count; ++i) {
1575 struct jme_desc *desc = &sc->jme_rdata.jme_rx_ring[cons];
1576
1577 desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT)((__uint32_t)(0x80000000 | 0x40000000 | 0x20000000));
1578 desc->buflen = htole32(MCLBYTES)((__uint32_t)((1 << 11)));
1579 JME_DESC_INC(cons, JME_RX_RING_CNT)((cons) = ((cons) + 1) % (256));
1580 }
1581}
1582
/*
 * jme_rxpkt: assemble one received frame (possibly spanning several Rx
 * descriptors/mbuf clusters) into an mbuf chain and pass it to the stack
 * via if_input(). On descriptor error status the segments are recycled and
 * the error counter bumped. On mbuf-allocation failure the remaining
 * segments are recycled and any partial chain is freed. Also extracts
 * hardware checksum results and (with VLAN support) the VLAN tag.
 * Advances jme_rx_cons by the frame's segment count.
 * (Analyzer-annotated view: each macro is followed by its expansion.)
 */
1583/* Receive a frame. */
1584 void
1585jme_rxpkt(struct jme_softc *sc)
1586{
1587 struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1588 struct jme_desc *desc;
1589 struct jme_rxdesc *rxd;
1590 struct mbuf_list ml = MBUF_LIST_INITIALIZER(){ ((void *)0), ((void *)0), 0 };
1591 struct mbuf *mp, *m;
1592 uint32_t flags, status;
1593 int cons, count, nsegs;
1594
1595 cons = sc->jme_cdata.jme_rx_cons;
1596 desc = &sc->jme_rdata.jme_rx_ring[cons];
1597 flags = letoh32(desc->flags)((__uint32_t)(desc->flags));
1598 status = letoh32(desc->buflen)((__uint32_t)(desc->buflen));
1599 nsegs = JME_RX_NSEGS(status)(((status) & 0x7F000000) >> 24);
1600
1601 if (status & JME_RX_ERR_STAT(0x00800000 | 0x00400000 | 0x00200000 | 0x00100000 | 0x00080000
| 0x00040000 | 0x00020000 | 0x00010000)
) {
1602 ifp->if_ierrorsif_data.ifi_ierrors++;
1603 jme_discard_rxbufs(sc, cons, nsegs);
1604#ifdef JME_SHOW_ERRORS
1605 printf("%s : receive error = 0x%b\n",
1606 sc->sc_dev.dv_xname, JME_RX_ERR(status)(((status) & 0x00FF0000) >> 16), JME_RX_ERR_BITS"\20" "\1CRCERR\2FIFOOVRN\3RUNT\4ABORT" "\5COLL\6NBLRCVD\7GMIIERR\10");
1607#endif
1608 sc->jme_cdata.jme_rx_cons += nsegs;
1609 sc->jme_cdata.jme_rx_cons %= JME_RX_RING_CNT256;
1610 return;
1611 }
1612
/* Frame length from the descriptor, minus the 10-byte alignment pad. */
1613 sc->jme_cdata.jme_rxlen = JME_RX_BYTES(status)((status) & 0x0000FFFF) - JME_RX_PAD_BYTES10;
1614 for (count = 0; count < nsegs; count++,
1615 JME_DESC_INC(cons, JME_RX_RING_CNT)((cons) = ((cons) + 1) % (256))) {
1616 rxd = &sc->jme_cdata.jme_rxdesc[cons];
1617 mp = rxd->rx_m;
1618
1619 /* Add a new receive buffer to the ring. */
1620 if (jme_newbuf(sc, rxd) != 0) {
1621 ifp->if_iqdropsif_data.ifi_iqdrops++;
1622 /* Reuse buffer. */
1623 jme_discard_rxbufs(sc, cons, nsegs - count);
1624 if (sc->jme_cdata.jme_rxhead != NULL((void *)0)) {
1625 m_freem(sc->jme_cdata.jme_rxhead);
1626 JME_RXCHAIN_RESET(sc)do { (sc)->jme_cdata.jme_rxhead = ((void *)0); (sc)->jme_cdata
.jme_rxtail = ((void *)0); (sc)->jme_cdata.jme_rxlen = 0; }
while (0)
;
1627 }
1628 break;
1629 }
1630
1631 /*
1632 * Assume we've received a full sized frame.
1633 * Actual size is fixed when we encounter the end of
1634 * multi-segmented frame.
1635 */
1636 mp->m_lenm_hdr.mh_len = MCLBYTES(1 << 11);
1637
1638 /* Chain received mbufs. */
1639 if (sc->jme_cdata.jme_rxhead == NULL((void *)0)) {
1640 sc->jme_cdata.jme_rxhead = mp;
1641 sc->jme_cdata.jme_rxtail = mp;
1642 } else {
1643 /*
1644 * Receive processor can receive a maximum frame
1645 * size of 65535 bytes.
1646 */
1647 mp->m_flagsm_hdr.mh_flags &= ~M_PKTHDR0x0002;
1648 sc->jme_cdata.jme_rxtail->m_nextm_hdr.mh_next = mp;
1649 sc->jme_cdata.jme_rxtail = mp;
1650 }
1651
1652 if (count == nsegs - 1) {
1653 /* Last desc. for this frame. */
1654 m = sc->jme_cdata.jme_rxhead;
1655 /* XXX assert PKTHDR? */
1656 m->m_flagsm_hdr.mh_flags |= M_PKTHDR0x0002;
1657 m->m_pkthdrM_dat.MH.MH_pkthdr.len = sc->jme_cdata.jme_rxlen;
1658 if (nsegs > 1) {
1659 /* Set first mbuf size. */
1660 m->m_lenm_hdr.mh_len = MCLBYTES(1 << 11) - JME_RX_PAD_BYTES10;
1661 /* Set last mbuf size. */
1662 mp->m_lenm_hdr.mh_len = sc->jme_cdata.jme_rxlen -
1663 ((MCLBYTES(1 << 11) - JME_RX_PAD_BYTES10) +
1664 (MCLBYTES(1 << 11) * (nsegs - 2)));
1665 } else {
1666 m->m_lenm_hdr.mh_len = sc->jme_cdata.jme_rxlen;
1667 }
1668
1669 /*
1670 * Account for 10bytes auto padding which is used
1671 * to align IP header on 32bit boundary. Also note,
1672 * CRC bytes is automatically removed by the
1673 * hardware.
1674 */
1675 m->m_datam_hdr.mh_data += JME_RX_PAD_BYTES10;
1676
1677 /* Set checksum information. */
1678 if (flags & (JME_RD_IPV40x00400000|JME_RD_IPV60x00200000)) {
1679 if ((flags & JME_RD_IPV40x00400000) &&
1680 (flags & JME_RD_IPCSUM0x04000000))
1681 m->m_pkthdrM_dat.MH.MH_pkthdr.csum_flags |=
1682 M_IPV4_CSUM_IN_OK0x0008;
1683 if ((flags & JME_RD_MORE_FRAG0x20000000) == 0 &&
1684 ((flags & (JME_RD_TCP0x10000000 | JME_RD_TCPCSUM0x02000000)) ==
1685 (JME_RD_TCP0x10000000 | JME_RD_TCPCSUM0x02000000) ||
1686 (flags & (JME_RD_UDP0x08000000 | JME_RD_UDPCSUM0x01000000)) ==
1687 (JME_RD_UDP0x08000000 | JME_RD_UDPCSUM0x01000000))) {
1688 m->m_pkthdrM_dat.MH.MH_pkthdr.csum_flags |=
1689 M_TCP_CSUM_IN_OK0x0020 | M_UDP_CSUM_IN_OK0x0080;
1690 }
1691 }
1692
1693#if NVLAN1 > 0
1694 /* Check for VLAN tagged packets. */
1695 if (flags & JME_RD_VLAN_TAG0x00800000) {
1696 m->m_pkthdrM_dat.MH.MH_pkthdr.ether_vtag = flags & JME_RD_VLAN_MASK0x0000FFFF;
1697 m->m_flagsm_hdr.mh_flags |= M_VLANTAG0x0020;
1698 }
1699#endif
1700
1701 ml_enqueue(&ml, m);
1702
1703 /* Reset mbuf chains. */
1704 JME_RXCHAIN_RESET(sc)do { (sc)->jme_cdata.jme_rxhead = ((void *)0); (sc)->jme_cdata
.jme_rxtail = ((void *)0); (sc)->jme_cdata.jme_rxlen = 0; }
while (0)
;
1705 }
1706 }
1707
1708 if_input(ifp, &ml);
1709
1710 sc->jme_cdata.jme_rx_cons += nsegs;
1711 sc->jme_cdata.jme_rx_cons %= JME_RX_RING_CNT256;
1712}
1713
/*
 * jme_rxeof: Rx completion loop. Repeatedly inspects the descriptor at
 * jme_rx_cons and calls jme_rxpkt() for each completed frame, until a
 * descriptor still owned by the hardware (or not yet VALID) is found.
 * A segment-count / packet-length sanity mismatch aborts the loop.
 * Syncs the Rx ring POSTREAD before, and PREWRITE after if any frame
 * was processed.
 * (Analyzer-annotated view: each macro is followed by its expansion.)
 */
1714 void
1715jme_rxeof(struct jme_softc *sc)
1716{
1717 struct jme_desc *desc;
1718 int nsegs, prog, pktlen;
1719
1720 bus_dmamap_sync(sc->sc_dmat, sc->jme_cdata.jme_rx_ring_map, 0,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (sc->
jme_cdata.jme_rx_ring_map), (0), (sc->jme_cdata.jme_rx_ring_map
->dm_mapsize), (0x02))
1721 sc->jme_cdata.jme_rx_ring_map->dm_mapsize, BUS_DMASYNC_POSTREAD)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (sc->
jme_cdata.jme_rx_ring_map), (0), (sc->jme_cdata.jme_rx_ring_map
->dm_mapsize), (0x02))
;
1722
1723 prog = 0;
1724 for (;;) {
1725 desc = &sc->jme_rdata.jme_rx_ring[sc->jme_cdata.jme_rx_cons];
1726 if ((letoh32(desc->flags)((__uint32_t)(desc->flags)) & JME_RD_OWN0x80000000) == JME_RD_OWN0x80000000)
1727 break;
1728 if ((letoh32(desc->buflen)((__uint32_t)(desc->buflen)) & JME_RD_VALID0x80000000) == 0)
1729 break;
1730
1731 /*
1732 * Check number of segments against received bytes.
1733 * Non-matching value would indicate that hardware
1734 * is still trying to update Rx descriptors. I'm not
1735 * sure whether this check is needed.
1736 */
1737 nsegs = JME_RX_NSEGS(letoh32(desc->buflen))(((((__uint32_t)(desc->buflen))) & 0x7F000000) >>
24)
;
1738 pktlen = JME_RX_BYTES(letoh32(desc->buflen))((((__uint32_t)(desc->buflen))) & 0x0000FFFF);
1739 if (nsegs != howmany(pktlen, MCLBYTES)(((pktlen) + (((1 << 11)) - 1)) / ((1 << 11)))) {
1740 printf("%s: RX fragment count(%d) "
1741 "and packet size(%d) mismatch\n",
1742 sc->sc_dev.dv_xname, nsegs, pktlen);
1743 break;
1744 }
1745
1746 /* Received a frame. */
1747 jme_rxpkt(sc);
1748 prog++;
1749 }
1750
1751 if (prog > 0) {
1752 bus_dmamap_sync(sc->sc_dmat, sc->jme_cdata.jme_rx_ring_map, 0,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (sc->
jme_cdata.jme_rx_ring_map), (0), (sc->jme_cdata.jme_rx_ring_map
->dm_mapsize), (0x04))
1753 sc->jme_cdata.jme_rx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (sc->
jme_cdata.jme_rx_ring_map), (0), (sc->jme_cdata.jme_rx_ring_map
->dm_mapsize), (0x04))
;
1754 }
1755}
1756
/*
 * jme_tick: one-second periodic timeout. Drives the MII autonegotiation
 * state machine (mii_tick) at splnet and re-arms itself via
 * timeout_add_sec().
 */
1757 void
1758jme_tick(void *xsc)
1759{
1760 struct jme_softc *sc = xsc;
1761 struct mii_data *mii = &sc->sc_miibus;
1762 int s;
1763
1764 s = splnet()splraise(0x7);
1765 mii_tick(mii);
1766 timeout_add_sec(&sc->jme_tick_ch, 1);
1767 splx(s)spllower(s);
1768}
1769
/*
 * jme_reset: software-reset the MAC by pulsing GHC_RESET in the global
 * host control register (0x0054): assert, wait 10us, deassert.
 * The #ifdef foo section (stopping Rx/Tx first) is intentionally
 * compiled out.
 */
1770 void
1771jme_reset(struct jme_softc *sc)
1772{
1773#ifdef foo
1774 /* Stop receiver, transmitter. */
1775 jme_stop_rx(sc);
1776 jme_stop_tx(sc);
1777#endif
1778 CSR_WRITE_4(sc, JME_GHC, GHC_RESET)(((sc)->jme_mem_bt)->write_4(((sc)->jme_mem_bh), ((0x0054
)), ((0x40000000))))
;
1779 DELAY(10)(*delay_func)(10);
1780 CSR_WRITE_4(sc, JME_GHC, 0)(((sc)->jme_mem_bt)->write_4(((sc)->jme_mem_bh), ((0x0054
)), ((0))))
;
1781}
1782
/*
 * jme_init: bring the interface up. Stops and resets the chip, initializes
 * the Rx/Tx descriptor rings and shadow status block, programs the station
 * address, Tx/Rx queue and MAC parameters, interrupt coalescing, and the
 * interrupt mask, then kicks off media selection and the periodic tick.
 * Returns 0 on success or the jme_init_rx_ring() error (chip left stopped).
 * Tx/Rx DMA engines themselves are enabled later, on link-up, from the
 * miibus status-change callback.
 * (Analyzer-annotated view: each macro is followed by its expansion.)
 */
1783 int
1784jme_init(struct ifnet *ifp)
1785{
1786 struct jme_softc *sc = ifp->if_softc;
1787 struct mii_data *mii;
1788 uint8_t eaddr[ETHER_ADDR_LEN6];
1789 bus_addr_t paddr;
1790 uint32_t reg;
1791 int error;
1792
1793 /*
1794 * Cancel any pending I/O.
1795 */
1796 jme_stop(sc);
1797
1798 /*
1799 * Reset the chip to a known state.
1800 */
1801 jme_reset(sc);
1802
1803 /* Init descriptors. */
1804 error = jme_init_rx_ring(sc);
1805 if (error != 0) {
1806 printf("%s: initialization failed: no memory for Rx buffers.\n",
1807 sc->sc_dev.dv_xname);
1808 jme_stop(sc);
1809 return (error);
1810 }
1811 jme_init_tx_ring(sc);
1812
1813 /* Initialize shadow status block. */
1814 jme_init_ssb(sc);
1815
1816 /* Reprogram the station address. */
1817 bcopy(LLADDR(ifp->if_sadl)((caddr_t)((ifp->if_sadl)->sdl_data + (ifp->if_sadl)
->sdl_nlen))
, eaddr, ETHER_ADDR_LEN6);
1818 CSR_WRITE_4(sc, JME_PAR0,(((sc)->jme_mem_bt)->write_4(((sc)->jme_mem_bh), ((0x0038
)), ((eaddr[3] << 24 | eaddr[2] << 16 | eaddr[1] <<
8 | eaddr[0]))))
1819 eaddr[3] << 24 | eaddr[2] << 16 | eaddr[1] << 8 | eaddr[0])(((sc)->jme_mem_bt)->write_4(((sc)->jme_mem_bh), ((0x0038
)), ((eaddr[3] << 24 | eaddr[2] << 16 | eaddr[1] <<
8 | eaddr[0]))))
;
1820 CSR_WRITE_4(sc, JME_PAR1, eaddr[5] << 8 | eaddr[4])(((sc)->jme_mem_bt)->write_4(((sc)->jme_mem_bh), ((0x003C
)), ((eaddr[5] << 8 | eaddr[4]))))
;
1821
1822 /*
1823 * Configure Tx queue.
1824 * Tx priority queue weight value : 0
1825 * Tx FIFO threshold for processing next packet : 16QW
1826 * Maximum Tx DMA length : 512
1827 * Allow Tx DMA burst.
1828 */
1829 sc->jme_txcsr = TXCSR_TXQ_N_SEL(TXCSR_TXQ0)(((0) << 16) & 0x00070000);
1830 sc->jme_txcsr |= TXCSR_TXQ_WEIGHT(TXCSR_TXQ_WEIGHT_MIN)(((0) << 24) & 0x0F000000);
1831 sc->jme_txcsr |= TXCSR_FIFO_THRESH_16QW0x000000C0;
1832 sc->jme_txcsr |= sc->jme_tx_dma_size;
1833 sc->jme_txcsr |= TXCSR_DMA_BURST0x00000004;
1834 CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr)(((sc)->jme_mem_bt)->write_4(((sc)->jme_mem_bh), ((0x0000
)), ((sc->jme_txcsr))))
;
1835
1836 /* Set Tx descriptor counter. */
1837 CSR_WRITE_4(sc, JME_TXQDC, JME_TX_RING_CNT)(((sc)->jme_mem_bt)->write_4(((sc)->jme_mem_bh), ((0x000C
)), ((384))))
;
1838
1839 /* Set Tx ring address to the hardware. */
1840 paddr = JME_TX_RING_ADDR(sc, 0)((sc)->jme_rdata.jme_tx_ring_paddr + sizeof(struct jme_desc
) * (0))
;
1841 CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr))(((sc)->jme_mem_bt)->write_4(((sc)->jme_mem_bh), ((0x0008
)), ((((uint64_t) (paddr) >> 32)))))
;
1842 CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr))(((sc)->jme_mem_bt)->write_4(((sc)->jme_mem_bh), ((0x0004
)), ((((uint64_t) (paddr) & 0xFFFFFFFF)))))
;
1843
1844 /* Configure TxMAC parameters. */
1845 reg = TXMAC_IFG1_DEFAULT0x20000000 | TXMAC_IFG2_DEFAULT0x40000000 | TXMAC_IFG_ENB0x00000020;
1846 reg |= TXMAC_THRESH_1_PKT0x00000300;
1847 reg |= TXMAC_CRC_ENB0x00000002 | TXMAC_PAD_ENB0x00000001;
1848 CSR_WRITE_4(sc, JME_TXMAC, reg)(((sc)->jme_mem_bt)->write_4(((sc)->jme_mem_bh), ((0x0014
)), ((reg))))
;
1849
1850 /*
1851 * Configure Rx queue.
1852 * FIFO full threshold for transmitting Tx pause packet : 128T
1853 * FIFO threshold for processing next packet : 128QW
1854 * Rx queue 0 select
1855 * Max Rx DMA length : 128
1856 * Rx descriptor retry : 32
1857 * Rx descriptor retry time gap : 256ns
1858 * Don't receive runt/bad frame.
1859 */
1860 sc->jme_rxcsr = RXCSR_FIFO_FTHRESH_128T0x30000000;
1861
1862 /*
1863 * Since Rx FIFO size is 4K bytes, receiving frames larger
1864 * than 4K bytes will suffer from Rx FIFO overruns. So
1865 * decrease FIFO threshold to reduce the FIFO overruns for
1866 * frames larger than 4000 bytes.
1867 * For best performance of standard MTU sized frames use
1868 * maximum allowable FIFO threshold, which is 32QW for
1869 * chips with a full mask >= 2 otherwise 128QW. FIFO
1870 * thresholds of 64QW and 128QW are not valid for chips
1871 * with a full mask >= 2.
1872 */
1873 if (sc->jme_revfm >= 2)
1874 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW0x00000000;
1875 else {
1876 if ((ifp->if_mtuif_data.ifi_mtu + ETHER_HDR_LEN((6 * 2) + 2) + ETHER_CRC_LEN4 +
1877 ETHER_VLAN_ENCAP_LEN4) > JME_RX_FIFO_SIZE4000)
1878 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW0x00000000;
1879 else
1880 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_128QW0x0C000000;
1881 }
1882 sc->jme_rxcsr |= sc->jme_rx_dma_size | RXCSR_RXQ_N_SEL(RXCSR_RXQ0)((0) << 16);
1883 sc->jme_rxcsr |= RXCSR_DESC_RT_CNT(RXCSR_DESC_RT_CNT_DEFAULT)((((32) / 4) << 8) & 0x00000F00);
1884 sc->jme_rxcsr |= RXCSR_DESC_RT_GAP_2560x00000000 & RXCSR_DESC_RT_GAP_MASK0x0000F000;
1885 /* XXX TODO DROP_BAD */
1886 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr)(((sc)->jme_mem_bt)->write_4(((sc)->jme_mem_bh), ((0x0020
)), ((sc->jme_rxcsr))))
;
1887
1888 /* Set Rx descriptor counter. */
1889 CSR_WRITE_4(sc, JME_RXQDC, JME_RX_RING_CNT)(((sc)->jme_mem_bt)->write_4(((sc)->jme_mem_bh), ((0x002C
)), ((256))))
;
1890
1891 /* Set Rx ring address to the hardware. */
1892 paddr = JME_RX_RING_ADDR(sc, 0)((sc)->jme_rdata.jme_rx_ring_paddr + sizeof(struct jme_desc
) * (0))
;
1893 CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr))(((sc)->jme_mem_bt)->write_4(((sc)->jme_mem_bh), ((0x0028
)), ((((uint64_t) (paddr) >> 32)))))
;
1894 CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr))(((sc)->jme_mem_bt)->write_4(((sc)->jme_mem_bh), ((0x0024
)), ((((uint64_t) (paddr) & 0xFFFFFFFF)))))
;
1895
1896 /* Clear receive filter. */
1897 CSR_WRITE_4(sc, JME_RXMAC, 0)(((sc)->jme_mem_bt)->write_4(((sc)->jme_mem_bh), ((0x0034
)), ((0))))
;
1898
1899 /* Set up the receive filter. */
1900 jme_iff(sc);
1901
1902 jme_set_vlan(sc);
1903
1904 /*
1905 * Disable all WOL bits as WOL can interfere normal Rx
1906 * operation. Also clear WOL detection status bits.
1907 */
1908 reg = CSR_READ_4(sc, JME_PMCS)(((sc)->jme_mem_bt)->read_4(((sc)->jme_mem_bh), ((0x0060
))))
;
1909 reg &= ~PMCS_WOL_ENB_MASK0x0000FFFF;
1910 CSR_WRITE_4(sc, JME_PMCS, reg)(((sc)->jme_mem_bt)->write_4(((sc)->jme_mem_bh), ((0x0060
)), ((reg))))
;
1911
1912 /*
1913 * Pad 10bytes right before received frame. This will greatly
1914 * help Rx performance on strict-alignment architectures as
1915 * it does not need to copy the frame to align the payload.
1916 */
1917 reg = CSR_READ_4(sc, JME_RXMAC)(((sc)->jme_mem_bt)->read_4(((sc)->jme_mem_bh), ((0x0034
))))
;
1918 reg |= RXMAC_PAD_10BYTES0x00000002;
1919 reg |= RXMAC_CSUM_ENB0x00000001;
1920 CSR_WRITE_4(sc, JME_RXMAC, reg)(((sc)->jme_mem_bt)->write_4(((sc)->jme_mem_bh), ((0x0034
)), ((reg))))
;
1921
1922 /* Configure general purpose reg0 */
1923 reg = CSR_READ_4(sc, JME_GPREG0)(((sc)->jme_mem_bt)->read_4(((sc)->jme_mem_bh), ((0x0808
))))
;
1924 reg &= ~GPREG0_PCC_UNIT_MASK0x00000300;
1925 /* Set PCC timer resolution to micro-seconds unit. */
1926 reg |= GPREG0_PCC_UNIT_US0x00000200;
1927 /*
1928 * Disable all shadow register posting as we have to read
1929 * JME_INTR_STATUS register in jme_intr. Also it seems
1930 * that it's hard to synchronize interrupt status between
1931 * hardware and software with shadow posting due to
1932 * requirements of bus_dmamap_sync(9).
1933 */
1934 reg |= GPREG0_SH_POST_DW7_DIS0x80000000 | GPREG0_SH_POST_DW6_DIS0x40000000 |
1935 GPREG0_SH_POST_DW5_DIS0x20000000 | GPREG0_SH_POST_DW4_DIS0x10000000 |
1936 GPREG0_SH_POST_DW3_DIS0x08000000 | GPREG0_SH_POST_DW2_DIS0x04000000 |
1937 GPREG0_SH_POST_DW1_DIS0x02000000 | GPREG0_SH_POST_DW0_DIS0x01000000;
1938 /* Disable posting of DW0. */
1939 reg &= ~GPREG0_POST_DW0_ENB0x00040000;
1940 /* Clear PME message. */
1941 reg &= ~GPREG0_PME_ENB0x00000020;
1942 /* Set PHY address. */
1943 reg &= ~GPREG0_PHY_ADDR_MASK0x0000001F;
1944 reg |= sc->jme_phyaddr;
1945 CSR_WRITE_4(sc, JME_GPREG0, reg)(((sc)->jme_mem_bt)->write_4(((sc)->jme_mem_bh), ((0x0808
)), ((reg))))
;
1946
1947 /* Configure Tx queue 0 packet completion coalescing. */
1948 sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT65535;
1949 reg = (sc->jme_tx_coal_to << PCCTX_COAL_TO_SHIFT16) &
1950 PCCTX_COAL_TO_MASK0xFFFF0000;
1951 sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT64;
1952 reg |= (sc->jme_tx_coal_pkt << PCCTX_COAL_PKT_SHIFT8) &
1953 PCCTX_COAL_PKT_MASK0x0000FF00;
1954 reg |= PCCTX_COAL_TXQ00x00000001;
1955 CSR_WRITE_4(sc, JME_PCCTX, reg)(((sc)->jme_mem_bt)->write_4(((sc)->jme_mem_bh), ((0x0840
)), ((reg))))
;
1956
1957 /* Configure Rx queue 0 packet completion coalescing. */
1958 sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT100;
1959 reg = (sc->jme_rx_coal_to << PCCRX_COAL_TO_SHIFT16) &
1960 PCCRX_COAL_TO_MASK0xFFFF0000;
1961 sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT64;
1962 reg |= (sc->jme_rx_coal_pkt << PCCRX_COAL_PKT_SHIFT8) &
1963 PCCRX_COAL_PKT_MASK0x0000FF00;
1964 CSR_WRITE_4(sc, JME_PCCRX0, reg)(((sc)->jme_mem_bt)->write_4(((sc)->jme_mem_bh), ((0x0830
)), ((reg))))
;
1965
1966 /* Configure shadow status block but don't enable posting. */
1967 paddr = sc->jme_rdata.jme_ssb_block_paddr;
1968 CSR_WRITE_4(sc, JME_SHBASE_ADDR_HI, JME_ADDR_HI(paddr))(((sc)->jme_mem_bt)->write_4(((sc)->jme_mem_bh), ((0x0848
)), ((((uint64_t) (paddr) >> 32)))))
;
1969 CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO, JME_ADDR_LO(paddr))(((sc)->jme_mem_bt)->write_4(((sc)->jme_mem_bh), ((0x084C
)), ((((uint64_t) (paddr) & 0xFFFFFFFF)))))
;
1970
1971 /* Disable Timer 1 and Timer 2. */
1972 CSR_WRITE_4(sc, JME_TIMER1, 0)(((sc)->jme_mem_bt)->write_4(((sc)->jme_mem_bh), ((0x0870
)), ((0))))
;
1973 CSR_WRITE_4(sc, JME_TIMER2, 0)(((sc)->jme_mem_bt)->write_4(((sc)->jme_mem_bh), ((0x0874
)), ((0))))
;
1974
1975 /* Configure retry transmit period, retry limit value. */
/*
 * NOTE(review): the second term masks the retry-limit field with
 * TXTRHD_RT_LIMIT_SHIFT, which the expansion shows is 0 -- so
 * ((8 << 0) & 0) always writes 0 for the retry limit. This looks like a
 * typo for TXTRHD_RT_LIMIT_MASK; confirm against the JME_TXTRHD register
 * definition in if_jmereg.h before changing.
 */
1976 CSR_WRITE_4(sc, JME_TXTRHD,(((sc)->jme_mem_bt)->write_4(((sc)->jme_mem_bh), ((0x001C
)), ((((8192 << 8) & 0x7FFFFF00) | ((8 << 0) &
0)))))
1977 ((TXTRHD_RT_PERIOD_DEFAULT << TXTRHD_RT_PERIOD_SHIFT) &(((sc)->jme_mem_bt)->write_4(((sc)->jme_mem_bh), ((0x001C
)), ((((8192 << 8) & 0x7FFFFF00) | ((8 << 0) &
0)))))
1978 TXTRHD_RT_PERIOD_MASK) |(((sc)->jme_mem_bt)->write_4(((sc)->jme_mem_bh), ((0x001C
)), ((((8192 << 8) & 0x7FFFFF00) | ((8 << 0) &
0)))))
1979 ((TXTRHD_RT_LIMIT_DEFAULT << TXTRHD_RT_LIMIT_SHIFT) &(((sc)->jme_mem_bt)->write_4(((sc)->jme_mem_bh), ((0x001C
)), ((((8192 << 8) & 0x7FFFFF00) | ((8 << 0) &
0)))))
1980 TXTRHD_RT_LIMIT_SHIFT))(((sc)->jme_mem_bt)->write_4(((sc)->jme_mem_bh), ((0x001C
)), ((((8192 << 8) & 0x7FFFFF00) | ((8 << 0) &
0)))))
;
1981
1982 /* Disable RSS. */
1983 CSR_WRITE_4(sc, JME_RSSC, RSSC_DIS_RSS)(((sc)->jme_mem_bt)->write_4(((sc)->jme_mem_bh), ((0x0C00
)), ((0x00000000))))
;
1984
1985 /* Initialize the interrupt mask. */
1986 CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS)(((sc)->jme_mem_bt)->write_4(((sc)->jme_mem_bh), ((0x0828
)), ((((0x02000000 | 0x01000000 | 0x00800000 | 0x00400000) | 0x00200000
| (0x00100000 | 0x00080000 | 0x00040000 | 0x00020000) | 0x00010000
| (0x00001000 | 0x00002000 | 0x00004000 | 0x00008000))))))
;
1987 CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF)(((sc)->jme_mem_bt)->write_4(((sc)->jme_mem_bh), ((0x0820
)), ((0xFFFFFFFF))))
;
1988
1989 /*
1990 * Enabling Tx/Rx DMA engines and Rx queue processing is
1991 * done after detection of valid link in jme_miibus_statchg.
1992 */
1993 sc->jme_flags &= ~JME_FLAG_LINK0x0008;
1994
1995 /* Set the current media. */
1996 mii = &sc->sc_miibus;
1997 mii_mediachg(mii);
1998
1999 timeout_add_sec(&sc->jme_tick_ch, 1);
2000
2001 ifp->if_flags |= IFF_RUNNING0x40;
2002 ifq_clr_oactive(&ifp->if_snd);
2003
2004 return (0);
2005}
2006
/*
 * jme_stop: bring the interface down. Marks the interface not running,
 * cancels the tick timeout and watchdog, masks and acks all interrupts,
 * disables shadow status-block posting, stops the Rx and Tx engines, frees
 * any partially-assembled Rx chain, and releases every mbuf/DMA map still
 * held by the Rx and Tx rings.
 * (Analyzer-annotated view: each macro is followed by its expansion.)
 */
2007 void
2008jme_stop(struct jme_softc *sc)
2009{
2010 struct ifnet *ifp = &sc->sc_arpcom.ac_if;
2011 struct jme_txdesc *txd;
2012 struct jme_rxdesc *rxd;
2013 int i;
2014
2015 /*
2016 * Mark the interface down and cancel the watchdog timer.
2017 */
2018 ifp->if_flags &= ~IFF_RUNNING0x40;
2019 ifq_clr_oactive(&ifp->if_snd);
2020 ifp->if_timer = 0;
2021
2022 timeout_del(&sc->jme_tick_ch);
2023 sc->jme_flags &= ~JME_FLAG_LINK0x0008;
2024
2025 /*
2026 * Disable interrupts.
2027 */
2028 CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS)(((sc)->jme_mem_bt)->write_4(((sc)->jme_mem_bh), ((0x082C
)), ((((0x02000000 | 0x01000000 | 0x00800000 | 0x00400000) | 0x00200000
| (0x00100000 | 0x00080000 | 0x00040000 | 0x00020000) | 0x00010000
| (0x00001000 | 0x00002000 | 0x00004000 | 0x00008000))))))
;
2029 CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF)(((sc)->jme_mem_bt)->write_4(((sc)->jme_mem_bh), ((0x0820
)), ((0xFFFFFFFF))))
;
2030
2031 /* Disable updating shadow status block. */
2032 CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO,(((sc)->jme_mem_bt)->write_4(((sc)->jme_mem_bh), ((0x084C
)), (((((sc)->jme_mem_bt)->read_4(((sc)->jme_mem_bh)
, ((0x084C)))) & ~0x00000001))))
2033 CSR_READ_4(sc, JME_SHBASE_ADDR_LO) & ~SHBASE_POST_ENB)(((sc)->jme_mem_bt)->write_4(((sc)->jme_mem_bh), ((0x084C
)), (((((sc)->jme_mem_bt)->read_4(((sc)->jme_mem_bh)
, ((0x084C)))) & ~0x00000001))))
;
2034
2035 /* Stop receiver, transmitter. */
2036 jme_stop_rx(sc);
2037 jme_stop_tx(sc);
2038
2039#ifdef foo
2040 /* Reclaim Rx/Tx buffers that have been completed. */
2041 jme_rxeof(sc);
2042 m_freem(sc->jme_cdata.jme_rxhead);
2043 JME_RXCHAIN_RESET(sc)do { (sc)->jme_cdata.jme_rxhead = ((void *)0); (sc)->jme_cdata
.jme_rxtail = ((void *)0); (sc)->jme_cdata.jme_rxlen = 0; }
while (0)
;
2044 jme_txeof(sc);
2045#endif
2046
2047 /*
2048 * Free partial finished RX segments
2049 */
2050 m_freem(sc->jme_cdata.jme_rxhead);
2051 JME_RXCHAIN_RESET(sc)do { (sc)->jme_cdata.jme_rxhead = ((void *)0); (sc)->jme_cdata
.jme_rxtail = ((void *)0); (sc)->jme_cdata.jme_rxlen = 0; }
while (0)
;
2052
2053 /*
2054 * Free RX and TX mbufs still in the queues.
2055 */
2056 for (i = 0; i < JME_RX_RING_CNT256; i++) {
2057 rxd = &sc->jme_cdata.jme_rxdesc[i];
2058 if (rxd->rx_m != NULL((void *)0)) {
2059 bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap)(*(sc->sc_dmat)->_dmamap_unload)((sc->sc_dmat), (rxd
->rx_dmamap))
;
2060 m_freem(rxd->rx_m);
2061 rxd->rx_m = NULL((void *)0);
2062 }
2063 }
2064 for (i = 0; i < JME_TX_RING_CNT384; i++) {
2065 txd = &sc->jme_cdata.jme_txdesc[i];
2066 if (txd->tx_m != NULL((void *)0)) {
2067 bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap)(*(sc->sc_dmat)->_dmamap_unload)((sc->sc_dmat), (txd
->tx_dmamap))
;
2068 m_freem(txd->tx_m);
2069 txd->tx_m = NULL((void *)0);
2070 txd->tx_ndesc = 0;
2071 }
2072 }
2073}
2074
/*
 * jme_stop_tx: disable the transmitter. Clears TXCSR_TX_ENB (no-op if it
 * was already clear) and polls, with 1us delays up to JME_TIMEOUT (1000)
 * iterations, until the hardware reports the enable bit cleared; logs a
 * console message on timeout.
 */
2075 void
2076jme_stop_tx(struct jme_softc *sc)
2077{
2078 uint32_t reg;
2079 int i;
2080
2081 reg = CSR_READ_4(sc, JME_TXCSR)(((sc)->jme_mem_bt)->read_4(((sc)->jme_mem_bh), ((0x0000
))))
;
2082 if ((reg & TXCSR_TX_ENB0x00000001) == 0)
2083 return;
2084 reg &= ~TXCSR_TX_ENB0x00000001;
2085 CSR_WRITE_4(sc, JME_TXCSR, reg)(((sc)->jme_mem_bt)->write_4(((sc)->jme_mem_bh), ((0x0000
)), ((reg))))
;
2086 for (i = JME_TIMEOUT1000; i > 0; i--) {
2087 DELAY(1)(*delay_func)(1);
2088 if ((CSR_READ_4(sc, JME_TXCSR)(((sc)->jme_mem_bt)->read_4(((sc)->jme_mem_bh), ((0x0000
))))
& TXCSR_TX_ENB0x00000001) == 0)
2089 break;
2090 }
2091 if (i == 0)
2092 printf("%s: stopping transmitter timeout!\n",
2093 sc->sc_dev.dv_xname);
2094}
2095
2096void
2097jme_stop_rx(struct jme_softc *sc)
2098{
2099 uint32_t reg;
2100 int i;
2101
2102 reg = CSR_READ_4(sc, JME_RXCSR)(((sc)->jme_mem_bt)->read_4(((sc)->jme_mem_bh), ((0x0020
))))
;
2103 if ((reg & RXCSR_RX_ENB0x00000001) == 0)
2104 return;
2105 reg &= ~RXCSR_RX_ENB0x00000001;
2106 CSR_WRITE_4(sc, JME_RXCSR, reg)(((sc)->jme_mem_bt)->write_4(((sc)->jme_mem_bh), ((0x0020
)), ((reg))))
;
2107 for (i = JME_TIMEOUT1000; i > 0; i--) {
2108 DELAY(1)(*delay_func)(1);
2109 if ((CSR_READ_4(sc, JME_RXCSR)(((sc)->jme_mem_bt)->read_4(((sc)->jme_mem_bh), ((0x0020
))))
& RXCSR_RX_ENB0x00000001) == 0)
2110 break;
2111 }
2112 if (i == 0)
2113 printf("%s: stopping receiver timeout!\n", sc->sc_dev.dv_xname);
2114}
2115
2116void
2117jme_init_tx_ring(struct jme_softc *sc)
2118{
2119 struct jme_ring_data *rd;
2120 struct jme_txdesc *txd;
2121 int i;
2122
2123 sc->jme_cdata.jme_tx_prod = 0;
2124 sc->jme_cdata.jme_tx_cons = 0;
2125 sc->jme_cdata.jme_tx_cnt = 0;
2126
2127 rd = &sc->jme_rdata;
2128 bzero(rd->jme_tx_ring, JME_TX_RING_SIZE)__builtin_bzero((rd->jme_tx_ring), ((sizeof(struct jme_desc
) * 384)))
;
2129 for (i = 0; i < JME_TX_RING_CNT384; i++) {
2130 txd = &sc->jme_cdata.jme_txdesc[i];
2131 txd->tx_m = NULL((void *)0);
2132 txd->tx_desc = &rd->jme_tx_ring[i];
2133 txd->tx_ndesc = 0;
2134 }
2135
2136 bus_dmamap_sync(sc->sc_dmat, sc->jme_cdata.jme_tx_ring_map, 0,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (sc->
jme_cdata.jme_tx_ring_map), (0), (sc->jme_cdata.jme_tx_ring_map
->dm_mapsize), (0x04))
2137 sc->jme_cdata.jme_tx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (sc->
jme_cdata.jme_tx_ring_map), (0), (sc->jme_cdata.jme_tx_ring_map
->dm_mapsize), (0x04))
;
2138}
2139
2140void
2141jme_init_ssb(struct jme_softc *sc)
2142{
2143 struct jme_ring_data *rd;
2144
2145 rd = &sc->jme_rdata;
2146 bzero(rd->jme_ssb_block, JME_SSB_SIZE)__builtin_bzero((rd->jme_ssb_block), (sizeof(struct jme_ssb
)))
;
2147 bus_dmamap_sync(sc->sc_dmat, sc->jme_cdata.jme_ssb_map, 0,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (sc->
jme_cdata.jme_ssb_map), (0), (sc->jme_cdata.jme_ssb_map->
dm_mapsize), (0x04))
2148 sc->jme_cdata.jme_ssb_map->dm_mapsize, BUS_DMASYNC_PREWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (sc->
jme_cdata.jme_ssb_map), (0), (sc->jme_cdata.jme_ssb_map->
dm_mapsize), (0x04))
;
2149}
2150
2151int
2152jme_init_rx_ring(struct jme_softc *sc)
2153{
2154 struct jme_ring_data *rd;
2155 struct jme_rxdesc *rxd;
2156 int i;
2157
2158 KASSERT(sc->jme_cdata.jme_rxhead == NULL &&((sc->jme_cdata.jme_rxhead == ((void *)0) && sc->
jme_cdata.jme_rxtail == ((void *)0) && sc->jme_cdata
.jme_rxlen == 0) ? (void)0 : __assert("diagnostic ", "/usr/src/sys/dev/pci/if_jme.c"
, 2160, "sc->jme_cdata.jme_rxhead == NULL && sc->jme_cdata.jme_rxtail == NULL && sc->jme_cdata.jme_rxlen == 0"
))
2159 sc->jme_cdata.jme_rxtail == NULL &&((sc->jme_cdata.jme_rxhead == ((void *)0) && sc->
jme_cdata.jme_rxtail == ((void *)0) && sc->jme_cdata
.jme_rxlen == 0) ? (void)0 : __assert("diagnostic ", "/usr/src/sys/dev/pci/if_jme.c"
, 2160, "sc->jme_cdata.jme_rxhead == NULL && sc->jme_cdata.jme_rxtail == NULL && sc->jme_cdata.jme_rxlen == 0"
))
2160 sc->jme_cdata.jme_rxlen == 0)((sc->jme_cdata.jme_rxhead == ((void *)0) && sc->
jme_cdata.jme_rxtail == ((void *)0) && sc->jme_cdata
.jme_rxlen == 0) ? (void)0 : __assert("diagnostic ", "/usr/src/sys/dev/pci/if_jme.c"
, 2160, "sc->jme_cdata.jme_rxhead == NULL && sc->jme_cdata.jme_rxtail == NULL && sc->jme_cdata.jme_rxlen == 0"
))
;
2161 sc->jme_cdata.jme_rx_cons = 0;
2162
2163 rd = &sc->jme_rdata;
2164 bzero(rd->jme_rx_ring, JME_RX_RING_SIZE)__builtin_bzero((rd->jme_rx_ring), ((sizeof(struct jme_desc
) * 256)))
;
2165 for (i = 0; i < JME_RX_RING_CNT256; i++) {
2166 int error;
2167
2168 rxd = &sc->jme_cdata.jme_rxdesc[i];
2169 rxd->rx_m = NULL((void *)0);
2170 rxd->rx_desc = &rd->jme_rx_ring[i];
2171 error = jme_newbuf(sc, rxd);
2172 if (error)
2173 return (error);
2174 }
2175
2176 bus_dmamap_sync(sc->sc_dmat, sc->jme_cdata.jme_rx_ring_map, 0,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (sc->
jme_cdata.jme_rx_ring_map), (0), (sc->jme_cdata.jme_rx_ring_map
->dm_mapsize), (0x04))
2177 sc->jme_cdata.jme_rx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (sc->
jme_cdata.jme_rx_ring_map), (0), (sc->jme_cdata.jme_rx_ring_map
->dm_mapsize), (0x04))
;
2178
2179 return (0);
2180}
2181
2182int
2183jme_newbuf(struct jme_softc *sc, struct jme_rxdesc *rxd)
2184{
2185 struct jme_desc *desc;
2186 struct mbuf *m;
2187 bus_dmamap_t map;
2188 int error;
2189
2190 MGETHDR(m, M_DONTWAIT, MT_DATA)m = m_gethdr((0x0002), (1));
2191 if (m == NULL((void *)0))
2192 return (ENOBUFS55);
2193 MCLGET(m, M_DONTWAIT)(void) m_clget((m), (0x0002), (1 << 11));
2194 if (!(m->m_flagsm_hdr.mh_flags & M_EXT0x0001)) {
2195 m_freem(m);
2196 return (ENOBUFS55);
2197 }
2198
2199 /*
2200 * JMC250 has 64bit boundary alignment limitation so jme(4)
2201 * takes advantage of 10 bytes padding feature of hardware
2202 * in order not to copy entire frame to align IP header on
2203 * 32bit boundary.
2204 */
2205 m->m_lenm_hdr.mh_len = m->m_pkthdrM_dat.MH.MH_pkthdr.len = MCLBYTES(1 << 11);
2206
2207 error = bus_dmamap_load_mbuf(sc->sc_dmat,(*(sc->sc_dmat)->_dmamap_load_mbuf)((sc->sc_dmat), (
sc->jme_cdata.jme_rx_sparemap), (m), (0x0001))
2208 sc->jme_cdata.jme_rx_sparemap, m, BUS_DMA_NOWAIT)(*(sc->sc_dmat)->_dmamap_load_mbuf)((sc->sc_dmat), (
sc->jme_cdata.jme_rx_sparemap), (m), (0x0001))
;
2209
2210 if (error != 0) {
2211 m_freem(m);
2212 printf("%s: can't load RX mbuf\n", sc->sc_dev.dv_xname);
2213 return (error);
2214 }
2215
2216 if (rxd->rx_m != NULL((void *)0)) {
2217 bus_dmamap_sync(sc->sc_dmat, rxd->rx_dmamap, 0,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (rxd->
rx_dmamap), (0), (rxd->rx_dmamap->dm_mapsize), (0x02))
2218 rxd->rx_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (rxd->
rx_dmamap), (0), (rxd->rx_dmamap->dm_mapsize), (0x02))
;
2219 bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap)(*(sc->sc_dmat)->_dmamap_unload)((sc->sc_dmat), (rxd
->rx_dmamap))
;
2220 }
2221 map = rxd->rx_dmamap;
2222 rxd->rx_dmamap = sc->jme_cdata.jme_rx_sparemap;
2223 sc->jme_cdata.jme_rx_sparemap = map;
2224 rxd->rx_m = m;
2225
2226 desc = rxd->rx_desc;
2227 desc->buflen = htole32(rxd->rx_dmamap->dm_segs[0].ds_len)((__uint32_t)(rxd->rx_dmamap->dm_segs[0].ds_len));
2228 desc->addr_lo =
2229 htole32(JME_ADDR_LO(rxd->rx_dmamap->dm_segs[0].ds_addr))((__uint32_t)(((uint64_t) (rxd->rx_dmamap->dm_segs[0].ds_addr
) & 0xFFFFFFFF)))
;
2230 desc->addr_hi =
2231 htole32(JME_ADDR_HI(rxd->rx_dmamap->dm_segs[0].ds_addr))((__uint32_t)(((uint64_t) (rxd->rx_dmamap->dm_segs[0].ds_addr
) >> 32)))
;
2232 desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT)((__uint32_t)(0x80000000 | 0x40000000 | 0x20000000));
2233
2234 return (0);
2235}
2236
2237void
2238jme_set_vlan(struct jme_softc *sc)
2239{
2240 struct ifnet *ifp = &sc->sc_arpcom.ac_if;
2241 uint32_t reg;
2242
2243 reg = CSR_READ_4(sc, JME_RXMAC)(((sc)->jme_mem_bt)->read_4(((sc)->jme_mem_bh), ((0x0034
))))
;
2244 reg &= ~RXMAC_VLAN_ENB0x00000004;
2245 if (ifp->if_capabilitiesif_data.ifi_capabilities & IFCAP_VLAN_HWTAGGING0x00000020)
2246 reg |= RXMAC_VLAN_ENB0x00000004;
2247 CSR_WRITE_4(sc, JME_RXMAC, reg)(((sc)->jme_mem_bt)->write_4(((sc)->jme_mem_bh), ((0x0034
)), ((reg))))
;
2248}
2249
2250void
2251jme_iff(struct jme_softc *sc)
2252{
2253 struct arpcom *ac = &sc->sc_arpcom;
2254 struct ifnet *ifp = &ac->ac_if;
2255 struct ether_multi *enm;
2256 struct ether_multistep step;
2257 uint32_t crc;
2258 uint32_t mchash[2];
2259 uint32_t rxcfg;
2260
2261 rxcfg = CSR_READ_4(sc, JME_RXMAC)(((sc)->jme_mem_bt)->read_4(((sc)->jme_mem_bh), ((0x0034
))))
;
2262 rxcfg &= ~(RXMAC_BROADCAST0x00000400 | RXMAC_PROMISC0x00000800 | RXMAC_MULTICAST0x00000200 |
2263 RXMAC_ALLMULTI0x00000080);
2264 ifp->if_flags &= ~IFF_ALLMULTI0x200;
2265
2266 /*
2267 * Always accept frames destined to our station address.
2268 * Always accept broadcast frames.
2269 */
2270 rxcfg |= RXMAC_UNICAST0x00000100 | RXMAC_BROADCAST0x00000400;
2271
2272 if (ifp->if_flags & IFF_PROMISC0x100 || ac->ac_multirangecnt > 0) {
2273 ifp->if_flags |= IFF_ALLMULTI0x200;
2274 if (ifp->if_flags & IFF_PROMISC0x100)
2275 rxcfg |= RXMAC_PROMISC0x00000800;
2276 else
2277 rxcfg |= RXMAC_ALLMULTI0x00000080;
2278 mchash[0] = mchash[1] = 0xFFFFFFFF;
2279 } else {
2280 /*
2281 * Set up the multicast address filter by passing all
2282 * multicast addresses through a CRC generator, and then
2283 * using the low-order 6 bits as an index into the 64 bit
2284 * multicast hash table. The high order bits select the
2285 * register, while the rest of the bits select the bit
2286 * within the register.
2287 */
2288 rxcfg |= RXMAC_MULTICAST0x00000200;
2289 bzero(mchash, sizeof(mchash))__builtin_bzero((mchash), (sizeof(mchash)));
2290
2291 ETHER_FIRST_MULTI(step, ac, enm)do { (step).e_enm = ((&(ac)->ac_multiaddrs)->lh_first
); do { if ((((enm)) = ((step)).e_enm) != ((void *)0)) ((step
)).e_enm = ((((enm)))->enm_list.le_next); } while ( 0); } while
( 0)
;
2292 while (enm != NULL((void *)0)) {
2293 crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN6);
2294
2295 /* Just want the 6 least significant bits. */
2296 crc &= 0x3f;
2297
2298 /* Set the corresponding bit in the hash table. */
2299 mchash[crc >> 5] |= 1 << (crc & 0x1f);
2300
2301 ETHER_NEXT_MULTI(step, enm)do { if (((enm) = (step).e_enm) != ((void *)0)) (step).e_enm =
(((enm))->enm_list.le_next); } while ( 0)
;
2302 }
2303 }
2304
2305 CSR_WRITE_4(sc, JME_MAR0, mchash[0])(((sc)->jme_mem_bt)->write_4(((sc)->jme_mem_bh), ((0x0040
)), ((mchash[0]))))
;
2306 CSR_WRITE_4(sc, JME_MAR1, mchash[1])(((sc)->jme_mem_bt)->write_4(((sc)->jme_mem_bh), ((0x0044
)), ((mchash[1]))))
;
2307 CSR_WRITE_4(sc, JME_RXMAC, rxcfg)(((sc)->jme_mem_bt)->write_4(((sc)->jme_mem_bh), ((0x0034
)), ((rxcfg))))
;
2308}