Bug Summary

File: dev/pci/if_xge.c
Warning: line 866, column 7
Although the value stored to 'val' is used in the enclosing expression, the value is never actually read from 'val'
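
The warning comes from the analyzer's deadcode checker (a dead-store diagnostic): at line 866 the value read from MAC_RMAC_ERR_REG is stored into 'val' so the enclosing 'if' can test it, but 'val' is unconditionally overwritten (lines 875 and 880) before that stored value is ever read again, so the store itself is dead. A minimal sketch of the flagged pattern and of the obvious cleanup follows; this shows the shape of a fix, not committed OpenBSD code:

    uint64_t val;

    /* flagged: the value stored to 'val' is never read afterwards */
    if ((val = PIF_RCSR(MAC_RMAC_ERR_REG)) & RMAC_LINK_STATE_CHANGE_INT) {
        /* ... 'val' is reassigned before any use ... */
    }

    /* equivalent, without the dead store */
    if (PIF_RCSR(MAC_RMAC_ERR_REG) & RMAC_LINK_STATE_CHANGE_INT) {
        /* ... */
    }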

Annotated Source Code


clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name if_xge.c -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/lib/clang/13.0.0 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -D CONFIG_DRM_AMD_DC_DCN3_0 -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant -fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free 
-fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /usr/obj/sys/arch/amd64/compile/GENERIC.MP/scan-build/2022-01-12-131800-47421-1 -x c /usr/src/sys/dev/pci/if_xge.c
1 /* $OpenBSD: if_xge.c,v 1.81 2022/01/09 05:42:56 jsg Exp $ */
2 /* $NetBSD: if_xge.c,v 1.1 2005/09/09 10:30:27 ragge Exp $ */
3
4 /*
5 * Copyright (c) 2004, SUNET, Swedish University Computer Network.
6 * All rights reserved.
7 *
8 * Written by Anders Magnusson for SUNET, Swedish University Computer Network.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed for the NetBSD Project by
21 * SUNET, Swedish University Computer Network.
22 * 4. The name of SUNET may not be used to endorse or promote products
23 * derived from this software without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY SUNET ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SUNET
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38 /*
39 * Driver for the Neterion Xframe Ten Gigabit Ethernet controller.
40 */
41
42 #include "bpfilter.h"
43 #include "vlan.h"
44
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/sockio.h>
48 #include <sys/mbuf.h>
49 #include <sys/malloc.h>
50 #include <sys/kernel.h>
51 #include <sys/socket.h>
52 #include <sys/device.h>
53 #include <sys/endian.h>
54
55 #include <net/if.h>
56 #include <net/if_media.h>
57
58 #include <netinet/in.h>
59 #include <netinet/if_ether.h>
60
61 #if NBPFILTER > 0
62 #include <net/bpf.h>
63 #endif
64
65 #include <machine/bus.h>
66 #include <machine/intr.h>
67
68 #include <dev/pci/pcivar.h>
69 #include <dev/pci/pcireg.h>
70 #include <dev/pci/pcidevs.h>
71
72 #include <sys/lock.h>
73
74 #include <dev/pci/if_xgereg.h>
75
76 /* Xframe chipset revisions */
77 #define XGE_TYPE_XENA	1	/* Xframe */
78 #define XGE_TYPE_HERC	2	/* Xframe-II */
79
80 #define XGE_PCISIZE_XENA	26
81 #define XGE_PCISIZE_HERC	64
82
83 /*
84  * Some tunable constants, tune with care!
85  */
86 #define RX_MODE		RX_MODE_1 /* Receive mode (buffer usage, see below) */
87 #define NRXDESCS	1016	/* # of receive descriptors (requested) */
88 #define NTXDESCS	2048	/* Number of transmit descriptors */
89 #define NTXFRAGS	100	/* Max fragments per packet */
90
91 /*
92  * Receive buffer modes; 1, 3 or 5 buffers.
93  */
94 #define RX_MODE_1	1
95 #define RX_MODE_3	3
96 #define RX_MODE_5	5
97
98 /*
99  * Use clever macros to avoid a bunch of #ifdef's.
100  */
101 #define XCONCAT3(x,y,z)	x ## y ## z
102 #define CONCAT3(x,y,z)	XCONCAT3(x,y,z)
103 #define NDESC_BUFMODE	CONCAT3(NDESC_,RX_MODE,BUFMODE)
104 #define rxd_4k		CONCAT3(rxd,RX_MODE,_4k)
105 /* XXX */
106 #if 0
107 #define rxdesc		___CONCAT(rxd,RX_MODE)
108 #endif
109 #define rxdesc		rxd1
110
111 #define NEXTTX(x)	(((x)+1) % NTXDESCS)
112 #define NRXFRAGS	RX_MODE /* hardware imposed frags */
113 #define NRXPAGES	((NRXDESCS/NDESC_BUFMODE)+1)
114 #define NRXREAL		(NRXPAGES*NDESC_BUFMODE)
115 #define RXMAPSZ		(NRXPAGES*PAGE_SIZE)
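
/*
 * [editor note] With RX_MODE == RX_MODE_1 the token pasting above
 * resolves, per the macro expansions recorded by the analyzer, to:
 *   NDESC_BUFMODE -> NDESC_1BUFMODE, i.e. 127 descriptors per 4k page
 *   rxd_4k        -> rxd1_4k
 *   NRXPAGES      -> (1016/127)+1 = 9
 *   NRXREAL       -> 9*127 = 1143
 *   RXMAPSZ       -> 9*PAGE_SIZE
 */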
116
117 /*
118  * Magic to fix a bug when the MAC address cannot be read correctly.
119  * This came from the Linux driver.
120  */
121 static const uint64_t xge_fix_mac[] = {
122 0x0060000000000000ULL, 0x0060600000000000ULL,
123 0x0040600000000000ULL, 0x0000600000000000ULL,
124 0x0020600000000000ULL, 0x0060600000000000ULL,
125 0x0020600000000000ULL, 0x0060600000000000ULL,
126 0x0020600000000000ULL, 0x0060600000000000ULL,
127 0x0020600000000000ULL, 0x0060600000000000ULL,
128 0x0020600000000000ULL, 0x0060600000000000ULL,
129 0x0020600000000000ULL, 0x0060600000000000ULL,
130 0x0020600000000000ULL, 0x0060600000000000ULL,
131 0x0020600000000000ULL, 0x0060600000000000ULL,
132 0x0020600000000000ULL, 0x0060600000000000ULL,
133 0x0020600000000000ULL, 0x0060600000000000ULL,
134 0x0020600000000000ULL, 0x0000600000000000ULL,
135 0x0040600000000000ULL, 0x0060600000000000ULL,
136 };
137
138 /*
139 * Constants to be programmed into Hercules's registers, to configure
140 * the XGXS transceiver.
141 */
142 static const uint64_t xge_herc_dtx_cfg[] = {
143 0x8000051536750000ULL, 0x80000515367500E0ULL,
144 0x8000051536750004ULL, 0x80000515367500E4ULL,
145
146 0x80010515003F0000ULL, 0x80010515003F00E0ULL,
147 0x80010515003F0004ULL, 0x80010515003F00E4ULL,
148
149 0x801205150D440000ULL, 0x801205150D4400E0ULL,
150 0x801205150D440004ULL, 0x801205150D4400E4ULL,
151
152 0x80020515F2100000ULL, 0x80020515F21000E0ULL,
153 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
154 };
155
156 static const uint64_t xge_xena_dtx_cfg[] = {
157 0x8000051500000000ULL, 0x80000515000000E0ULL,
158 0x80000515D9350004ULL, 0x80000515D93500E4ULL,
159
160 0x8001051500000000ULL, 0x80010515000000E0ULL,
161 0x80010515001E0004ULL, 0x80010515001E00E4ULL,
162
163 0x8002051500000000ULL, 0x80020515000000E0ULL,
164 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
165 };
166
167 struct xge_softc {
168     struct device sc_dev;
169     struct arpcom sc_arpcom;
170     struct ifmedia xena_media;
171
172     void *sc_ih;
173
174     bus_dma_tag_t sc_dmat;
175     bus_space_tag_t sc_st;
176     bus_space_handle_t sc_sh;
177     bus_space_tag_t sc_txt;
178     bus_space_handle_t sc_txh;
179
180     pcireg_t sc_pciregs[16];
181
182     int xge_type; /* chip type */
183     int xge_if_flags;
184
185     /* Transmit structures */
186     struct txd *sc_txd[NTXDESCS];	/* transmit frags array */
187     bus_addr_t sc_txdp[NTXDESCS];	/* dva of transmit frags */
188     bus_dmamap_t sc_txm[NTXDESCS];	/* transmit frags map */
189     struct mbuf *sc_txb[NTXDESCS];	/* transmit mbuf pointer */
190     int sc_nexttx, sc_lasttx;
191     bus_dmamap_t sc_txmap;		/* transmit descriptor map */
192
193     /* Receive data */
194     bus_dmamap_t sc_rxmap;		/* receive descriptor map */
195     struct rxd_4k *sc_rxd_4k[NRXPAGES];	/* receive desc pages */
196     bus_dmamap_t sc_rxm[NRXREAL];	/* receive buffer map */
197     struct mbuf *sc_rxb[NRXREAL];	/* mbufs on rx descriptors */
198     int sc_nextrx;			/* next descriptor to check */
199 };
200
201 #ifdef XGE_DEBUG
202 #define DPRINTF(x)	do { if (xgedebug) printf x ; } while (0)
203 #define DPRINTFN(n,x)	do { if (xgedebug >= (n)) printf x ; } while (0)
204 int xgedebug = 0;
205 #else
206 #define DPRINTF(x)
207 #define DPRINTFN(n,x)
208 #endif
209
210 int xge_match(struct device *, void *, void *);
211 void xge_attach(struct device *, struct device *, void *);
212 int xge_alloc_txmem(struct xge_softc *);
213 int xge_alloc_rxmem(struct xge_softc *);
214 void xge_start(struct ifnet *);
215 void xge_stop(struct ifnet *, int);
216 int xge_add_rxbuf(struct xge_softc *, int);
217 void xge_setmulti(struct xge_softc *);
218 void xge_setpromisc(struct xge_softc *);
219 int xge_setup_xgxs_xena(struct xge_softc *);
220 int xge_setup_xgxs_herc(struct xge_softc *);
221 int xge_ioctl(struct ifnet *, u_long, caddr_t);
222 int xge_init(struct ifnet *);
223 void xge_ifmedia_status(struct ifnet *, struct ifmediareq *);
224 int xge_xgmii_mediachange(struct ifnet *);
225 void xge_enable(struct xge_softc *);
226 int xge_intr(void *);
227
228 /*
229  * Helpers to address registers.
230  */
231 #define PIF_WCSR(csr, val)	pif_wcsr(sc, csr, val)
232 #define PIF_RCSR(csr)		pif_rcsr(sc, csr)
233 #define TXP_WCSR(csr, val)	txp_wcsr(sc, csr, val)
234 #define PIF_WKEY(csr, val)	pif_wkey(sc, csr, val)
235
236 static inline void
237 pif_wcsr(struct xge_softc *sc, bus_size_t csr, uint64_t val)
238 {
239 #if defined(__LP64__)
240     bus_space_write_raw_8(sc->sc_st, sc->sc_sh, csr, val);
241 #else
242     uint32_t lval, hval;
243
244     lval = val&0xffffffff;
245     hval = val>>32;
246
247 #if BYTE_ORDER == LITTLE_ENDIAN
248     bus_space_write_raw_4(sc->sc_st, sc->sc_sh, csr, lval);
249     bus_space_write_raw_4(sc->sc_st, sc->sc_sh, csr+4, hval);
250 #else
251     bus_space_write_raw_4(sc->sc_st, sc->sc_sh, csr+4, lval);
252     bus_space_write_raw_4(sc->sc_st, sc->sc_sh, csr, hval);
253 #endif
254 #endif
255 }
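
/*
 * [editor note] On non-__LP64__ builds the 64-bit CSR write above is
 * split into two raw 32-bit writes: the low word goes to 'csr' and the
 * high word to 'csr+4' on little-endian hosts, and the reverse on
 * big-endian hosts, so the device sees the same byte layout either way.
 */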
256
257 static inline uint64_t
258 pif_rcsr(struct xge_softc *sc, bus_size_t csr)
259 {
260     uint64_t val;
261 #if defined(__LP64__)
262     val = bus_space_read_raw_8(sc->sc_st, sc->sc_sh, csr);
263 #else
264     uint64_t val2;
265
266     val = bus_space_read_raw_4(sc->sc_st, sc->sc_sh, csr);
267     val2 = bus_space_read_raw_4(sc->sc_st, sc->sc_sh, csr+4);
268 #if BYTE_ORDER == LITTLE_ENDIAN
269     val |= (val2 << 32);
270 #else
271     val = (val << 32 | val2);
272 #endif
273 #endif
274     return (val);
275 }
276
277 static inline void
278 txp_wcsr(struct xge_softc *sc, bus_size_t csr, uint64_t val)
279 {
280 #if defined(__LP64__)
281     bus_space_write_raw_8(sc->sc_txt, sc->sc_txh, csr, val);
282 #else
283     uint32_t lval, hval;
284
285     lval = val&0xffffffff;
286     hval = val>>32;
287
288 #if BYTE_ORDER == LITTLE_ENDIAN
289     bus_space_write_raw_4(sc->sc_txt, sc->sc_txh, csr, lval);
290     bus_space_write_raw_4(sc->sc_txt, sc->sc_txh, csr+4, hval);
291 #else
292     bus_space_write_raw_4(sc->sc_txt, sc->sc_txh, csr, hval);
293     bus_space_write_raw_4(sc->sc_txt, sc->sc_txh, csr+4, lval);
294 #endif
295 #endif
296 }
297
298 static inline void
299 pif_wkey(struct xge_softc *sc, bus_size_t csr, uint64_t val)
300 {
301 #if defined(__LP64__)
302     if (sc->xge_type == XGE_TYPE_XENA)
303         PIF_WCSR(RMAC_CFG_KEY, RMAC_KEY_VALUE);
304
305     bus_space_write_raw_8(sc->sc_st, sc->sc_sh, csr, val);
306 #else
307     uint32_t lval, hval;
308
309     lval = val&0xffffffff;
310     hval = val>>32;
311
312     if (sc->xge_type == XGE_TYPE_XENA)
313         PIF_WCSR(RMAC_CFG_KEY, RMAC_KEY_VALUE);
314
315 #if BYTE_ORDER == LITTLE_ENDIAN
316     bus_space_write_raw_4(sc->sc_st, sc->sc_sh, csr, lval);
317 #else
318     bus_space_write_raw_4(sc->sc_st, sc->sc_sh, csr, hval);
319 #endif
320
321     if (sc->xge_type == XGE_TYPE_XENA)
322         PIF_WCSR(RMAC_CFG_KEY, RMAC_KEY_VALUE);
323 #if BYTE_ORDER == LITTLE_ENDIAN
324     bus_space_write_raw_4(sc->sc_st, sc->sc_sh, csr+4, hval);
325 #else
326     bus_space_write_raw_4(sc->sc_st, sc->sc_sh, csr+4, lval);
327 #endif
328 #endif
329 }
330
331 struct cfattach xge_ca = {
332     sizeof(struct xge_softc), xge_match, xge_attach
333 };
334
335 struct cfdriver xge_cd = {
336     NULL, "xge", DV_IFNET
337 };
338
339 #define XNAME	sc->sc_dev.dv_xname
340
341 #define XGE_RXSYNC(desc, what) \
342     bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap, \
343         (desc/NDESC_BUFMODE) * XGE_PAGE + sizeof(struct rxdesc) * \
344         (desc%NDESC_BUFMODE), sizeof(struct rxdesc), what)
345 #define XGE_RXD(desc)	&sc->sc_rxd_4k[desc/NDESC_BUFMODE]-> \
346     r4_rxd[desc%NDESC_BUFMODE]
347
348 /*
349  * Non-tunable constants.
350  */
351 #define XGE_MAX_FRAMELEN	9622
352 #define XGE_MAX_MTU	(XGE_MAX_FRAMELEN - ETHER_HDR_LEN - \
353     ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN)
354
355 const struct pci_matchid xge_devices[] = {
356     { PCI_VENDOR_NETERION, PCI_PRODUCT_NETERION_XFRAME },
357     { PCI_VENDOR_NETERION, PCI_PRODUCT_NETERION_XFRAME_2 }
358 };
359
360 int
361 xge_match(struct device *parent, void *match, void *aux)
362 {
363     return (pci_matchbyid((struct pci_attach_args *)aux, xge_devices,
364         nitems(xge_devices)));
365 }
366
367 void
368 xge_attach(struct device *parent, struct device *self, void *aux)
369 {
370     struct pci_attach_args *pa = aux;
371     struct xge_softc *sc;
372     struct ifnet *ifp;
373     pcireg_t memtype;
374     pci_intr_handle_t ih;
375     const char *intrstr = NULL;
376     pci_chipset_tag_t pc = pa->pa_pc;
377     uint8_t enaddr[ETHER_ADDR_LEN];
378     uint64_t val;
379     int i;
380
381     sc = (struct xge_softc *)self;
382
383     sc->sc_dmat = pa->pa_dmat;
384
385     if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_NETERION_XFRAME)
386         sc->xge_type = XGE_TYPE_XENA;
387     else
388         sc->xge_type = XGE_TYPE_HERC;
389
390     /* Get BAR0 address */
391     memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, XGE_PIF_BAR);
392     if (pci_mapreg_map(pa, XGE_PIF_BAR, memtype, 0,
393         &sc->sc_st, &sc->sc_sh, 0, 0, 0)) {
394         printf(": unable to map PIF BAR registers\n");
395         return;
396     }
397
398     memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, XGE_TXP_BAR);
399     if (pci_mapreg_map(pa, XGE_TXP_BAR, memtype, 0,
400         &sc->sc_txt, &sc->sc_txh, 0, 0, 0)) {
401         printf(": unable to map TXP BAR registers\n");
402         return;
403     }
404
405     if (sc->xge_type == XGE_TYPE_XENA) {
406         /* Save PCI config space */
407         for (i = 0; i < XGE_PCISIZE_XENA; i += 4)
408             sc->sc_pciregs[i/4] = pci_conf_read(pa->pa_pc, pa->pa_tag, i);
409     }
410
411 #if BYTE_ORDER == LITTLE_ENDIAN
412     val = (uint64_t)0xFFFFFFFFFFFFFFFFULL;
413     val &= ~(TxF_R_SE|RxF_W_SE);
414     PIF_WCSR(SWAPPER_CTRL, val);
415     PIF_WCSR(SWAPPER_CTRL, val);
416 #endif
417     if ((val = PIF_RCSR(PIF_RD_SWAPPER_Fb)) != SWAPPER_MAGIC) {
418         printf(": failed configuring endian (read), %llx != %llx!\n",
419             (unsigned long long)val, SWAPPER_MAGIC);
420     }
421
422     PIF_WCSR(XMSI_ADDRESS, SWAPPER_MAGIC);
423     if ((val = PIF_RCSR(XMSI_ADDRESS)) != SWAPPER_MAGIC) {
424         printf(": failed configuring endian (write), %llx != %llx!\n",
425             (unsigned long long)val, SWAPPER_MAGIC);
426     }
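
/*
 * [editor note] The two readback tests above verify the swapper setup:
 * PIF_RD_SWAPPER_Fb and the XMSI_ADDRESS round-trip must both return
 * SWAPPER_MAGIC (0x0123456789abcdef). With a misprogrammed swapper on a
 * little-endian host the bytes would come back reversed, e.g. as
 * 0xefcdab8967452301.
 */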
427
428     /*
429      * Fix for all "FFs" MAC address problems observed on
430      * Alpha platforms. Not needed for Herc.
431      */
432     if (sc->xge_type == XGE_TYPE_XENA) {
433         /*
434          * The MAC addr may be all FF's, which is not good.
435          * Resolve it by writing some magics to GPIO_CONTROL and
436          * force a chip reset to read in the serial eeprom again.
437          */
438         for (i = 0; i < nitems(xge_fix_mac); i++) {
439             PIF_WCSR(GPIO_CONTROL, xge_fix_mac[i]);
440             PIF_RCSR(GPIO_CONTROL);
441         }
442
443         /*
444          * Reset the chip and restore the PCI registers.
445          */
446         PIF_WCSR(SW_RESET, 0xa5a5a50000000000ULL);
447         DELAY(500000);
448         for (i = 0; i < XGE_PCISIZE_XENA; i += 4)
449             pci_conf_write(pa->pa_pc, pa->pa_tag, i, sc->sc_pciregs[i/4]);
450
451         /*
452          * Restore the byte order registers.
453          */
454 #if BYTE_ORDER == LITTLE_ENDIAN
455         val = (uint64_t)0xFFFFFFFFFFFFFFFFULL;
456         val &= ~(TxF_R_SE|RxF_W_SE);
457         PIF_WCSR(SWAPPER_CTRL, val);
458         PIF_WCSR(SWAPPER_CTRL, val);
459 #endif
460
461         if ((val = PIF_RCSR(PIF_RD_SWAPPER_Fb)) != SWAPPER_MAGIC) {
462             printf(": failed configuring endian2 (read), %llx != %llx!\n",
463                 (unsigned long long)val, SWAPPER_MAGIC);
464             return;
465         }
466
467         PIF_WCSR(XMSI_ADDRESS, SWAPPER_MAGIC);
468         if ((val = PIF_RCSR(XMSI_ADDRESS)) != SWAPPER_MAGIC) {
469             printf(": failed configuring endian2 (write), %llx != %llx!\n",
470                 (unsigned long long)val, SWAPPER_MAGIC);
471             return;
472         }
473     }
474
475     /*
476      * XGXS initialization.
477      */
478
479     /*
480      * For Herc, bring EOI out of reset before XGXS.
481      */
482     if (sc->xge_type == XGE_TYPE_HERC) {
483         val = PIF_RCSR(SW_RESET);
484         val &= 0xffff00ffffffffffULL;
485         PIF_WCSR(SW_RESET,val);
486         delay(1000*1000); /* wait for 1 sec */
487     }
488
489     /* 29, Bring adapter out of reset */
490     val = PIF_RCSR(SW_RESET);
491     val &= 0xffffff00ffffffffULL;
492     PIF_WCSR(SW_RESET, val);
493     DELAY(500000);
494
495     /* Ensure that it's safe to access registers by checking
496      * RIC_RUNNING bit is reset. Check is valid only for XframeII.
497      */
498     if (sc->xge_type == XGE_TYPE_HERC){
499         for (i = 0; i < 50; i++) {
500             val = PIF_RCSR(ADAPTER_STATUS);
501             if (!(val & RIC_RUNNING))
502                 break;
503             delay(20*1000);
504         }
505
506         if (i == 50) {
507             printf(": not safe to access registers\n");
508             return;
509         }
510     }
511
512     /* 30, configure XGXS transceiver */
513     if (sc->xge_type == XGE_TYPE_XENA)
514         xge_setup_xgxs_xena(sc);
515     else if(sc->xge_type == XGE_TYPE_HERC)
516         xge_setup_xgxs_herc(sc);
517
518     /* 33, program MAC address (not needed here) */
519     /* Get ethernet address */
520     PIF_WCSR(RMAC_ADDR_CMD_MEM,
521         RMAC_ADDR_CMD_MEM_STR|RMAC_ADDR_CMD_MEM_OFF(0));
522     while (PIF_RCSR(RMAC_ADDR_CMD_MEM) & RMAC_ADDR_CMD_MEM_STR)
523         ;
524     val = PIF_RCSR(RMAC_ADDR_DATA0_MEM);
525     for (i = 0; i < ETHER_ADDR_LEN; i++)
526         enaddr[i] = (uint8_t)(val >> (56 - (8*i)));
527
528     /*
529      * Get memory for transmit descriptor lists.
530      */
531     if (xge_alloc_txmem(sc)) {
532         printf(": failed allocating txmem.\n");
533         return;
534     }
535
536     /* 9 and 10 - set FIFO number/prio */
537     PIF_WCSR(TX_FIFO_P0, TX_FIFO_LEN0(NTXDESCS));
538     PIF_WCSR(TX_FIFO_P1, 0ULL);
539     PIF_WCSR(TX_FIFO_P2, 0ULL);
540     PIF_WCSR(TX_FIFO_P3, 0ULL);
541
542     /* 11, XXX set round-robin prio? */
543
544     /* 12, enable transmit FIFO */
545     val = PIF_RCSR(TX_FIFO_P0);
546     val |= TX_FIFO_ENABLE;
547     PIF_WCSR(TX_FIFO_P0, val);
548
549     /* 13, disable some error checks */
550     PIF_WCSR(TX_PA_CFG,
551         TX_PA_CFG_IFR|TX_PA_CFG_ISO|TX_PA_CFG_ILC|TX_PA_CFG_ILE);
552
553     /* Create transmit DMA maps */
554     for (i = 0; i < NTXDESCS; i++) {
555         if (bus_dmamap_create(sc->sc_dmat, XGE_MAX_FRAMELEN,
556             NTXFRAGS, XGE_MAX_FRAMELEN, 0, BUS_DMA_NOWAIT,
557             &sc->sc_txm[i])) {
558             printf(": cannot create TX DMA maps\n");
559             return;
560         }
561     }
562
563     sc->sc_lasttx = NTXDESCS-1;
564
565     /*
566      * RxDMA initialization.
567      * Only use one out of 8 possible receive queues.
568      */
569     /* allocate rx descriptor memory */
570     if (xge_alloc_rxmem(sc)) {
571         printf(": failed allocating rxmem\n");
572         return;
573     }
574
575     /* Create receive buffer DMA maps */
576     for (i = 0; i < NRXREAL; i++) {
577         if (bus_dmamap_create(sc->sc_dmat, XGE_MAX_FRAMELEN,
578             NRXFRAGS, XGE_MAX_FRAMELEN, 0, BUS_DMA_NOWAIT,
579             &sc->sc_rxm[i])) {
580             printf(": cannot create RX DMA maps\n");
581             return;
582         }
583     }
584
585     /* allocate mbufs to receive descriptors */
586     for (i = 0; i < NRXREAL; i++)
587         if (xge_add_rxbuf(sc, i))
588             panic("out of mbufs too early");
589
590     /* 14, setup receive ring priority */
591     PIF_WCSR(RX_QUEUE_PRIORITY, 0ULL); /* only use one ring */
592
593     /* 15, setup receive ring round-robin calendar */
594     PIF_WCSR(RX_W_ROUND_ROBIN_0, 0ULL); /* only use one ring */
595     PIF_WCSR(RX_W_ROUND_ROBIN_1, 0ULL);
596     PIF_WCSR(RX_W_ROUND_ROBIN_2, 0ULL);
597     PIF_WCSR(RX_W_ROUND_ROBIN_3, 0ULL);
598     PIF_WCSR(RX_W_ROUND_ROBIN_4, 0ULL);
599
600     /* 16, write receive ring start address */
601     PIF_WCSR(PRC_RXD0_0, (uint64_t)sc->sc_rxmap->dm_segs[0].ds_addr);
602     /* PRC_RXD0_[1-7] are not used */
603
604     /* 17, Setup alarm registers */
605     PIF_WCSR(PRC_ALARM_ACTION, 0ULL); /* Default everything to retry */
606
607     /* 18, init receive ring controller */
608 #if RX_MODE == RX_MODE_1
609     val = RING_MODE_1;
610 #elif RX_MODE == RX_MODE_3
611     val = RING_MODE_3;
612 #else /* RX_MODE == RX_MODE_5 */
613     val = RING_MODE_5;
614 #endif
615     PIF_WCSR(PRC_CTRL_0, RC_IN_SVC|val);
616     /* leave 1-7 disabled */
617     /* XXXX snoop configuration? */
618
619     /* 19, set chip memory assigned to the queue */
620     if (sc->xge_type == XGE_TYPE_XENA) {
621         /* all 64M to queue 0 */
622         PIF_WCSR(RX_QUEUE_CFG, MC_QUEUE(0, 64));
623     } else {
624         /* all 32M to queue 0 */
625         PIF_WCSR(RX_QUEUE_CFG, MC_QUEUE(0, 32));
626     }
627
628     /* 20, setup RLDRAM parameters */
629     /* do not touch it for now */
630
631     /* 21, setup pause frame thresholds */
632     /* so not touch the defaults */
633     /* XXX - must 0xff be written as stated in the manual? */
634
635     /* 22, configure RED */
636     /* we do not want to drop packets, so ignore */
637
638     /* 23, initiate RLDRAM */
639     val = PIF_RCSR(MC_RLDRAM_MRS);
640     val |= MC_QUEUE_SIZE_ENABLE|MC_RLDRAM_MRS_ENABLE;
641     PIF_WCSR(MC_RLDRAM_MRS, val);
642     DELAY(1000);
643
644     /*
645      * Setup interrupt policies.
646      */
647     /* 40, Transmit interrupts */
648     PIF_WCSR(TTI_DATA1_MEM, TX_TIMER_VAL(0x1ff) | TX_TIMER_AC |
649         TX_URNG_A(5) | TX_URNG_B(20) | TX_URNG_C(48));
650     PIF_WCSR(TTI_DATA2_MEM,
651         TX_UFC_A(25) | TX_UFC_B(64) | TX_UFC_C(128) | TX_UFC_D(512));
652     PIF_WCSR(TTI_COMMAND_MEM, TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE);
653     while (PIF_RCSR(TTI_COMMAND_MEM) & TTI_CMD_MEM_STROBE)
654         ;
655
656     /* 41, Receive interrupts */
657     PIF_WCSR(RTI_DATA1_MEM, RX_TIMER_VAL(0x800) | RX_TIMER_AC |
658         RX_URNG_A(5) | RX_URNG_B(20) | RX_URNG_C(50));
659     PIF_WCSR(RTI_DATA2_MEM,
660         RX_UFC_A(64) | RX_UFC_B(128) | RX_UFC_C(256) | RX_UFC_D(512));
661     PIF_WCSR(RTI_COMMAND_MEM, RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE);
662     while (PIF_RCSR(RTI_COMMAND_MEM) & RTI_CMD_MEM_STROBE)
663         ;
664
665     /*
666      * Setup media stuff.
667      */
668     ifmedia_init(&sc->xena_media, IFM_IMASK, xge_xgmii_mediachange,
669         xge_ifmedia_status);
670     ifmedia_add(&sc->xena_media, IFM_ETHER|IFM_10G_SR, 0, NULL);
671     ifmedia_set(&sc->xena_media, IFM_ETHER|IFM_10G_SR);
672
673     ifp = &sc->sc_arpcom.ac_if;
674     strlcpy(ifp->if_xname, XNAME, IFNAMSIZ);
675     memcpy(sc->sc_arpcom.ac_enaddr, enaddr, ETHER_ADDR_LEN);
676     ifp->if_baudrate = IF_Gbps(10);
677     ifp->if_softc = sc;
678     ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
679     ifp->if_ioctl = xge_ioctl;
680     ifp->if_start = xge_start;
681     ifp->if_hardmtu = XGE_MAX_MTU;
682     ifq_set_maxlen(&ifp->if_snd, NTXDESCS - 1);
683
684     ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_CSUM_IPv4 |
685         IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;
686
687 #if NVLAN > 0
688     ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
689 #endif
690
691     /*
692      * Attach the interface.
693      */
694     if_attach(ifp);
695     ether_ifattach(ifp);
696
697     /*
698      * Setup interrupt vector before initializing.
699      */
700     if (pci_intr_map(pa, &ih)) {
701         printf(": unable to map interrupt\n");
702         return;
703     }
704     intrstr = pci_intr_string(pc, ih);
705     if ((sc->sc_ih =
706         pci_intr_establish(pc, ih, IPL_NET, xge_intr, sc, XNAME)) == NULL) {
707         printf(": unable to establish interrupt at %s\n",
708             intrstr ? intrstr : "<unknown>");
709         return;
710     }
711     printf(": %s, address %s\n", intrstr, ether_sprintf(enaddr));
712 }
713
714 void
715 xge_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr)
716 {
717     struct xge_softc *sc = ifp->if_softc;
718     uint64_t reg;
719
720     ifmr->ifm_status = IFM_AVALID;
721     ifmr->ifm_active = IFM_ETHER|IFM_10G_SR;
722
723     reg = PIF_RCSR(ADAPTER_STATUS);
724     if ((reg & (RMAC_REMOTE_FAULT|RMAC_LOCAL_FAULT)) == 0)
725         ifmr->ifm_status |= IFM_ACTIVE;
726 }
727
728 int
729 xge_xgmii_mediachange(struct ifnet *ifp)
730 {
731     return (0);
732 }
733
734 void
735 xge_enable(struct xge_softc *sc)
736 {
737     uint64_t val;
738
739     /* 2, enable adapter */
740     val = PIF_RCSR(ADAPTER_CONTROL);
741     val |= ADAPTER_EN;
742     PIF_WCSR(ADAPTER_CONTROL, val);
743
744     /* 3, light the card enable led */
745     val = PIF_RCSR(ADAPTER_CONTROL);
746     val |= LED_ON;
747     PIF_WCSR(ADAPTER_CONTROL, val);
748 #ifdef XGE_DEBUG
749     printf("%s: link up\n", XNAME);
750 #endif
751 }
752
753 int
754 xge_init(struct ifnet *ifp)
755 {
756     struct xge_softc *sc = ifp->if_softc;
757     uint64_t val;
758     int s;
759
760     s = splnet();
761
762     /*
763      * Cancel any pending I/O
764      */
765     xge_stop(ifp, 0);
766
767     /* 31+32, setup MAC config */
768     PIF_WKEY(MAC_CFG, TMAC_EN|RMAC_EN|TMAC_APPEND_PAD|RMAC_STRIP_FCS|
769         RMAC_BCAST_EN|RMAC_DISCARD_PFRM);
770
771     DELAY(1000);
772
773     /* 54, ensure that the adapter is 'quiescent' */
774     val = PIF_RCSR(ADAPTER_STATUS);
775     if ((val & QUIESCENT) != QUIESCENT) {
776 #if 0
777         char buf[200];
778 #endif
779         printf("%s: adapter not quiescent, aborting\n", XNAME);
780         val = (val & QUIESCENT) ^ QUIESCENT;
781 #if 0
782         bitmask_snprintf(val, QUIESCENT_BMSK, buf, sizeof buf);
783         printf("%s: ADAPTER_STATUS missing bits %s\n", XNAME, buf);
784 #endif
785         splx(s);
786         return (1);
787     }
788
789     if (!(ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)) {
790         /* disable VLAN tag stripping */
791         val = PIF_RCSR(RX_PA_CFG);
792         val &= ~STRIP_VLAN_TAG;
793         PIF_WCSR(RX_PA_CFG, val);
794     }
795
796     /* set MRU */
797     PIF_WCSR(RMAC_MAX_PYLD_LEN, RMAC_PYLD_LEN(XGE_MAX_FRAMELEN));
798
799     /* 56, enable the transmit laser */
800     val = PIF_RCSR(ADAPTER_CONTROL);
801     val |= EOI_TX_ON;
802     PIF_WCSR(ADAPTER_CONTROL, val);
803
804     xge_enable(sc);
805
806     /*
807      * Enable all interrupts
808      */
809     PIF_WCSR(TX_TRAFFIC_MASK, 0);
810     PIF_WCSR(RX_TRAFFIC_MASK, 0);
811     PIF_WCSR(TXPIC_INT_MASK, 0);
812     PIF_WCSR(RXPIC_INT_MASK, 0);
813
814     PIF_WCSR(MAC_INT_MASK, MAC_TMAC_INT); /* only from RMAC */
815     PIF_WCSR(MAC_RMAC_ERR_REG, RMAC_LINK_STATE_CHANGE_INT);
816     PIF_WCSR(MAC_RMAC_ERR_MASK, ~RMAC_LINK_STATE_CHANGE_INT);
817     PIF_WCSR(GENERAL_INT_MASK, 0);
818
819     xge_setpromisc(sc);
820
821     xge_setmulti(sc);
822
823     /* Done... */
824     ifp->if_flags |= IFF_RUNNING;
825     ifq_clr_oactive(&ifp->if_snd);
826
827     splx(s);
828
829     return (0);
830 }
831
832 void
833 xge_stop(struct ifnet *ifp, int disable)
834 {
835     struct xge_softc *sc = ifp->if_softc;
836     uint64_t val;
837
838     ifp->if_flags &= ~IFF_RUNNING;
839     ifq_clr_oactive(&ifp->if_snd);
840
841     val = PIF_RCSR(ADAPTER_CONTROL);
842     val &= ~ADAPTER_EN;
843     PIF_WCSR(ADAPTER_CONTROL, val);
844
845     while ((PIF_RCSR(ADAPTER_STATUS) & QUIESCENT) != QUIESCENT)
846         ;
847 }
848
849 int
850 xge_intr(void *pv)
851 {
852     struct xge_softc *sc = pv;
853     struct txd *txd;
854     struct ifnet *ifp = &sc->sc_arpcom.ac_if;
855     struct mbuf_list ml = MBUF_LIST_INITIALIZER();
856     bus_dmamap_t dmp;
857     uint64_t val;
858     int i, lasttx, plen;
859
860     val = PIF_RCSR(GENERAL_INT_STATUS);
861     if (val == 0)
862         return (0); /* no interrupt here */
863
864     PIF_WCSR(GENERAL_INT_STATUS, val);
865
866     if ((val = PIF_RCSR(MAC_RMAC_ERR_REG)) & RMAC_LINK_STATE_CHANGE_INT) {
Although the value stored to 'val' is used in the enclosing expression, the value is never actually read from 'val'
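/*
 * [editor note] 'val' is unconditionally overwritten at lines 875 and
 * 880 before the value stored here is read again, so the assignment is
 * dead. A sketch of the fix (not committed code) is to test the
 * register value directly:
 *
 *     if (PIF_RCSR(MAC_RMAC_ERR_REG) & RMAC_LINK_STATE_CHANGE_INT) {
 */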
867         /* Wait for quiescence */
868 #ifdef XGE_DEBUG
869         printf("%s: link down\n", XNAME);
870 #endif
871         while ((PIF_RCSR(ADAPTER_STATUS) & QUIESCENT) != QUIESCENT)
872             ;
873         PIF_WCSR(MAC_RMAC_ERR_REG, RMAC_LINK_STATE_CHANGE_INT);
874
875         val = PIF_RCSR(ADAPTER_STATUS);
876         if ((val & (RMAC_REMOTE_FAULT|RMAC_LOCAL_FAULT)) == 0)
877             xge_enable(sc); /* Only if link restored */
878     }
879
880     if ((val = PIF_RCSR(TX_TRAFFIC_INT)))
881         PIF_WCSR(TX_TRAFFIC_INT, val); /* clear interrupt bits */
882     /*
883      * Collect sent packets.
884      */
885     lasttx = sc->sc_lasttx;
886     while ((i = NEXTTX(sc->sc_lasttx)) != sc->sc_nexttx) {
887         txd = sc->sc_txd[i];
888         dmp = sc->sc_txm[i];
889
890         bus_dmamap_sync(sc->sc_dmat, dmp, 0,
891             dmp->dm_mapsize,
892             BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
893
894         if (txd->txd_control1 & TXD_CTL1_OWN) {
895             bus_dmamap_sync(sc->sc_dmat, dmp, 0,
896                 dmp->dm_mapsize, BUS_DMASYNC_PREREAD);
897             break;
898         }
899         bus_dmamap_unload(sc->sc_dmat, dmp);
900         m_freem(sc->sc_txb[i]);
901         sc->sc_lasttx = i;
902     }
903
904     if (sc->sc_lasttx != lasttx)
905         ifq_clr_oactive(&ifp->if_snd);
906
907     /* Try to get more packets on the wire */
908     xge_start(ifp);
909
910     /* clear interrupt bits */
911     if ((val = PIF_RCSR(RX_TRAFFIC_INT)))
912         PIF_WCSR(RX_TRAFFIC_INT, val);
913
914     for (;;) {
915         struct rxdesc *rxd;
916         struct mbuf *m;
917
918         XGE_RXSYNC(sc->sc_nextrx,
919             BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
920
921         rxd = XGE_RXD(sc->sc_nextrx);
922         if (rxd->rxd_control1 & RXD_CTL1_OWN) {
923             XGE_RXSYNC(sc->sc_nextrx, BUS_DMASYNC_PREREAD);
924             break;
925         }
926
927         /* got a packet */
928         m = sc->sc_rxb[sc->sc_nextrx];
929 #if RX_MODE == RX_MODE_1
930         plen = m->m_len = RXD_CTL2_BUF0SIZ(rxd->rxd_control2);
931 #elif RX_MODE == RX_MODE_3
932 #error Fix rxmodes in xge_intr
933 #elif RX_MODE == RX_MODE_5
934         plen = m->m_len = RXD_CTL2_BUF0SIZ(rxd->rxd_control2);
935         plen += m->m_next->m_len = RXD_CTL2_BUF1SIZ(rxd->rxd_control2);
936         plen += m->m_next->m_next->m_len =
937             RXD_CTL2_BUF2SIZ(rxd->rxd_control2);
938         plen += m->m_next->m_next->m_next->m_len =
939             RXD_CTL3_BUF3SIZ(rxd->rxd_control3);
940         plen += m->m_next->m_next->m_next->m_next->m_len =
941             RXD_CTL3_BUF4SIZ(rxd->rxd_control3);
942 #endif
943         m->m_pkthdr.len = plen;
944
945         val = rxd->rxd_control1;
946
947         if (xge_add_rxbuf(sc, sc->sc_nextrx)) {
948             /* Failed, recycle this mbuf */
949 #if RX_MODE == RX_MODE_1
950             rxd->rxd_control2 = RXD_MKCTL2(MCLBYTES, 0, 0);
951             rxd->rxd_control1 = RXD_CTL1_OWN;
952 #elif RX_MODE == RX_MODE_3
953 #elif RX_MODE == RX_MODE_5
954 #endif
955             XGE_RXSYNC(sc->sc_nextrx,
956                 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
957             ifp->if_ierrors++;
958             break;
959         }
960
961         if (RXD_CTL1_PROTOS(val) & RXD_CTL1_P_IPv4)
962             m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
963         if (RXD_CTL1_PROTOS(val) & RXD_CTL1_P_TCP)
964             m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK;
965         if (RXD_CTL1_PROTOS(val) & RXD_CTL1_P_UDP)
966             m->m_pkthdr.csum_flags |= M_UDP_CSUM_IN_OK;
967
968 #if NVLAN > 0
969         if (RXD_CTL1_PROTOS(val) & RXD_CTL1_P_VLAN) {
970             m->m_pkthdr.ether_vtag =
971                 RXD_CTL2_VLANTAG(rxd->rxd_control2);
972             m->m_flags |= M_VLANTAG;
973         }
974 #endif
975
976         ml_enqueue(&ml, m);
977
978         if (++sc->sc_nextrx == NRXREAL)
979             sc->sc_nextrx = 0;
980     }
981
982     if_input(ifp, &ml);
983
984     return (1);
985 }
986
987 int
988 xge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
989 {
990     struct xge_softc *sc = ifp->if_softc;
991     struct ifreq *ifr = (struct ifreq *) data;
992     int s, error = 0;
993
994     s = splnet();
995
996     switch (cmd) {
997     case SIOCSIFADDR:
998         ifp->if_flags |= IFF_UP;
999         if (!(ifp->if_flags & IFF_RUNNING))
1000             xge_init(ifp);
1001         break;
1002
1003     case SIOCSIFFLAGS:
1004         if (ifp->if_flags & IFF_UP) {
1005             if (ifp->if_flags & IFF_RUNNING &&
1006                 (ifp->if_flags ^ sc->xge_if_flags) &
1007                  IFF_PROMISC) {
1008                 xge_setpromisc(sc);
1009             } else {
1010                 if (!(ifp->if_flags & IFF_RUNNING))
1011                     xge_init(ifp);
1012             }
1013         } else {
1014             if (ifp->if_flags & IFF_RUNNING)
1015                 xge_stop(ifp, 1);
1016         }
1017         sc->xge_if_flags = ifp->if_flags;
1018         break;
1019
1020     case SIOCGIFMEDIA:
1021     case SIOCSIFMEDIA:
1022         error = ifmedia_ioctl(ifp, ifr, &sc->xena_media, cmd);
1023         break;
1024
1025     default:
1026         error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
1027     }
1028
1029     if (error == ENETRESET) {
1030         if (ifp->if_flags & IFF_RUNNING)
1031             xge_setmulti(sc);
1032         error = 0;
1033     }
1034
1035     splx(s);
1036     return (error);
1037 }
1038
1039 void
1040 xge_setmulti(struct xge_softc *sc)
1041 {
1042     struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1043     struct arpcom *ac = &sc->sc_arpcom;
1044     struct ether_multi *enm;
1045     struct ether_multistep step;
1046     int i, numaddr = 1; /* first slot used for card unicast address */
1047     uint64_t val;
1048
1049     if (ac->ac_multirangecnt > 0)
1050         goto allmulti;
1051
1052     ETHER_FIRST_MULTI(step, ac, enm);
1053     while (enm != NULL) {
1054         if (numaddr == MAX_MCAST_ADDR)
1055             goto allmulti;
1056         for (val = 0, i = 0; i < ETHER_ADDR_LEN; i++) {
1057             val <<= 8;
1058             val |= enm->enm_addrlo[i];
1059         }
1060         PIF_WCSR(RMAC_ADDR_DATA0_MEM, val << 16);
1061         PIF_WCSR(RMAC_ADDR_DATA1_MEM, 0xFFFFFFFFFFFFFFFFULL);
1062         PIF_WCSR(RMAC_ADDR_CMD_MEM, RMAC_ADDR_CMD_MEM_WE|
1063             RMAC_ADDR_CMD_MEM_STR|RMAC_ADDR_CMD_MEM_OFF(numaddr));
1064         while (PIF_RCSR(RMAC_ADDR_CMD_MEM) & RMAC_ADDR_CMD_MEM_STR)
1065             ;
1066         numaddr++;
1067         ETHER_NEXT_MULTI(step, enm);
1068     }
1069     /* set the remaining entries to the broadcast address */
1070     for (i = numaddr; i < MAX_MCAST_ADDR; i++) {
1071         PIF_WCSR(RMAC_ADDR_DATA0_MEM, 0xffffffffffff0000ULL);
1072         PIF_WCSR(RMAC_ADDR_DATA1_MEM, 0xFFFFFFFFFFFFFFFFULL);
1073         PIF_WCSR(RMAC_ADDR_CMD_MEM, RMAC_ADDR_CMD_MEM_WE|
1074             RMAC_ADDR_CMD_MEM_STR|RMAC_ADDR_CMD_MEM_OFF(i));
1075         while (PIF_RCSR(RMAC_ADDR_CMD_MEM) & RMAC_ADDR_CMD_MEM_STR)
1076             ;
1077     }
1078     ifp->if_flags &= ~IFF_ALLMULTI;
1079     return;
1080
1081 allmulti:
1082     /* Just receive everything with the multicast bit set */
1083     ifp->if_flags |= IFF_ALLMULTI;
1084     PIF_WCSR(RMAC_ADDR_DATA0_MEM, 0x8000000000000000ULL);
1085     PIF_WCSR(RMAC_ADDR_DATA1_MEM, 0xF000000000000000ULL);
1086     PIF_WCSR(RMAC_ADDR_CMD_MEM, RMAC_ADDR_CMD_MEM_WE|
1087         RMAC_ADDR_CMD_MEM_STR|RMAC_ADDR_CMD_MEM_OFF(1));
1088     while (PIF_RCSR(RMAC_ADDR_CMD_MEM) & RMAC_ADDR_CMD_MEM_STR)
1089         ;
1090 }
1091
1092 void
1093 xge_setpromisc(struct xge_softc *sc)
1094 {
1095     struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1096     uint64_t val;
1097
1098     val = PIF_RCSR(MAC_CFG);
1099
1100     if (ifp->if_flags & IFF_PROMISC)
1101         val |= RMAC_PROM_EN;
1102     else
1103         val &= ~RMAC_PROM_EN;
1104
1105     PIF_WCSR(MAC_CFG, val);
1106 }
1107
1108 void
1109 xge_start(struct ifnet *ifp)
1110 {
1111     struct xge_softc *sc = ifp->if_softc;
1112     struct txd *txd = NULL; /* XXX - gcc */
1113     bus_dmamap_t dmp;
1114     struct mbuf *m;
1115     uint64_t par, lcr;
1116     int nexttx = 0, ntxd, i;
1117
1118     if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
1119         return;
1120
1121     par = lcr = 0;
1122     for (;;) {
1123         if (sc->sc_nexttx == sc->sc_lasttx) {
1124             ifq_set_oactive(&ifp->if_snd);
1125             break; /* No more space */
1126         }
1127
1128         m = ifq_dequeue(&ifp->if_snd);
1129         if (m == NULL)
1130             break; /* out of packets */
1131
1132         nexttx = sc->sc_nexttx;
1133         dmp = sc->sc_txm[nexttx];
1134
1135         switch (bus_dmamap_load_mbuf(sc->sc_dmat, dmp, m,
1136             BUS_DMA_WRITE|BUS_DMA_NOWAIT)) {
1137         case 0:
1138             break;
1139         case EFBIG:
1140             if (m_defrag(m, M_DONTWAIT) == 0 &&
1141                 bus_dmamap_load_mbuf(sc->sc_dmat, dmp, m,
1142                 BUS_DMA_WRITE|BUS_DMA_NOWAIT) == 0)
1143                 break;
1144         default:
1145             m_freem(m);
1146             continue;
1147         }
1148
1149         bus_dmamap_sync(sc->sc_dmat, dmp, 0, dmp->dm_mapsize,
1150             BUS_DMASYNC_PREWRITE);
1151
1152         txd = sc->sc_txd[nexttx];
1153         sc->sc_txb[nexttx] = m;
1154         for (i = 0; i < dmp->dm_nsegs; i++) {
1155             if (dmp->dm_segs[i].ds_len == 0)
1156                 continue;
1157             txd->txd_control1 = dmp->dm_segs[i].ds_len;
1158             txd->txd_control2 = 0;
1159             txd->txd_bufaddr = dmp->dm_segs[i].ds_addr;
1160             txd++;
1161         }
1162         ntxd = txd - sc->sc_txd[nexttx] - 1;
1163         txd = sc->sc_txd[nexttx];
1164         txd->txd_control1 |= TXD_CTL1_OWN|TXD_CTL1_GCF;
1165         txd->txd_control2 = TXD_CTL2_UTIL;
1166
1167 #if NVLAN > 0
1168         if (m->m_flags & M_VLANTAG) {
1169             txd->txd_control2 |= TXD_CTL2_VLANE;
1170             txd->txd_control2 |=
1171                 TXD_CTL2_VLANT(m->m_pkthdr.ether_vtag);
1172         }
1173 #endif
1174
1175         if (m->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
1176             txd->txd_control2 |= TXD_CTL2_CIPv4;
1177         if (m->m_pkthdr.csum_flags & M_TCP_CSUM_OUT)
1178             txd->txd_control2 |= TXD_CTL2_CTCP;
1179         if (m->m_pkthdr.csum_flags & M_UDP_CSUM_OUT)
1180             txd->txd_control2 |= TXD_CTL2_CUDP;
1181
1182         txd[ntxd].txd_control1 |= TXD_CTL1_GCL;
1183
1184         bus_dmamap_sync(sc->sc_dmat, dmp, 0, dmp->dm_mapsize,
1185             BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1186
1187         par = sc->sc_txdp[nexttx];
1188         lcr = TXDL_NUMTXD(ntxd) | TXDL_LGC_FIRST | TXDL_LGC_LAST;
1189         TXP_WCSR(TXDL_PAR, par);
1190         TXP_WCSR(TXDL_LCR, lcr);
1191
1192 #if NBPFILTER > 0
1193         if (ifp->if_bpf)
1194             bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT);
1195 #endif /* NBPFILTER > 0 */
1196
1197         sc->sc_nexttx = NEXTTX(nexttx);
1198     }
1199 }
1200
1201 /*
1202  * Allocate DMA memory for transmit descriptor fragments.
1203  * Only one map is used for all descriptors.
1204  */
1205 int
1206 xge_alloc_txmem(struct xge_softc *sc)
1207 {
1208     struct txd *txp;
1209     bus_dma_segment_t seg;
1210     bus_addr_t txdp;
1211     caddr_t kva;
1212     int i, rseg, state;
1213
1214 #define TXMAPSZ (NTXDESCS*NTXFRAGS*sizeof(struct txd))
1215     state = 0;
1216     if (bus_dmamem_alloc(sc->sc_dmat, TXMAPSZ, PAGE_SIZE, 0,
1217         &seg, 1, &rseg, BUS_DMA_NOWAIT))
1218         goto err;
1219     state++;
1220     if (bus_dmamem_map(sc->sc_dmat, &seg, rseg, TXMAPSZ, &kva,
1221         BUS_DMA_NOWAIT))
1222         goto err;
1223
1224     state++;
1225     if (bus_dmamap_create(sc->sc_dmat, TXMAPSZ, 1, TXMAPSZ, 0,
1226         BUS_DMA_NOWAIT, &sc->sc_txmap))
1227         goto err;
1228     state++;
1229     if (bus_dmamap_load(sc->sc_dmat, sc->sc_txmap,
1230         kva, TXMAPSZ, NULL, BUS_DMA_NOWAIT))
1231         goto err;
1232
1233     /* setup transmit array pointers */
1234     txp = (struct txd *)kva;
1235     txdp = seg.ds_addr;
1236     for (i = 0; i < NTXDESCS; i++) {
1237         sc->sc_txd[i] = txp;
1238         sc->sc_txdp[i] = txdp;
1239         txp += NTXFRAGS;
1240         txdp += (NTXFRAGS * sizeof(struct txd));
1241     }
1242
1243     return (0);
1244
1245 err:
1246     if (state > 2)
1247         bus_dmamap_destroy(sc->sc_dmat, sc->sc_txmap);
1248     if (state > 1)
1249         bus_dmamem_unmap(sc->sc_dmat, kva, TXMAPSZ);
1250     if (state > 0)
1251         bus_dmamem_free(sc->sc_dmat, &seg, rseg);
1252     return (ENOBUFS);
1253 }
1254
1255/*
1256 * Allocate DMA memory for receive descriptor,
1257 * only one map is used for all descriptors.
1258 * link receive descriptor pages together.
1259 */
1260int
1261xge_alloc_rxmem(struct xge_softc *sc)
1262{
1263 struct rxd_4krxd1_4k *rxpp;
1264 bus_dma_segment_t seg;
1265 caddr_t kva;
1266 int i, rseg, state;
1267
1268 /* sanity check */
1269 if (sizeof(struct rxd_4k) != XGE_PAGE) {
1270 printf("bad compiler struct alignment, %d != %d\n",
1271     (int)sizeof(struct rxd_4k), XGE_PAGE);
1272 return (EINVAL);
1273 }
1274
1275 state = 0;
1276 if (bus_dmamem_alloc(sc->sc_dmat, RXMAPSZ, PAGE_SIZE, 0,
1277     &seg, 1, &rseg, BUS_DMA_NOWAIT))
1278 goto err;
1279 state++;
1280 if (bus_dmamem_map(sc->sc_dmat, &seg, rseg, RXMAPSZ, &kva,
1281     BUS_DMA_NOWAIT))
1282 goto err;
1283
1284 state++;
1285 if (bus_dmamap_create(sc->sc_dmat, RXMAPSZ, 1, RXMAPSZ, 0,
1286     BUS_DMA_NOWAIT, &sc->sc_rxmap))
1287 goto err;
1288 state++;
1289 if (bus_dmamap_load(sc->sc_dmat, sc->sc_rxmap,
1290     kva, RXMAPSZ, NULL, BUS_DMA_NOWAIT))
1291 goto err;
1292
1293 /* setup receive page link pointers */
1294 for (rxpp = (struct rxd_4k *)kva, i = 0; i < NRXPAGES; i++, rxpp++) {
1295 sc->sc_rxd_4k[i] = rxpp;
1296 rxpp->r4_next = (uint64_t)sc->sc_rxmap->dm_segs[0].ds_addr +
1297     (i*sizeof(struct rxd_4k)) + sizeof(struct rxd_4k);
1298 }
1299 sc->sc_rxd_4k[NRXPAGES-1]->r4_next =
1300 (uint64_t)sc->sc_rxmap->dm_segs[0].ds_addr;
1301
1302 return (0);
1303
1304err:
1305 if (state > 2)
1306 bus_dmamap_destroy(sc->sc_dmat, sc->sc_rxmap);
1307 if (state > 1)
1308 bus_dmamem_unmap(sc->sc_dmat, kva, RXMAPSZ);
1309 if (state > 0)
1310 bus_dmamem_free(sc->sc_dmat, &seg, rseg);
1311 return (ENOBUFS);
1312}
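
The linking loop above builds a circular list in DMA space: each 4 KB descriptor page's r4_next holds the bus address of the page that follows it, and the final assignment points the last page back at the first, so the chip can walk the ring indefinitely. A standalone sketch of the address arithmetic, assuming the constants visible in this report's expansions (XGE_PAGE == 4096, NRXPAGES == (1016/127)+1 == 9) and a hypothetical base address:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t base = 0x10000000;	/* hypothetical ds_addr */
	int i, npages = (1016/127) + 1;	/* 9 pages */

	for (i = 0; i < npages; i++) {
		/* next page, wrapping the last page back to the first */
		uint64_t next = base +
		    (uint64_t)((i + 1) % npages) * 4096;
		printf("page %d -> next at %#llx\n", i,
		    (unsigned long long)next);
	}
	return (0);
}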
1313
1314
1315/*
1316 * Add a new mbuf chain to descriptor id.
1317 */
1318int
1319xge_add_rxbuf(struct xge_softc *sc, int id)
1320{
1321 struct rxdesc *rxd;
1322 struct mbuf *m[5];
1323 int page, desc, error;
1324#if RX_MODE == RX_MODE_5
1325 int i;
1326#endif
1327
1328 page = id/NDESC_BUFMODE;
1329 desc = id%NDESC_BUFMODE;
1330
1331 rxd = &sc->sc_rxd_4k[page]->r4_rxd[desc];
1332
1333 /*
1334 * Allocate mbufs.
1335 * Currently five mbufs and two clusters are used;
1336 * the hardware puts the (ethernet, ip, tcp/udp) headers in
1337 * their own buffers, and the clusters are used only for data.
1338 */
1339#if RX_MODE == RX_MODE_1
1340 MGETHDR(m[0], M_DONTWAIT, MT_DATA);
1341 if (m[0] == NULL)
1342 return (ENOBUFS);
1343 MCLGETL(m[0], M_DONTWAIT, XGE_MAX_FRAMELEN + ETHER_ALIGN);
1344 if ((m[0]->m_flags & M_EXT) == 0) {
1345 m_freem(m[0]);
1346 return (ENOBUFS);
1347 }
1348 m[0]->m_len = m[0]->m_pkthdr.len = XGE_MAX_FRAMELEN + ETHER_ALIGN;
1349#elif RX_MODE == RX_MODE_3
1350#error missing rxmode 3.
1351#elif RX_MODE == RX_MODE_5
1352 MGETHDR(m[0], M_DONTWAIT, MT_DATA);
1353 for (i = 1; i < 5; i++) {
1354 MGET(m[i], M_DONTWAIT, MT_DATA);
1355 }
1356 if (m[3])
1357 MCLGET(m[3], M_DONTWAIT);
1358 if (m[4])
1359 MCLGET(m[4], M_DONTWAIT);
1360 if (!m[0] || !m[1] || !m[2] || !m[3] || !m[4] ||
1361 ((m[3]->m_flags & M_EXT) == 0) || ((m[4]->m_flags & M_EXT) == 0)) {
1362 /* Out of something */
1363 for (i = 0; i < 5; i++)
1364 m_free(m[i]);
1365 return (ENOBUFS);
1366 }
1367 /* Link'em together */
1368 m[0]->m_next = m[1];
1369 m[1]->m_next = m[2];
1370 m[2]->m_next = m[3];
1371 m[3]->m_next = m[4];
1372#else
1373#error bad mode RX_MODE
1374#endif
1375
1376 if (sc->sc_rxb[id])
1377 bus_dmamap_unload(sc->sc_dmat, sc->sc_rxm[id]);
1378 sc->sc_rxb[id] = m[0];
1379
1380 m_adj(m[0], ETHER_ALIGN);
1381
1382 error = bus_dmamap_load_mbuf(sc->sc_dmat, sc->sc_rxm[id], m[0],
1383     BUS_DMA_READ|BUS_DMA_NOWAIT);
1384 if (error)
1385 return (error);
1386 bus_dmamap_sync(sc->sc_dmat, sc->sc_rxm[id], 0,
1387     sc->sc_rxm[id]->dm_mapsize, BUS_DMASYNC_PREREAD);
1388
1389#if RX_MODE == RX_MODE_1
1390 rxd->rxd_control2 = RXD_MKCTL2(m[0]->m_len, 0, 0);
1391 rxd->rxd_buf0 = (uint64_t)sc->sc_rxm[id]->dm_segs[0].ds_addr;
1392 rxd->rxd_control1 = RXD_CTL1_OWN;
1393#elif RX_MODE == RX_MODE_3
1394#elif RX_MODE == RX_MODE_5
1395 rxd->rxd_control3 = RXD_MKCTL3(0, m[3]->m_len, m[4]->m_len);
1396 rxd->rxd_control2 = RXD_MKCTL2(m[0]->m_len, m[1]->m_len, m[2]->m_len);
1397 rxd->rxd_buf0 = (uint64_t)sc->sc_rxm[id]->dm_segs[0].ds_addr;
1398 rxd->rxd_buf1 = (uint64_t)sc->sc_rxm[id]->dm_segs[1].ds_addr;
1399 rxd->rxd_buf2 = (uint64_t)sc->sc_rxm[id]->dm_segs[2].ds_addr;
1400 rxd->rxd_buf3 = (uint64_t)sc->sc_rxm[id]->dm_segs[3].ds_addr;
1401 rxd->rxd_buf4 = (uint64_t)sc->sc_rxm[id]->dm_segs[4].ds_addr;
1402 rxd->rxd_control1 = RXD_CTL1_OWN;
1403#endif
1404
1405 XGE_RXSYNC(id, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1406 return (0);
1407}
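
The control-word writes above pack per-buffer sizes into 16-bit fields, and setting RXD_CTL1_OWN last hands the descriptor to the hardware. The layouts below follow the RXD_MKCTL2/RXD_MKCTL3 expansions shown in this report; the helper names are hypothetical sketches, not driver code:

#include <stdint.h>

/* RXD_MKCTL2(a, b, c): sizes of buffers 0-2 in the upper fields. */
static uint64_t
rxd_mkctl2_sketch(uint64_t sz0, uint64_t sz1, uint64_t sz2)
{
	return (sz0 << 48) | (sz1 << 32) | (sz2 << 16);
}

/* RXD_MKCTL3(a, b, c): buffer 3/4 sizes; the driver passes 0 as `a'. */
static uint64_t
rxd_mkctl3_sketch(uint64_t a, uint64_t sz3, uint64_t sz4)
{
	return (a << 32) | (sz3 << 16) | sz4;
}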
1408
1409/*
1410 * This magic comes from the FreeBSD driver.
1411 */
1412int
1413xge_setup_xgxs_xena(struct xge_softc *sc)
1414{
1415 int i;
1416
1417 for (i = 0; i < nitems(xge_xena_dtx_cfg); i++) {
1418 PIF_WCSR(DTX_CONTROL, xge_xena_dtx_cfg[i]);
1419 DELAY(100);
1420 }
1421
1422 return (0);
1423}
1424
1425int
1426xge_setup_xgxs_herc(struct xge_softc *sc)
1427{
1428 int i;
1429
1430 for (i = 0; i < nitems(xge_herc_dtx_cfg); i++) {
1431 PIF_WCSR(DTX_CONTROL, xge_herc_dtx_cfg[i]);
1432 DELAY(100);
1433 }
1434
1435 return (0);
1436}