Bug Summary

File: dev/pci/if_cas.c
Warning: line 865, column 2
Value stored to 'rxs' is never read
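
This warning comes from clang's dead-store checking (-analyzer-checker=deadcode in the invocation below): it fires when a value is written to a variable but never read again on any later path, so the store cannot affect the program's behavior. A minimal, self-contained sketch of the pattern, using hypothetical names rather than anything taken from if_cas.c:

#include <stdio.h>

static int
sum_to(int n)
{
	int total = 0;
	int scratch;
	int i;

	scratch = n;	/* dead store: 'scratch' is never read after this */
	for (i = 1; i <= n; i++)
		total += i;
	return (total);
}

int
main(void)
{
	printf("%d\n", sum_to(10));	/* prints 55; deleting the dead store changes nothing */
	return (0);
}

In the listing below, the flagged statement is 'rxs = (void *)&error;' at line 865 of cas_meminit(); nothing after it reads 'rxs' (or 'error'), so the assignment is exactly this kind of no-op.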

Annotated Source Code


clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name if_cas.c -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/lib/clang/13.0.0 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -D CONFIG_DRM_AMD_DC_DCN3_0 -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant -fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free 
-fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /usr/obj/sys/arch/amd64/compile/GENERIC.MP/scan-build/2022-01-12-131800-47421-1 -x c /usr/src/sys/dev/pci/if_cas.c
1/* $OpenBSD: if_cas.c,v 1.53 2020/07/10 13:26:37 patrick Exp $ */
2
3/*
4 *
5 * Copyright (C) 2007 Mark Kettenis.
6 * Copyright (C) 2001 Eduardo Horvath.
7 * All rights reserved.
8 *
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 *
31 */
32
33/*
34 * Driver for Sun Cassini ethernet controllers.
35 *
36 * There are basically two variants of this chip: Cassini and
37 * Cassini+. We can distinguish between the two by revision: 0x10 and
38 * up are Cassini+. The most important difference is that Cassini+
39 * has a second RX descriptor ring. Cassini+ will not work without
40 * configuring that second ring. However, since we don't use it we
41 * don't actually fill the descriptors, and only hand off the first
42 * four to the chip.
43 */
44
45#include "bpfilter.h"
46
47#include <sys/param.h>
48#include <sys/systm.h>
49#include <sys/timeout.h>
50#include <sys/mbuf.h>
51#include <sys/syslog.h>
52#include <sys/malloc.h>
53#include <sys/kernel.h>
54#include <sys/socket.h>
55#include <sys/ioctl.h>
56#include <sys/errno.h>
57#include <sys/device.h>
58#include <sys/endian.h>
59#include <sys/atomic.h>
60
61#include <net/if.h>
62#include <net/if_media.h>
63
64#include <netinet/in.h>
65#include <netinet/if_ether.h>
66
67#if NBPFILTER > 0
68#include <net/bpf.h>
69#endif
70
71#include <machine/bus.h>
72#include <machine/intr.h>
73
74#include <dev/mii/mii.h>
75#include <dev/mii/miivar.h>
76
77#include <dev/pci/if_casreg.h>
78#include <dev/pci/if_casvar.h>
79
80#include <dev/pci/pcivar.h>
81#include <dev/pci/pcireg.h>
82#include <dev/pci/pcidevs.h>
83
84#ifdef __sparc64__
85#include <dev/ofw/openfirm.h>
86#endif
87
88#define TRIES 10000
89
90struct cfdriver cas_cd = {
91 NULL((void *)0), "cas", DV_IFNET
92};
93
94int cas_match(struct device *, void *, void *);
95void cas_attach(struct device *, struct device *, void *);
96int cas_pci_enaddr(struct cas_softc *, struct pci_attach_args *);
97
98struct cfattach cas_ca = {
99 sizeof(struct cas_softc), cas_match, cas_attach
100};
101
102void cas_config(struct cas_softc *);
103void cas_start(struct ifnet *);
104void cas_stop(struct ifnet *, int);
105int cas_ioctl(struct ifnet *, u_long, caddr_t);
106void cas_tick(void *);
107void cas_watchdog(struct ifnet *);
108int cas_init(struct ifnet *);
109void cas_init_regs(struct cas_softc *);
110int cas_ringsize(int);
111int cas_cringsize(int);
112int cas_meminit(struct cas_softc *);
113void cas_mifinit(struct cas_softc *);
114int cas_bitwait(struct cas_softc *, bus_space_handle_t, int,
115 u_int32_t, u_int32_t);
116void cas_reset(struct cas_softc *);
117int cas_reset_rx(struct cas_softc *);
118int cas_reset_tx(struct cas_softc *);
119int cas_disable_rx(struct cas_softc *);
120int cas_disable_tx(struct cas_softc *);
121void cas_rxdrain(struct cas_softc *);
122int cas_add_rxbuf(struct cas_softc *, int idx);
123void cas_iff(struct cas_softc *);
124int cas_encap(struct cas_softc *, struct mbuf *, int *);
125
126/* MII methods & callbacks */
127int cas_mii_readreg(struct device *, int, int);
128void cas_mii_writereg(struct device *, int, int, int);
129void cas_mii_statchg(struct device *);
130int cas_pcs_readreg(struct device *, int, int);
131void cas_pcs_writereg(struct device *, int, int, int);
132
133int cas_mediachange(struct ifnet *);
134void cas_mediastatus(struct ifnet *, struct ifmediareq *);
135
136int cas_eint(struct cas_softc *, u_int);
137int cas_rint(struct cas_softc *);
138int cas_tint(struct cas_softc *, u_int32_t);
139int cas_pint(struct cas_softc *);
140int cas_intr(void *);
141
142#ifdef CAS_DEBUG
143#define DPRINTF(sc, x) if ((sc)->sc_arpcom.ac_if.if_flags & IFF_DEBUG) \
144 printf x
145#else
146#define DPRINTF(sc, x) /* nothing */
147#endif
148
149const struct pci_matchid cas_pci_devices[] = {
150 { PCI_VENDOR_SUN0x108e, PCI_PRODUCT_SUN_CASSINI0xabba },
151 { PCI_VENDOR_NS0x100b, PCI_PRODUCT_NS_SATURN0x0035 }
152};
153
154int
155cas_match(struct device *parent, void *cf, void *aux)
156{
157 return (pci_matchbyid((struct pci_attach_args *)aux, cas_pci_devices,
158 nitems(cas_pci_devices)(sizeof((cas_pci_devices)) / sizeof((cas_pci_devices)[0]))));
159}
160
161#define PROMHDR_PTR_DATA 0x18
162#define PROMDATA_PTR_VPD 0x08
163#define PROMDATA_DATA2 0x0a
164
165static const u_int8_t cas_promhdr[] = { 0x55, 0xaa };
166static const u_int8_t cas_promdat_sun[] = {
167 'P', 'C', 'I', 'R',
168 PCI_VENDOR_SUN0x108e & 0xff, PCI_VENDOR_SUN0x108e >> 8,
169 PCI_PRODUCT_SUN_CASSINI0xabba & 0xff, PCI_PRODUCT_SUN_CASSINI0xabba >> 8
170};
171static const u_int8_t cas_promdat_ns[] = {
172 'P', 'C', 'I', 'R',
173 PCI_VENDOR_NS0x100b & 0xff, PCI_VENDOR_NS0x100b >> 8,
174 PCI_PRODUCT_NS_SATURN0x0035 & 0xff, PCI_PRODUCT_NS_SATURN0x0035 >> 8
175};
176
177static const u_int8_t cas_promdat2[] = {
178 0x18, 0x00, /* structure length */
179 0x00, /* structure revision */
180 0x00, /* interface revision */
181 PCI_SUBCLASS_NETWORK_ETHERNET0x00, /* subclass code */
182 PCI_CLASS_NETWORK0x02 /* class code */
183};
184
185int
186cas_pci_enaddr(struct cas_softc *sc, struct pci_attach_args *pa)
187{
188 struct pci_vpd_largeres *res;
189 struct pci_vpd *vpd;
190 bus_space_handle_t romh;
191 bus_space_tag_t romt;
192 bus_size_t romsize = 0;
193 u_int8_t buf[32], *desc;
194 pcireg_t address;
195 int dataoff, vpdoff, len;
196 int rv = -1;
197
198 if (pci_mapreg_map(pa, PCI_ROM_REG0x30, PCI_MAPREG_TYPE_MEM0x00000000, 0,
199 &romt, &romh, 0, &romsize, 0))
200 return (-1);
201
202 address = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ROM_REG0x30);
203 address |= PCI_ROM_ENABLE0x00000001;
204 pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_ROM_REG0x30, address);
205
206 bus_space_read_region_1(romt, romh, 0, buf, sizeof(buf))((romt)->read_region_1((romh), (0), (buf), (sizeof(buf))));
207 if (bcmp(buf, cas_promhdr, sizeof(cas_promhdr)))
208 goto fail;
209
210 dataoff = buf[PROMHDR_PTR_DATA0x18] | (buf[PROMHDR_PTR_DATA0x18 + 1] << 8);
211 if (dataoff < 0x1c)
212 goto fail;
213
214 bus_space_read_region_1(romt, romh, dataoff, buf, sizeof(buf))((romt)->read_region_1((romh), (dataoff), (buf), (sizeof(buf
))))
;
215 if ((bcmp(buf, cas_promdat_sun, sizeof(cas_promdat_sun)) &&
216 bcmp(buf, cas_promdat_ns, sizeof(cas_promdat_ns))) ||
217 bcmp(buf + PROMDATA_DATA20x0a, cas_promdat2, sizeof(cas_promdat2)))
218 goto fail;
219
220 vpdoff = buf[PROMDATA_PTR_VPD0x08] | (buf[PROMDATA_PTR_VPD0x08 + 1] << 8);
221 if (vpdoff < 0x1c)
222 goto fail;
223
224next:
225 bus_space_read_region_1(romt, romh, vpdoff, buf, sizeof(buf))((romt)->read_region_1((romh), (vpdoff), (buf), (sizeof(buf
))))
;
226 if (!PCI_VPDRES_ISLARGE(buf[0])((buf[0]) & 0x80))
227 goto fail;
228
229 res = (struct pci_vpd_largeres *)buf;
230 vpdoff += sizeof(*res);
231
232 len = ((res->vpdres_len_msb << 8) + res->vpdres_len_lsb);
233 switch(PCI_VPDRES_LARGE_NAME(res->vpdres_byte0)((res->vpdres_byte0) & 0x7f)) {
234 case PCI_VPDRES_TYPE_IDENTIFIER_STRING0x02:
235 /* Skip identifier string. */
236 vpdoff += len;
237 goto next;
238
239 case PCI_VPDRES_TYPE_VPD0x10:
240 while (len > 0) {
241 bus_space_read_region_1(romt, romh, vpdoff,((romt)->read_region_1((romh), (vpdoff), (buf), (sizeof(buf
))))
242 buf, sizeof(buf))((romt)->read_region_1((romh), (vpdoff), (buf), (sizeof(buf
))))
;
243
244 vpd = (struct pci_vpd *)buf;
245 vpdoff += sizeof(*vpd) + vpd->vpd_len;
246 len -= sizeof(*vpd) + vpd->vpd_len;
247
248 /*
249 * We're looking for an "Enhanced" VPD...
250 */
251 if (vpd->vpd_key0 != 'Z')
252 continue;
253
254 desc = buf + sizeof(*vpd);
255
256 /*
257 * ...which is an instance property...
258 */
259 if (desc[0] != 'I')
260 continue;
261 desc += 3;
262
263 /*
264 * ...that's a byte array with the proper
265 * length for a MAC address...
266 */
267 if (desc[0] != 'B' || desc[1] != ETHER_ADDR_LEN6)
268 continue;
269 desc += 2;
270
271 /*
272 * ...named "local-mac-address".
273 */
274 if (strcmp(desc, "local-mac-address") != 0)
275 continue;
276 desc += strlen("local-mac-address") + 1;
277
278 bcopy(desc, sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN6);
279 sc->sc_arpcom.ac_enaddr[5] += pa->pa_device;
280 rv = 0;
281 }
282 break;
283
284 default:
285 goto fail;
286 }
287
288 fail:
289 if (romsize != 0)
290 bus_space_unmap(romt, romh, romsize);
291
292 address = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ROM_REG0x30);
293 address &= ~PCI_ROM_ENABLE0x00000001;
294 pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_ROM_REG0x30, address);
295
296 return (rv);
297}
298
299void
300cas_attach(struct device *parent, struct device *self, void *aux)
301{
302 struct pci_attach_args *pa = aux;
303 struct cas_softc *sc = (void *)self;
304 pci_intr_handle_t ih;
305#ifdef __sparc64__
306 /* XXX the following declarations should be elsewhere */
307 extern void myetheraddr(u_char *);
308#endif
309 const char *intrstr = NULL((void *)0);
310 bus_size_t size;
311 int gotenaddr = 0;
312
313 sc->sc_rev = PCI_REVISION(pa->pa_class)(((pa->pa_class) >> 0) & 0xff);
314 sc->sc_dmatag = pa->pa_dmat;
315
316#define PCI_CAS_BASEADDR 0x10
317 if (pci_mapreg_map(pa, PCI_CAS_BASEADDR0x10, PCI_MAPREG_TYPE_MEM0x00000000, 0,
318 &sc->sc_memt, &sc->sc_memh, NULL((void *)0), &size, 0) != 0) {
319 printf(": can't map registers\n");
320 return;
321 }
322
323 if (cas_pci_enaddr(sc, pa) == 0)
324 gotenaddr = 1;
325
326#ifdef __sparc64__
327 if (!gotenaddr) {
328 if (OF_getprop(PCITAG_NODE(pa->pa_tag), "local-mac-address",
329 sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN6) <= 0)
330 myetheraddr(sc->sc_arpcom.ac_enaddr);
331 gotenaddr = 1;
332 }
333#endif
334#ifdef __powerpc__
335 if (!gotenaddr) {
336 pci_ether_hw_addr(pa->pa_pc, sc->sc_arpcom.ac_enaddr);
337 gotenaddr = 1;
338 }
339#endif
340
341 sc->sc_burst = 16; /* XXX */
342
343 if (pci_intr_map(pa, &ih) != 0) {
344 printf(": couldn't map interrupt\n");
345 bus_space_unmap(sc->sc_memt, sc->sc_memh, size);
346 return;
347 }
348 intrstr = pci_intr_string(pa->pa_pc, ih);
349 sc->sc_ih = pci_intr_establish(pa->pa_pc,
350 ih, IPL_NET0x7 | IPL_MPSAFE0x100, cas_intr, sc, self->dv_xname);
351 if (sc->sc_ih == NULL((void *)0)) {
352 printf(": couldn't establish interrupt");
353 if (intrstr != NULL((void *)0))
354 printf(" at %s", intrstr);
355 printf("\n");
356 bus_space_unmap(sc->sc_memt, sc->sc_memh, size);
357 return;
358 }
359
360 printf(": %s", intrstr);
361
362 /*
363 * call the main configure
364 */
365 cas_config(sc);
366}
367
368/*
369 * cas_config:
370 *
371 * Attach a Cassini interface to the system.
372 */
373void
374cas_config(struct cas_softc *sc)
375{
376 struct ifnet *ifp = &sc->sc_arpcom.ac_if;
377 struct mii_data *mii = &sc->sc_mii;
378 struct mii_softc *child;
379 int i, error;
380
381 /* Make sure the chip is stopped. */
382 ifp->if_softc = sc;
383 cas_reset(sc);
384
385 /*
386 * Allocate the control data structures, and create and load the
387 * DMA map for it.
388 */
389 if ((error = bus_dmamem_alloc(sc->sc_dmatag,(*(sc->sc_dmatag)->_dmamem_alloc)((sc->sc_dmatag), (
sizeof(struct cas_control_data)), (8192), (0), (&sc->sc_cdseg
), (1), (&sc->sc_cdnseg), (0x1000))
390 sizeof(struct cas_control_data), CAS_PAGE_SIZE, 0, &sc->sc_cdseg,(*(sc->sc_dmatag)->_dmamem_alloc)((sc->sc_dmatag), (
sizeof(struct cas_control_data)), (8192), (0), (&sc->sc_cdseg
), (1), (&sc->sc_cdnseg), (0x1000))
391 1, &sc->sc_cdnseg, BUS_DMA_ZERO)(*(sc->sc_dmatag)->_dmamem_alloc)((sc->sc_dmatag), (
sizeof(struct cas_control_data)), (8192), (0), (&sc->sc_cdseg
), (1), (&sc->sc_cdnseg), (0x1000))
) != 0) {
392 printf("\n%s: unable to allocate control data, error = %d\n",
393 sc->sc_dev.dv_xname, error);
394 goto fail_0;
395 }
396
397 /* XXX should map this in with correct endianness */
398 if ((error = bus_dmamem_map(sc->sc_dmatag, &sc->sc_cdseg, sc->sc_cdnseg,(*(sc->sc_dmatag)->_dmamem_map)((sc->sc_dmatag), (&
sc->sc_cdseg), (sc->sc_cdnseg), (sizeof(struct cas_control_data
)), ((caddr_t *)&sc->sc_control_data), (0x0004))
399 sizeof(struct cas_control_data), (caddr_t *)&sc->sc_control_data,(*(sc->sc_dmatag)->_dmamem_map)((sc->sc_dmatag), (&
sc->sc_cdseg), (sc->sc_cdnseg), (sizeof(struct cas_control_data
)), ((caddr_t *)&sc->sc_control_data), (0x0004))
400 BUS_DMA_COHERENT)(*(sc->sc_dmatag)->_dmamem_map)((sc->sc_dmatag), (&
sc->sc_cdseg), (sc->sc_cdnseg), (sizeof(struct cas_control_data
)), ((caddr_t *)&sc->sc_control_data), (0x0004))
) != 0) {
401 printf("\n%s: unable to map control data, error = %d\n",
402 sc->sc_dev.dv_xname, error);
403 goto fail_1;
404 }
405
406 if ((error = bus_dmamap_create(sc->sc_dmatag,(*(sc->sc_dmatag)->_dmamap_create)((sc->sc_dmatag), (
sizeof(struct cas_control_data)), (1), (sizeof(struct cas_control_data
)), (0), (0), (&sc->sc_cddmamap))
407 sizeof(struct cas_control_data), 1,(*(sc->sc_dmatag)->_dmamap_create)((sc->sc_dmatag), (
sizeof(struct cas_control_data)), (1), (sizeof(struct cas_control_data
)), (0), (0), (&sc->sc_cddmamap))
408 sizeof(struct cas_control_data), 0, 0, &sc->sc_cddmamap)(*(sc->sc_dmatag)->_dmamap_create)((sc->sc_dmatag), (
sizeof(struct cas_control_data)), (1), (sizeof(struct cas_control_data
)), (0), (0), (&sc->sc_cddmamap))
) != 0) {
409 printf("\n%s: unable to create control data DMA map, "
410 "error = %d\n", sc->sc_dev.dv_xname, error);
411 goto fail_2;
412 }
413
414 if ((error = bus_dmamap_load(sc->sc_dmatag, sc->sc_cddmamap,(*(sc->sc_dmatag)->_dmamap_load)((sc->sc_dmatag), (sc
->sc_cddmamap), (sc->sc_control_data), (sizeof(struct cas_control_data
)), (((void *)0)), (0))
415 sc->sc_control_data, sizeof(struct cas_control_data), NULL,(*(sc->sc_dmatag)->_dmamap_load)((sc->sc_dmatag), (sc
->sc_cddmamap), (sc->sc_control_data), (sizeof(struct cas_control_data
)), (((void *)0)), (0))
416 0)(*(sc->sc_dmatag)->_dmamap_load)((sc->sc_dmatag), (sc
->sc_cddmamap), (sc->sc_control_data), (sizeof(struct cas_control_data
)), (((void *)0)), (0))
) != 0) {
417 printf("\n%s: unable to load control data DMA map, error = %d\n",
418 sc->sc_dev.dv_xname, error);
419 goto fail_3;
420 }
421
422 /*
423 * Create the receive buffer DMA maps.
424 */
425 for (i = 0; i < CAS_NRXDESC128; i++) {
426 bus_dma_segment_t seg;
427 caddr_t kva;
428 int rseg;
429
430 if ((error = bus_dmamem_alloc(sc->sc_dmatag, CAS_PAGE_SIZE,(*(sc->sc_dmatag)->_dmamem_alloc)((sc->sc_dmatag), (
8192), (8192), (0), (&seg), (1), (&rseg), (0x0001))
431 CAS_PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)(*(sc->sc_dmatag)->_dmamem_alloc)((sc->sc_dmatag), (
8192), (8192), (0), (&seg), (1), (&rseg), (0x0001))
) != 0) {
432 printf("\n%s: unable to alloc rx DMA mem %d, "
433 "error = %d\n", sc->sc_dev.dv_xname, i, error);
434 goto fail_5;
435 }
436 sc->sc_rxsoft[i].rxs_dmaseg = seg;
437
438 if ((error = bus_dmamem_map(sc->sc_dmatag, &seg, rseg,(*(sc->sc_dmatag)->_dmamem_map)((sc->sc_dmatag), (&
seg), (rseg), (8192), (&kva), (0x0001))
439 CAS_PAGE_SIZE, &kva, BUS_DMA_NOWAIT)(*(sc->sc_dmatag)->_dmamem_map)((sc->sc_dmatag), (&
seg), (rseg), (8192), (&kva), (0x0001))
) != 0) {
440 printf("\n%s: unable to alloc rx DMA mem %d, "
441 "error = %d\n", sc->sc_dev.dv_xname, i, error);
442 goto fail_5;
443 }
444 sc->sc_rxsoft[i].rxs_kva = kva;
445
446 if ((error = bus_dmamap_create(sc->sc_dmatag, CAS_PAGE_SIZE, 1,(*(sc->sc_dmatag)->_dmamap_create)((sc->sc_dmatag), (
8192), (1), (8192), (0), (0), (&sc->sc_rxsoft[i].rxs_dmamap
))
447 CAS_PAGE_SIZE, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)(*(sc->sc_dmatag)->_dmamap_create)((sc->sc_dmatag), (
8192), (1), (8192), (0), (0), (&sc->sc_rxsoft[i].rxs_dmamap
))
) != 0) {
448 printf("\n%s: unable to create rx DMA map %d, "
449 "error = %d\n", sc->sc_dev.dv_xname, i, error);
450 goto fail_5;
451 }
452
453 if ((error = bus_dmamap_load(sc->sc_dmatag,(*(sc->sc_dmatag)->_dmamap_load)((sc->sc_dmatag), (sc
->sc_rxsoft[i].rxs_dmamap), (kva), (8192), (((void *)0)), (
0x0001))
454 sc->sc_rxsoft[i].rxs_dmamap, kva, CAS_PAGE_SIZE, NULL,(*(sc->sc_dmatag)->_dmamap_load)((sc->sc_dmatag), (sc
->sc_rxsoft[i].rxs_dmamap), (kva), (8192), (((void *)0)), (
0x0001))
455 BUS_DMA_NOWAIT)(*(sc->sc_dmatag)->_dmamap_load)((sc->sc_dmatag), (sc
->sc_rxsoft[i].rxs_dmamap), (kva), (8192), (((void *)0)), (
0x0001))
) != 0) {
456 printf("\n%s: unable to load rx DMA map %d, "
457 "error = %d\n", sc->sc_dev.dv_xname, i, error);
458 goto fail_5;
459 }
460 }
461
462 /*
463 * Create the transmit buffer DMA maps.
464 */
465 for (i = 0; i < CAS_NTXDESC(64 * 16); i++) {
466 if ((error = bus_dmamap_create(sc->sc_dmatag, MCLBYTES,(*(sc->sc_dmatag)->_dmamap_create)((sc->sc_dmatag), (
(1 << 11)), (16), ((1 << 11)), (0), (0x0001), (&
sc->sc_txd[i].sd_map))
467 CAS_NTXSEGS, MCLBYTES, 0, BUS_DMA_NOWAIT,(*(sc->sc_dmatag)->_dmamap_create)((sc->sc_dmatag), (
(1 << 11)), (16), ((1 << 11)), (0), (0x0001), (&
sc->sc_txd[i].sd_map))
468 &sc->sc_txd[i].sd_map)(*(sc->sc_dmatag)->_dmamap_create)((sc->sc_dmatag), (
(1 << 11)), (16), ((1 << 11)), (0), (0x0001), (&
sc->sc_txd[i].sd_map))
) != 0) {
469 printf("\n%s: unable to create tx DMA map %d, "
470 "error = %d\n", sc->sc_dev.dv_xname, i, error);
471 goto fail_6;
472 }
473 sc->sc_txd[i].sd_mbuf = NULL((void *)0);
474 }
475
476 /*
477 * From this point forward, the attachment cannot fail. A failure
478 * before this point releases all resources that may have been
479 * allocated.
480 */
481
482 /* Announce ourselves. */
483 printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));
484
485 /* Get RX FIFO size */
486 sc->sc_rxfifosize = 16 * 1024;
487
488 /* Initialize ifnet structure. */
489 strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, sizeof ifp->if_xname);
490 ifp->if_softc = sc;
491 ifp->if_flags =
492 IFF_BROADCAST0x2 | IFF_SIMPLEX0x800 | IFF_MULTICAST0x8000;
493 ifp->if_start = cas_start;
494 ifp->if_ioctl = cas_ioctl;
495 ifp->if_watchdog = cas_watchdog;
496 ifq_set_maxlen(&ifp->if_snd, CAS_NTXDESC - 1)((&ifp->if_snd)->ifq_maxlen = ((64 * 16) - 1));
497
498 ifp->if_capabilitiesif_data.ifi_capabilities = IFCAP_VLAN_MTU0x00000010;
499
500 /* Initialize ifmedia structures and MII info */
501 mii->mii_ifp = ifp;
502 mii->mii_readreg = cas_mii_readreg;
503 mii->mii_writereg = cas_mii_writereg;
504 mii->mii_statchg = cas_mii_statchg;
505
506 ifmedia_init(&mii->mii_media, 0, cas_mediachange, cas_mediastatus);
507
508 bus_space_write_4(sc->sc_memt, sc->sc_memh, CAS_MII_DATAPATH_MODE, 0)((sc->sc_memt)->write_4((sc->sc_memh), (0x9050), (0)
))
;
509
510 cas_mifinit(sc);
511
512 if (sc->sc_mif_config & CAS_MIF_CONFIG_MDI10x00000200) {
513 sc->sc_mif_config |= CAS_MIF_CONFIG_PHY_SEL0x00000001;
514 bus_space_write_4(sc->sc_memt, sc->sc_memh,((sc->sc_memt)->write_4((sc->sc_memh), (0x6210), (sc
->sc_mif_config)))
515 CAS_MIF_CONFIG, sc->sc_mif_config)((sc->sc_memt)->write_4((sc->sc_memh), (0x6210), (sc
->sc_mif_config)))
;
516 }
517
518 mii_attach(&sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY-1,
519 MII_OFFSET_ANY-1, 0);
520
521 child = LIST_FIRST(&mii->mii_phys)((&mii->mii_phys)->lh_first);
522 if (child == NULL((void *)0) &&
523 sc->sc_mif_config & (CAS_MIF_CONFIG_MDI00x00000100|CAS_MIF_CONFIG_MDI10x00000200)) {
524 /*
525 * Try the external PCS SERDES if we didn't find any
526 * MII devices.
527 */
528 bus_space_write_4(sc->sc_memt, sc->sc_memh,((sc->sc_memt)->write_4((sc->sc_memh), (0x9050), (0x00000002
)))
529 CAS_MII_DATAPATH_MODE, CAS_MII_DATAPATH_SERDES)((sc->sc_memt)->write_4((sc->sc_memh), (0x9050), (0x00000002
)))
;
530
531 bus_space_write_4(sc->sc_memt, sc->sc_memh,((sc->sc_memt)->write_4((sc->sc_memh), (0x9010), (0x00000001
)))
532 CAS_MII_CONFIG, CAS_MII_CONFIG_ENABLE)((sc->sc_memt)->write_4((sc->sc_memh), (0x9010), (0x00000001
)))
;
533
534 mii->mii_readreg = cas_pcs_readreg;
535 mii->mii_writereg = cas_pcs_writereg;
536
537 mii_attach(&sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY-1,
538 MII_OFFSET_ANY-1, MIIF_NOISOLATE0x0002);
539 }
540
541 child = LIST_FIRST(&mii->mii_phys)((&mii->mii_phys)->lh_first);
542 if (child == NULL((void *)0)) {
543 /* No PHY attached */
544 ifmedia_add(&sc->sc_mediasc_mii.mii_media, IFM_ETHER0x0000000000000100ULL|IFM_MANUAL1ULL, 0, NULL((void *)0));
545 ifmedia_set(&sc->sc_mediasc_mii.mii_media, IFM_ETHER0x0000000000000100ULL|IFM_MANUAL1ULL);
546 } else {
547 /*
548 * Walk along the list of attached MII devices and
549 * establish an `MII instance' to `phy number'
550 * mapping. We'll use this mapping in media change
551 * requests to determine which phy to use to program
552 * the MIF configuration register.
553 */
554 for (; child != NULL((void *)0); child = LIST_NEXT(child, mii_list)((child)->mii_list.le_next)) {
555 /*
556 * Note: we support just two PHYs: the built-in
557 * internal device and an external on the MII
558 * connector.
559 */
560 if (child->mii_phy > 1 || child->mii_inst > 1) {
561 printf("%s: cannot accommodate MII device %s"
562 " at phy %d, instance %lld\n",
563 sc->sc_dev.dv_xname,
564 child->mii_dev.dv_xname,
565 child->mii_phy, child->mii_inst);
566 continue;
567 }
568
569 sc->sc_phys[child->mii_inst] = child->mii_phy;
570 }
571
572 /*
573 * XXX - we can really do the following ONLY if the
574 * phy indeed has the auto negotiation capability!!
575 */
576 ifmedia_set(&sc->sc_mediasc_mii.mii_media, IFM_ETHER0x0000000000000100ULL|IFM_AUTO0ULL);
577 }
578
579 /* Attach the interface. */
580 if_attach(ifp);
581 ether_ifattach(ifp);
582
583 timeout_set(&sc->sc_tick_ch, cas_tick, sc);
584 return;
585
586 /*
587 * Free any resources we've allocated during the failed attach
588 * attempt. Do this in reverse order and fall through.
589 */
590 fail_6:
591 for (i = 0; i < CAS_NTXDESC(64 * 16); i++) {
592 if (sc->sc_txd[i].sd_map != NULL((void *)0))
593 bus_dmamap_destroy(sc->sc_dmatag,(*(sc->sc_dmatag)->_dmamap_destroy)((sc->sc_dmatag),
(sc->sc_txd[i].sd_map))
594 sc->sc_txd[i].sd_map)(*(sc->sc_dmatag)->_dmamap_destroy)((sc->sc_dmatag),
(sc->sc_txd[i].sd_map))
;
595 }
596 fail_5:
597 for (i = 0; i < CAS_NRXDESC128; i++) {
598 if (sc->sc_rxsoft[i].rxs_dmamap != NULL((void *)0))
599 bus_dmamap_destroy(sc->sc_dmatag,(*(sc->sc_dmatag)->_dmamap_destroy)((sc->sc_dmatag),
(sc->sc_rxsoft[i].rxs_dmamap))
600 sc->sc_rxsoft[i].rxs_dmamap)(*(sc->sc_dmatag)->_dmamap_destroy)((sc->sc_dmatag),
(sc->sc_rxsoft[i].rxs_dmamap))
;
601 }
602 bus_dmamap_unload(sc->sc_dmatag, sc->sc_cddmamap)(*(sc->sc_dmatag)->_dmamap_unload)((sc->sc_dmatag), (
sc->sc_cddmamap))
;
603 fail_3:
604 bus_dmamap_destroy(sc->sc_dmatag, sc->sc_cddmamap)(*(sc->sc_dmatag)->_dmamap_destroy)((sc->sc_dmatag),
(sc->sc_cddmamap))
;
605 fail_2:
606 bus_dmamem_unmap(sc->sc_dmatag, (caddr_t)sc->sc_control_data,(*(sc->sc_dmatag)->_dmamem_unmap)((sc->sc_dmatag), (
(caddr_t)sc->sc_control_data), (sizeof(struct cas_control_data
)))
607 sizeof(struct cas_control_data))(*(sc->sc_dmatag)->_dmamem_unmap)((sc->sc_dmatag), (
(caddr_t)sc->sc_control_data), (sizeof(struct cas_control_data
)))
;
608 fail_1:
609 bus_dmamem_free(sc->sc_dmatag, &sc->sc_cdseg, sc->sc_cdnseg)(*(sc->sc_dmatag)->_dmamem_free)((sc->sc_dmatag), (&
sc->sc_cdseg), (sc->sc_cdnseg))
;
610 fail_0:
611 return;
612}
613
614
615void
616cas_tick(void *arg)
617{
618 struct cas_softc *sc = arg;
619 struct ifnet *ifp = &sc->sc_arpcom.ac_if;
620 bus_space_tag_t t = sc->sc_memt;
621 bus_space_handle_t mac = sc->sc_memh;
622 int s;
623 u_int32_t v;
624
625 /* unload collisions counters */
626 v = bus_space_read_4(t, mac, CAS_MAC_EXCESS_COLL_CNT)((t)->read_4((mac), (0x61a8))) +
627 bus_space_read_4(t, mac, CAS_MAC_LATE_COLL_CNT)((t)->read_4((mac), (0x61ac)));
628 ifp->if_collisionsif_data.ifi_collisions += v +
629 bus_space_read_4(t, mac, CAS_MAC_NORM_COLL_CNT)((t)->read_4((mac), (0x61a0))) +
630 bus_space_read_4(t, mac, CAS_MAC_FIRST_COLL_CNT)((t)->read_4((mac), (0x61a4)));
631 ifp->if_oerrorsif_data.ifi_oerrors += v;
632
633 /* read error counters */
634 ifp->if_ierrorsif_data.ifi_ierrors +=
635 bus_space_read_4(t, mac, CAS_MAC_RX_LEN_ERR_CNT)((t)->read_4((mac), (0x61bc))) +
636 bus_space_read_4(t, mac, CAS_MAC_RX_ALIGN_ERR)((t)->read_4((mac), (0x61c0))) +
637 bus_space_read_4(t, mac, CAS_MAC_RX_CRC_ERR_CNT)((t)->read_4((mac), (0x61c4))) +
638 bus_space_read_4(t, mac, CAS_MAC_RX_CODE_VIOL)((t)->read_4((mac), (0x61c8)));
639
640 /* clear the hardware counters */
641 bus_space_write_4(t, mac, CAS_MAC_NORM_COLL_CNT, 0)((t)->write_4((mac), (0x61a0), (0)));
642 bus_space_write_4(t, mac, CAS_MAC_FIRST_COLL_CNT, 0)((t)->write_4((mac), (0x61a4), (0)));
643 bus_space_write_4(t, mac, CAS_MAC_EXCESS_COLL_CNT, 0)((t)->write_4((mac), (0x61a8), (0)));
644 bus_space_write_4(t, mac, CAS_MAC_LATE_COLL_CNT, 0)((t)->write_4((mac), (0x61ac), (0)));
645 bus_space_write_4(t, mac, CAS_MAC_RX_LEN_ERR_CNT, 0)((t)->write_4((mac), (0x61bc), (0)));
646 bus_space_write_4(t, mac, CAS_MAC_RX_ALIGN_ERR, 0)((t)->write_4((mac), (0x61c0), (0)));
647 bus_space_write_4(t, mac, CAS_MAC_RX_CRC_ERR_CNT, 0)((t)->write_4((mac), (0x61c4), (0)));
648 bus_space_write_4(t, mac, CAS_MAC_RX_CODE_VIOL, 0)((t)->write_4((mac), (0x61c8), (0)));
649
650 s = splnet()splraise(0x7);
651 mii_tick(&sc->sc_mii);
652 splx(s)spllower(s);
653
654 timeout_add_sec(&sc->sc_tick_ch, 1);
655}
656
657int
658cas_bitwait(struct cas_softc *sc, bus_space_handle_t h, int r,
659 u_int32_t clr, u_int32_t set)
660{
661 int i;
662 u_int32_t reg;
663
664 for (i = TRIES10000; i--; DELAY(100)(*delay_func)(100)) {
665 reg = bus_space_read_4(sc->sc_memt, h, r)((sc->sc_memt)->read_4((h), (r)));
666 if ((reg & clr) == 0 && (reg & set) == set)
667 return (1);
668 }
669
670 return (0);
671}
672
673void
674cas_reset(struct cas_softc *sc)
675{
676 bus_space_tag_t t = sc->sc_memt;
677 bus_space_handle_t h = sc->sc_memh;
678 int s;
679
680 s = splnet()splraise(0x7);
681 DPRINTF(sc, ("%s: cas_reset\n", sc->sc_dev.dv_xname));
682 cas_reset_rx(sc);
683 cas_reset_tx(sc);
684
685 /* Do a full reset */
686 bus_space_write_4(t, h, CAS_RESET,((t)->write_4((h), (0x1010), (0x000000002 | 0x000000001 | 0x00000008
)))
687 CAS_RESET_RX | CAS_RESET_TX | CAS_RESET_BLOCK_PCS)((t)->write_4((h), (0x1010), (0x000000002 | 0x000000001 | 0x00000008
)))
;
688 if (!cas_bitwait(sc, h, CAS_RESET0x1010, CAS_RESET_RX0x000000002 | CAS_RESET_TX0x000000001, 0))
689 printf("%s: cannot reset device\n", sc->sc_dev.dv_xname);
690 splx(s)spllower(s);
691}
692
693
694/*
695 * cas_rxdrain:
696 *
697 * Drain the receive queue.
698 */
699void
700cas_rxdrain(struct cas_softc *sc)
701{
702 /* Nothing to do yet. */
703}
704
705/*
706 * Reset the whole thing.
707 */
708void
709cas_stop(struct ifnet *ifp, int disable)
710{
711 struct cas_softc *sc = (struct cas_softc *)ifp->if_softc;
712 struct cas_sxd *sd;
713 u_int32_t i;
714
715 DPRINTF(sc, ("%s: cas_stop\n", sc->sc_dev.dv_xname));
716
717 timeout_del(&sc->sc_tick_ch);
718
719 /*
720 * Mark the interface down and cancel the watchdog timer.
721 */
722 ifp->if_flags &= ~IFF_RUNNING0x40;
723 ifq_clr_oactive(&ifp->if_snd);
724 ifp->if_timer = 0;
725
726 mii_down(&sc->sc_mii);
727
728 cas_reset_rx(sc);
729 cas_reset_tx(sc);
730
731 intr_barrier(sc->sc_ih);
732 KASSERT((ifp->if_flags & IFF_RUNNING) == 0)(((ifp->if_flags & 0x40) == 0) ? (void)0 : __assert("diagnostic "
, "/usr/src/sys/dev/pci/if_cas.c", 732, "(ifp->if_flags & IFF_RUNNING) == 0"
))
;
733
734 /*
735 * Release any queued transmit buffers.
736 */
737 for (i = 0; i < CAS_NTXDESC(64 * 16); i++) {
738 sd = &sc->sc_txd[i];
739 if (sd->sd_mbuf != NULL((void *)0)) {
740 bus_dmamap_sync(sc->sc_dmatag, sd->sd_map, 0,(*(sc->sc_dmatag)->_dmamap_sync)((sc->sc_dmatag), (sd
->sd_map), (0), (sd->sd_map->dm_mapsize), (0x08))
741 sd->sd_map->dm_mapsize, BUS_DMASYNC_POSTWRITE)(*(sc->sc_dmatag)->_dmamap_sync)((sc->sc_dmatag), (sd
->sd_map), (0), (sd->sd_map->dm_mapsize), (0x08))
;
742 bus_dmamap_unload(sc->sc_dmatag, sd->sd_map)(*(sc->sc_dmatag)->_dmamap_unload)((sc->sc_dmatag), (
sd->sd_map))
;
743 m_freem(sd->sd_mbuf);
744 sd->sd_mbuf = NULL((void *)0);
745 }
746 }
747 sc->sc_tx_cnt = sc->sc_tx_prod = sc->sc_tx_cons = 0;
748
749 if (disable)
750 cas_rxdrain(sc);
751}
752
753
754/*
755 * Reset the receiver
756 */
757int
758cas_reset_rx(struct cas_softc *sc)
759{
760 bus_space_tag_t t = sc->sc_memt;
761 bus_space_handle_t h = sc->sc_memh;
762
763 /*
764 * Resetting while DMA is in progress can cause a bus hang, so we
765 * disable DMA first.
766 */
767 cas_disable_rx(sc);
768 bus_space_write_4(t, h, CAS_RX_CONFIG, 0)((t)->write_4((h), (0x4000), (0)));
769 /* Wait till it finishes */
770 if (!cas_bitwait(sc, h, CAS_RX_CONFIG0x4000, 1, 0))
771 printf("%s: cannot disable rx dma\n", sc->sc_dev.dv_xname);
772 /* Wait 5ms extra. */
773 delay(5000)(*delay_func)(5000);
774
775 /* Finally, reset the ERX */
776 bus_space_write_4(t, h, CAS_RESET, CAS_RESET_RX)((t)->write_4((h), (0x1010), (0x000000002)));
777 /* Wait till it finishes */
778 if (!cas_bitwait(sc, h, CAS_RESET0x1010, CAS_RESET_RX0x000000002, 0)) {
779 printf("%s: cannot reset receiver\n", sc->sc_dev.dv_xname);
780 return (1);
781 }
782 return (0);
783}
784
785
786/*
787 * Reset the transmitter
788 */
789int
790cas_reset_tx(struct cas_softc *sc)
791{
792 bus_space_tag_t t = sc->sc_memt;
793 bus_space_handle_t h = sc->sc_memh;
794
795 /*
796 * Resetting while DMA is in progress can cause a bus hang, so we
797 * disable DMA first.
798 */
799 cas_disable_tx(sc);
800 bus_space_write_4(t, h, CAS_TX_CONFIG, 0)((t)->write_4((h), (0x2004), (0)));
801 /* Wait till it finishes */
802 if (!cas_bitwait(sc, h, CAS_TX_CONFIG0x2004, 1, 0))
803 printf("%s: cannot disable tx dma\n", sc->sc_dev.dv_xname);
804 /* Wait 5ms extra. */
805 delay(5000)(*delay_func)(5000);
806
807 /* Finally, reset the ETX */
808 bus_space_write_4(t, h, CAS_RESET, CAS_RESET_TX)((t)->write_4((h), (0x1010), (0x000000001)));
809 /* Wait till it finishes */
810 if (!cas_bitwait(sc, h, CAS_RESET0x1010, CAS_RESET_TX0x000000001, 0)) {
811 printf("%s: cannot reset transmitter\n",
812 sc->sc_dev.dv_xname);
813 return (1);
814 }
815 return (0);
816}
817
818/*
819 * disable receiver.
820 */
821int
822cas_disable_rx(struct cas_softc *sc)
823{
824 bus_space_tag_t t = sc->sc_memt;
825 bus_space_handle_t h = sc->sc_memh;
826 u_int32_t cfg;
827
828 /* Flip the enable bit */
829 cfg = bus_space_read_4(t, h, CAS_MAC_RX_CONFIG)((t)->read_4((h), (0x6034)));
830 cfg &= ~CAS_MAC_RX_ENABLE0x00000001;
831 bus_space_write_4(t, h, CAS_MAC_RX_CONFIG, cfg)((t)->write_4((h), (0x6034), (cfg)));
832
833 /* Wait for it to finish */
834 return (cas_bitwait(sc, h, CAS_MAC_RX_CONFIG0x6034, CAS_MAC_RX_ENABLE0x00000001, 0));
835}
836
837/*
838 * disable transmitter.
839 */
840int
841cas_disable_tx(struct cas_softc *sc)
842{
843 bus_space_tag_t t = sc->sc_memt;
844 bus_space_handle_t h = sc->sc_memh;
845 u_int32_t cfg;
846
847 /* Flip the enable bit */
848 cfg = bus_space_read_4(t, h, CAS_MAC_TX_CONFIG)((t)->read_4((h), (0x6030)));
849 cfg &= ~CAS_MAC_TX_ENABLE0x00000001;
850 bus_space_write_4(t, h, CAS_MAC_TX_CONFIG, cfg)((t)->write_4((h), (0x6030), (cfg)));
851
852 /* Wait for it to finish */
853 return (cas_bitwait(sc, h, CAS_MAC_TX_CONFIG0x6030, CAS_MAC_TX_ENABLE0x00000001, 0));
854}
855
856/*
857 * Initialize interface.
858 */
859int
860cas_meminit(struct cas_softc *sc)
861{
862 struct cas_rxsoft *rxs;
863 int i, error;
864
865 rxs = (void *)&error;
Value stored to 'rxs' is never read
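A hedged note, based only on this listing rather than on any committed fix: nothing later in cas_meminit() reads 'rxs' or 'error', so the assignment above, together with both otherwise-unused declarations, could most likely be deleted outright, leaving only the loop counter in the prologue:

	int i;

The descriptor-ring initialization that follows would be unaffected.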
866
867 /*
868 * Initialize the transmit descriptor ring.
869 */
870 for (i = 0; i < CAS_NTXDESC; i++) {
871 sc->sc_txdescs[i].cd_flags = 0;
872 sc->sc_txdescs[i].cd_addr = 0;
873 }
874 CAS_CDTXSYNC(sc, 0, CAS_NTXDESC,
875 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
876
877 /*
878 * Initialize the receive descriptor and receive job
879 * descriptor rings.
880 */
881 for (i = 0; i < CAS_NRXDESC; i++)
882 CAS_INIT_RXDESC(sc, i, i);
883 sc->sc_rxdptr = 0;
884 sc->sc_rxptr = 0;
885
886 /*
887 * Initialize the receive completion ring.
888 */
889 for (i = 0; i < CAS_NRXCOMP; i++) {
890 sc->sc_rxcomps[i].cc_word[0] = 0;
891 sc->sc_rxcomps[i].cc_word[1] = 0;
892 sc->sc_rxcomps[i].cc_word[2] = 0;
893 sc->sc_rxcomps[i].cc_word[3] = CAS_DMA_WRITE(CAS_RC3_OWN);
894 CAS_CDRXCSYNC(sc, i,
895 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
896 }
897
898 return (0);
899}
900
901int
902cas_ringsize(int sz)
903{
904 switch (sz) {
905 case 32:
906 return CAS_RING_SZ_320;
907 case 64:
908 return CAS_RING_SZ_641;
909 case 128:
910 return CAS_RING_SZ_1282;
911 case 256:
912 return CAS_RING_SZ_2563;
913 case 512:
914 return CAS_RING_SZ_5124;
915 case 1024:
916 return CAS_RING_SZ_10245;
917 case 2048:
918 return CAS_RING_SZ_20486;
919 case 4096:
920 return CAS_RING_SZ_40967;
921 case 8192:
922 return CAS_RING_SZ_81928;
923 default:
924 printf("cas: invalid Receive Descriptor ring size %d\n", sz);
925 return CAS_RING_SZ_320;
926 }
927}
928
929int
930cas_cringsize(int sz)
931{
932 int i;
933
934 for (i = 0; i < 9; i++)
935 if (sz == (128 << i))
936 return i;
937
938 printf("cas: invalid completion ring size %d\n", sz);
939 return 128;
940}
941
942/*
943 * Initialization of interface; set up initialization block
944 * and transmit/receive descriptor rings.
945 */
946int
947cas_init(struct ifnet *ifp)
948{
949 struct cas_softc *sc = (struct cas_softc *)ifp->if_softc;
950 bus_space_tag_t t = sc->sc_memt;
951 bus_space_handle_t h = sc->sc_memh;
952 int s;
953 u_int max_frame_size;
954 u_int32_t v;
955
956 s = splnet()splraise(0x7);
957
958 DPRINTF(sc, ("%s: cas_init: calling stop\n", sc->sc_dev.dv_xname));
959 /*
960 * Initialization sequence. The numbered steps below correspond
961 * to the sequence outlined in section 6.3.5.1 in the Ethernet
962 * Channel Engine manual (part of the PCIO manual).
963 * See also the STP2002-STQ document from Sun Microsystems.
964 */
965
966 /* step 1 & 2. Reset the Ethernet Channel */
967 cas_stop(ifp, 0);
968 cas_reset(sc);
969 DPRINTF(sc, ("%s: cas_init: restarting\n", sc->sc_dev.dv_xname));
970
971 /* Re-initialize the MIF */
972 cas_mifinit(sc);
973
974 /* step 3. Setup data structures in host memory */
975 cas_meminit(sc);
976
977 /* step 4. TX MAC registers & counters */
978 cas_init_regs(sc);
979 max_frame_size = ETHER_MAX_LEN1518 + ETHER_VLAN_ENCAP_LEN4;
980 v = (max_frame_size) | (0x2000 << 16) /* Burst size */;
981 bus_space_write_4(t, h, CAS_MAC_MAC_MAX_FRAME, v)((t)->write_4((h), (0x6054), (v)));
982
983 /* step 5. RX MAC registers & counters */
984 cas_iff(sc);
985
986 /* step 6 & 7. Program Descriptor Ring Base Addresses */
987 KASSERT((CAS_CDTXADDR(sc, 0) & 0x1fff) == 0)(((((sc)->sc_cddmamap->dm_segs[0].ds_addr + __builtin_offsetof
(struct cas_control_data, ccd_txdescs[((0))])) & 0x1fff) ==
0) ? (void)0 : __assert("diagnostic ", "/usr/src/sys/dev/pci/if_cas.c"
, 987, "(CAS_CDTXADDR(sc, 0) & 0x1fff) == 0"))
;
988 bus_space_write_4(t, h, CAS_TX_RING_PTR_HI,((t)->write_4((h), (0x2074), ((((uint64_t)((sc)->sc_cddmamap
->dm_segs[0].ds_addr + __builtin_offsetof(struct cas_control_data
, ccd_txdescs[((0))]))) >> 32))))
989 (((uint64_t)CAS_CDTXADDR(sc,0)) >> 32))((t)->write_4((h), (0x2074), ((((uint64_t)((sc)->sc_cddmamap
->dm_segs[0].ds_addr + __builtin_offsetof(struct cas_control_data
, ccd_txdescs[((0))]))) >> 32))))
;
990 bus_space_write_4(t, h, CAS_TX_RING_PTR_LO, CAS_CDTXADDR(sc, 0))((t)->write_4((h), (0x2070), (((sc)->sc_cddmamap->dm_segs
[0].ds_addr + __builtin_offsetof(struct cas_control_data, ccd_txdescs
[((0))])))))
;
991
992 KASSERT((CAS_CDRXADDR(sc, 0) & 0x1fff) == 0)(((((sc)->sc_cddmamap->dm_segs[0].ds_addr + __builtin_offsetof
(struct cas_control_data, ccd_rxdescs[((0))])) & 0x1fff) ==
0) ? (void)0 : __assert("diagnostic ", "/usr/src/sys/dev/pci/if_cas.c"
, 992, "(CAS_CDRXADDR(sc, 0) & 0x1fff) == 0"))
;
993 bus_space_write_4(t, h, CAS_RX_DRING_PTR_HI,((t)->write_4((h), (0x402c), ((((uint64_t)((sc)->sc_cddmamap
->dm_segs[0].ds_addr + __builtin_offsetof(struct cas_control_data
, ccd_rxdescs[((0))]))) >> 32))))
994 (((uint64_t)CAS_CDRXADDR(sc,0)) >> 32))((t)->write_4((h), (0x402c), ((((uint64_t)((sc)->sc_cddmamap
->dm_segs[0].ds_addr + __builtin_offsetof(struct cas_control_data
, ccd_rxdescs[((0))]))) >> 32))))
;
995 bus_space_write_4(t, h, CAS_RX_DRING_PTR_LO, CAS_CDRXADDR(sc, 0))((t)->write_4((h), (0x4028), (((sc)->sc_cddmamap->dm_segs
[0].ds_addr + __builtin_offsetof(struct cas_control_data, ccd_rxdescs
[((0))])))))
;
996
997 KASSERT((CAS_CDRXCADDR(sc, 0) & 0x1fff) == 0)(((((sc)->sc_cddmamap->dm_segs[0].ds_addr + __builtin_offsetof
(struct cas_control_data, ccd_rxcomps[((0))])) & 0x1fff) ==
0) ? (void)0 : __assert("diagnostic ", "/usr/src/sys/dev/pci/if_cas.c"
, 997, "(CAS_CDRXCADDR(sc, 0) & 0x1fff) == 0"))
;
998 bus_space_write_4(t, h, CAS_RX_CRING_PTR_HI,((t)->write_4((h), (0x4034), ((((uint64_t)((sc)->sc_cddmamap
->dm_segs[0].ds_addr + __builtin_offsetof(struct cas_control_data
, ccd_rxcomps[((0))]))) >> 32))))
999 (((uint64_t)CAS_CDRXCADDR(sc,0)) >> 32))((t)->write_4((h), (0x4034), ((((uint64_t)((sc)->sc_cddmamap
->dm_segs[0].ds_addr + __builtin_offsetof(struct cas_control_data
, ccd_rxcomps[((0))]))) >> 32))))
;
1000 bus_space_write_4(t, h, CAS_RX_CRING_PTR_LO, CAS_CDRXCADDR(sc, 0))((t)->write_4((h), (0x4030), (((sc)->sc_cddmamap->dm_segs
[0].ds_addr + __builtin_offsetof(struct cas_control_data, ccd_rxcomps
[((0))])))))
;
1001
1002 if (CAS_PLUS(sc)(sc->sc_rev > 0x10)) {
1003 KASSERT((CAS_CDRXADDR2(sc, 0) & 0x1fff) == 0)(((((sc)->sc_cddmamap->dm_segs[0].ds_addr + __builtin_offsetof
(struct cas_control_data, ccd_rxdescs2[((0))])) & 0x1fff)
== 0) ? (void)0 : __assert("diagnostic ", "/usr/src/sys/dev/pci/if_cas.c"
, 1003, "(CAS_CDRXADDR2(sc, 0) & 0x1fff) == 0"))
;
1004 bus_space_write_4(t, h, CAS_RX_DRING_PTR_HI2,((t)->write_4((h), (0x4204), ((((uint64_t)((sc)->sc_cddmamap
->dm_segs[0].ds_addr + __builtin_offsetof(struct cas_control_data
, ccd_rxdescs2[((0))]))) >> 32))))
1005 (((uint64_t)CAS_CDRXADDR2(sc,0)) >> 32))((t)->write_4((h), (0x4204), ((((uint64_t)((sc)->sc_cddmamap
->dm_segs[0].ds_addr + __builtin_offsetof(struct cas_control_data
, ccd_rxdescs2[((0))]))) >> 32))))
;
1006 bus_space_write_4(t, h, CAS_RX_DRING_PTR_LO2,((t)->write_4((h), (0x4200), (((sc)->sc_cddmamap->dm_segs
[0].ds_addr + __builtin_offsetof(struct cas_control_data, ccd_rxdescs2
[((0))])))))
1007 CAS_CDRXADDR2(sc, 0))((t)->write_4((h), (0x4200), (((sc)->sc_cddmamap->dm_segs
[0].ds_addr + __builtin_offsetof(struct cas_control_data, ccd_rxdescs2
[((0))])))))
;
1008 }
1009
1010 /* step 8. Global Configuration & Interrupt Mask */
1011 bus_space_write_4(t, h, CAS_INTMASK,((t)->write_4((h), (0x0010), (~(0x000000001|0x000000002| 0x000000008
| 0x000000010|0x000000020| 0x000000040| 0x000000080|0x000002000
| 0x000010000|0x000020000| 0x000040000))))
1012 ~(CAS_INTR_TX_INTME|CAS_INTR_TX_EMPTY|((t)->write_4((h), (0x0010), (~(0x000000001|0x000000002| 0x000000008
| 0x000000010|0x000000020| 0x000000040| 0x000000080|0x000002000
| 0x000010000|0x000020000| 0x000040000))))
1013 CAS_INTR_TX_TAG_ERR|((t)->write_4((h), (0x0010), (~(0x000000001|0x000000002| 0x000000008
| 0x000000010|0x000000020| 0x000000040| 0x000000080|0x000002000
| 0x000010000|0x000020000| 0x000040000))))
1014 CAS_INTR_RX_DONE|CAS_INTR_RX_NOBUF|((t)->write_4((h), (0x0010), (~(0x000000001|0x000000002| 0x000000008
| 0x000000010|0x000000020| 0x000000040| 0x000000080|0x000002000
| 0x000010000|0x000020000| 0x000040000))))
1015 CAS_INTR_RX_TAG_ERR|((t)->write_4((h), (0x0010), (~(0x000000001|0x000000002| 0x000000008
| 0x000000010|0x000000020| 0x000000040| 0x000000080|0x000002000
| 0x000010000|0x000020000| 0x000040000))))
1016 CAS_INTR_RX_COMP_FULL|CAS_INTR_PCS|((t)->write_4((h), (0x0010), (~(0x000000001|0x000000002| 0x000000008
| 0x000000010|0x000000020| 0x000000040| 0x000000080|0x000002000
| 0x000010000|0x000020000| 0x000040000))))
1017 CAS_INTR_MAC_CONTROL|CAS_INTR_MIF|((t)->write_4((h), (0x0010), (~(0x000000001|0x000000002| 0x000000008
| 0x000000010|0x000000020| 0x000000040| 0x000000080|0x000002000
| 0x000010000|0x000020000| 0x000040000))))
1018 CAS_INTR_BERR))((t)->write_4((h), (0x0010), (~(0x000000001|0x000000002| 0x000000008
| 0x000000010|0x000000020| 0x000000040| 0x000000080|0x000002000
| 0x000010000|0x000020000| 0x000040000))))
;
1019 bus_space_write_4(t, h, CAS_MAC_RX_MASK,((t)->write_4((h), (0x6024), (0x00000001|0x00000004)))
1020 CAS_MAC_RX_DONE|CAS_MAC_RX_FRAME_CNT)((t)->write_4((h), (0x6024), (0x00000001|0x00000004)));
1021 bus_space_write_4(t, h, CAS_MAC_TX_MASK, CAS_MAC_TX_XMIT_DONE)((t)->write_4((h), (0x6020), (0x00000001)));
1022 bus_space_write_4(t, h, CAS_MAC_CONTROL_MASK, 0)((t)->write_4((h), (0x6028), (0))); /* XXXX */
1023
1024 /* step 9. ETX Configuration: use mostly default values */
1025
1026 /* Enable DMA */
1027 v = cas_ringsize(CAS_NTXDESC(64 * 16) /*XXX*/) << 10;
1028 bus_space_write_4(t, h, CAS_TX_CONFIG,((t)->write_4((h), (0x2004), (v|0x00000001|(1<<24)|(
1<<29))))
1029 v|CAS_TX_CONFIG_TXDMA_EN|(1<<24)|(1<<29))((t)->write_4((h), (0x2004), (v|0x00000001|(1<<24)|(
1<<29))))
;
1030 bus_space_write_4(t, h, CAS_TX_KICK, 0)((t)->write_4((h), (0x2040), (0)));
1031
1032 /* step 10. ERX Configuration */
1033
1034 /* Encode Receive Descriptor ring size */
1035 v = cas_ringsize(CAS_NRXDESC128) << CAS_RX_CONFIG_RXDRNG_SZ_SHIFT1;
1036 if (CAS_PLUS(sc)(sc->sc_rev > 0x10))
1037 v |= cas_ringsize(32) << CAS_RX_CONFIG_RXDRNG2_SZ_SHIFT16;
1038
1039 /* Encode Receive Completion ring size */
1040 v |= cas_cringsize(CAS_NRXCOMP256) << CAS_RX_CONFIG_RXCRNG_SZ_SHIFT5;
1041
1042 /* Enable DMA */
1043 bus_space_write_4(t, h, CAS_RX_CONFIG,((t)->write_4((h), (0x4000), (v|(2<<10)|0x00000001))
)
1044 v|(2<<CAS_RX_CONFIG_FBOFF_SHFT)|CAS_RX_CONFIG_RXDMA_EN)((t)->write_4((h), (0x4000), (v|(2<<10)|0x00000001))
)
;
1045
1046 /*
1047 * The following value is for an OFF Threshold of about 3/4 full
1048 * and an ON Threshold of 1/4 full.
1049 */
1050 bus_space_write_4(t, h, CAS_RX_PAUSE_THRESH,((t)->write_4((h), (0x4020), ((3 * sc->sc_rxfifosize / 256
) | ((sc->sc_rxfifosize / 256) << 12))))
1051 (3 * sc->sc_rxfifosize / 256) |((t)->write_4((h), (0x4020), ((3 * sc->sc_rxfifosize / 256
) | ((sc->sc_rxfifosize / 256) << 12))))
1052 ((sc->sc_rxfifosize / 256) << 12))((t)->write_4((h), (0x4020), ((3 * sc->sc_rxfifosize / 256
) | ((sc->sc_rxfifosize / 256) << 12))))
;
1053 bus_space_write_4(t, h, CAS_RX_BLANKING, (6 << 12) | 6)((t)->write_4((h), (0x4044), ((6 << 12) | 6)));
1054
1055 /* step 11. Configure Media */
1056 mii_mediachg(&sc->sc_mii);
1057
1058 /* step 12. RX_MAC Configuration Register */
1059 v = bus_space_read_4(t, h, CAS_MAC_RX_CONFIG)((t)->read_4((h), (0x6034)));
1060 v |= CAS_MAC_RX_ENABLE0x00000001 | CAS_MAC_RX_STRIP_CRC0x00000004;
1061 bus_space_write_4(t, h, CAS_MAC_RX_CONFIG, v)((t)->write_4((h), (0x6034), (v)));
1062
1063 /* step 14. Issue Transmit Pending command */
1064
1065 /* step 15. Give the receiver a swift kick */
1066 bus_space_write_4(t, h, CAS_RX_KICK, CAS_NRXDESC-4)((t)->write_4((h), (0x4024), (128 -4)));
1067 if (CAS_PLUS(sc)(sc->sc_rev > 0x10))
1068 bus_space_write_4(t, h, CAS_RX_KICK2, 4)((t)->write_4((h), (0x4220), (4)));
1069
1070 /* Start the one second timer. */
1071 timeout_add_sec(&sc->sc_tick_ch, 1);
1072
1073 ifp->if_flags |= IFF_RUNNING0x40;
1074 ifq_clr_oactive(&ifp->if_snd);
1075 ifp->if_timer = 0;
1076 splx(s)spllower(s);
1077
1078 return (0);
1079}
1080
1081void
1082cas_init_regs(struct cas_softc *sc)
1083{
1084 bus_space_tag_t t = sc->sc_memt;
1085 bus_space_handle_t h = sc->sc_memh;
1086 u_int32_t v, r;
1087
1088 /* These regs are not cleared on reset */
1089 sc->sc_inited = 0;
1090 if (!sc->sc_inited) {
1091 /* Load recommended values */
1092 bus_space_write_4(t, h, CAS_MAC_IPG0, 0x00)((t)->write_4((h), (0x6040), (0x00)));
1093 bus_space_write_4(t, h, CAS_MAC_IPG1, 0x08)((t)->write_4((h), (0x6044), (0x08)));
1094 bus_space_write_4(t, h, CAS_MAC_IPG2, 0x04)((t)->write_4((h), (0x6048), (0x04)));
1095
1096 bus_space_write_4(t, h, CAS_MAC_MAC_MIN_FRAME, ETHER_MIN_LEN)((t)->write_4((h), (0x6050), (64)));
1097 /* Max frame and max burst size */
1098 v = ETHER_MAX_LEN1518 | (0x2000 << 16) /* Burst size */;
1099 bus_space_write_4(t, h, CAS_MAC_MAC_MAX_FRAME, v)((t)->write_4((h), (0x6054), (v)));
1100
1101 bus_space_write_4(t, h, CAS_MAC_PREAMBLE_LEN, 0x07)((t)->write_4((h), (0x6058), (0x07)));
1102 bus_space_write_4(t, h, CAS_MAC_JAM_SIZE, 0x04)((t)->write_4((h), (0x605c), (0x04)));
1103 bus_space_write_4(t, h, CAS_MAC_ATTEMPT_LIMIT, 0x10)((t)->write_4((h), (0x6060), (0x10)));
1104 bus_space_write_4(t, h, CAS_MAC_CONTROL_TYPE, 0x8088)((t)->write_4((h), (0x6064), (0x8088)));
1105 bus_space_write_4(t, h, CAS_MAC_RANDOM_SEED,((t)->write_4((h), (0x61cc), (((sc->sc_arpcom.ac_enaddr
[5]<<8)|sc->sc_arpcom.ac_enaddr[4])&0x3ff)))
1106 ((sc->sc_arpcom.ac_enaddr[5]<<8)|sc->sc_arpcom.ac_enaddr[4])&0x3ff)((t)->write_4((h), (0x61cc), (((sc->sc_arpcom.ac_enaddr
[5]<<8)|sc->sc_arpcom.ac_enaddr[4])&0x3ff)))
;
1107
1108 /* Secondary MAC addresses set to 0:0:0:0:0:0 */
1109 for (r = CAS_MAC_ADDR30x608c; r < CAS_MAC_ADDR420x6128; r += 4)
1110 bus_space_write_4(t, h, r, 0)((t)->write_4((h), (r), (0)));
1111
1112 /* MAC control addr set to 0:1:c2:0:1:80 */
1113 bus_space_write_4(t, h, CAS_MAC_ADDR42, 0x0001)((t)->write_4((h), (0x6128), (0x0001)));
1114 bus_space_write_4(t, h, CAS_MAC_ADDR43, 0xc200)((t)->write_4((h), (0x612c), (0xc200)));
1115 bus_space_write_4(t, h, CAS_MAC_ADDR44, 0x0180)((t)->write_4((h), (0x6130), (0x0180)));
1116
1117 /* MAC filter addr set to 0:0:0:0:0:0 */
1118 bus_space_write_4(t, h, CAS_MAC_ADDR_FILTER0, 0)((t)->write_4((h), (0x614c), (0)));
1119 bus_space_write_4(t, h, CAS_MAC_ADDR_FILTER1, 0)((t)->write_4((h), (0x6150), (0)));
1120 bus_space_write_4(t, h, CAS_MAC_ADDR_FILTER2, 0)((t)->write_4((h), (0x6154), (0)));
1121
1122 bus_space_write_4(t, h, CAS_MAC_ADR_FLT_MASK1_2, 0)((t)->write_4((h), (0x6158), (0)));
1123 bus_space_write_4(t, h, CAS_MAC_ADR_FLT_MASK0, 0)((t)->write_4((h), (0x615c), (0)));
1124
1125 /* Hash table initialized to 0 */
1126 for (r = CAS_MAC_HASH00x6160; r <= CAS_MAC_HASH150x619c; r += 4)
1127 bus_space_write_4(t, h, r, 0)((t)->write_4((h), (r), (0)));
1128
1129 sc->sc_inited = 1;
1130 }
1131
1132 /* Counters need to be zeroed */
1133 bus_space_write_4(t, h, CAS_MAC_NORM_COLL_CNT, 0)((t)->write_4((h), (0x61a0), (0)));
1134 bus_space_write_4(t, h, CAS_MAC_FIRST_COLL_CNT, 0)((t)->write_4((h), (0x61a4), (0)));
1135 bus_space_write_4(t, h, CAS_MAC_EXCESS_COLL_CNT, 0)((t)->write_4((h), (0x61a8), (0)));
1136 bus_space_write_4(t, h, CAS_MAC_LATE_COLL_CNT, 0)((t)->write_4((h), (0x61ac), (0)));
1137 bus_space_write_4(t, h, CAS_MAC_DEFER_TMR_CNT, 0)((t)->write_4((h), (0x61b0), (0)));
1138 bus_space_write_4(t, h, CAS_MAC_PEAK_ATTEMPTS, 0)((t)->write_4((h), (0x61b4), (0)));
1139 bus_space_write_4(t, h, CAS_MAC_RX_FRAME_COUNT, 0)((t)->write_4((h), (0x61b8), (0)));
1140 bus_space_write_4(t, h, CAS_MAC_RX_LEN_ERR_CNT, 0)((t)->write_4((h), (0x61bc), (0)));
1141 bus_space_write_4(t, h, CAS_MAC_RX_ALIGN_ERR, 0)((t)->write_4((h), (0x61c0), (0)));
1142 bus_space_write_4(t, h, CAS_MAC_RX_CRC_ERR_CNT, 0)((t)->write_4((h), (0x61c4), (0)));
1143 bus_space_write_4(t, h, CAS_MAC_RX_CODE_VIOL, 0)((t)->write_4((h), (0x61c8), (0)));
1144
1145 /* Un-pause stuff */
1146 bus_space_write_4(t, h, CAS_MAC_SEND_PAUSE_CMD, 0)((t)->write_4((h), (0x6008), (0)));
1147
1148 /*
1149 * Set the station address.
1150 */
1151 bus_space_write_4(t, h, CAS_MAC_ADDR0,((t)->write_4((h), (0x6080), ((sc->sc_arpcom.ac_enaddr[
4]<<8) | sc->sc_arpcom.ac_enaddr[5])))
1152 (sc->sc_arpcom.ac_enaddr[4]<<8) | sc->sc_arpcom.ac_enaddr[5])((t)->write_4((h), (0x6080), ((sc->sc_arpcom.ac_enaddr[
4]<<8) | sc->sc_arpcom.ac_enaddr[5])))
;
1153 bus_space_write_4(t, h, CAS_MAC_ADDR1,((t)->write_4((h), (0x6084), ((sc->sc_arpcom.ac_enaddr[
2]<<8) | sc->sc_arpcom.ac_enaddr[3])))
1154 (sc->sc_arpcom.ac_enaddr[2]<<8) | sc->sc_arpcom.ac_enaddr[3])((t)->write_4((h), (0x6084), ((sc->sc_arpcom.ac_enaddr[
2]<<8) | sc->sc_arpcom.ac_enaddr[3])))
;
1155 bus_space_write_4(t, h, CAS_MAC_ADDR2,((t)->write_4((h), (0x6088), ((sc->sc_arpcom.ac_enaddr[
0]<<8) | sc->sc_arpcom.ac_enaddr[1])))
1156 (sc->sc_arpcom.ac_enaddr[0]<<8) | sc->sc_arpcom.ac_enaddr[1])((t)->write_4((h), (0x6088), ((sc->sc_arpcom.ac_enaddr[
0]<<8) | sc->sc_arpcom.ac_enaddr[1])))
;
1157}
1158
1159/*
1160 * Receive interrupt.
1161 */
1162int
1163cas_rint(struct cas_softc *sc)
1164{
1165 struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1166 bus_space_tag_t t = sc->sc_memt;
1167 bus_space_handle_t h = sc->sc_memh;
1168 struct cas_rxsoft *rxs;
1169 struct mbuf_list ml = MBUF_LIST_INITIALIZER(){ ((void *)0), ((void *)0), 0 };
1170 struct mbuf *m;
1171 u_int64_t word[4];
1172 int len, off, idx;
1173 int i, skip;
1174 caddr_t cp;
1175
1176 for (i = sc->sc_rxptr;; i = CAS_NEXTRX(i + skip)((i + skip + 1) & (256 - 1))) {
1177 CAS_CDRXCSYNC(sc, i,(*((sc)->sc_dmatag)->_dmamap_sync)(((sc)->sc_dmatag)
, ((sc)->sc_cddmamap), (__builtin_offsetof(struct cas_control_data
, ccd_rxcomps[((i))])), (sizeof(struct cas_desc)), ((0x02|0x08
)))
1178 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)(*((sc)->sc_dmatag)->_dmamap_sync)(((sc)->sc_dmatag)
, ((sc)->sc_cddmamap), (__builtin_offsetof(struct cas_control_data
, ccd_rxcomps[((i))])), (sizeof(struct cas_desc)), ((0x02|0x08
)))
;
1179
1180 word[0] = CAS_DMA_READ(sc->sc_rxcomps[i].cc_word[0])((__uint64_t)(sc->sc_control_data->ccd_rxcomps[i].cc_word
[0]))
;
1181 word[1] = CAS_DMA_READ(sc->sc_rxcomps[i].cc_word[1])((__uint64_t)(sc->sc_control_data->ccd_rxcomps[i].cc_word
[1]))
;
1182 word[2] = CAS_DMA_READ(sc->sc_rxcomps[i].cc_word[2])((__uint64_t)(sc->sc_control_data->ccd_rxcomps[i].cc_word
[2]))
;
1183 word[3] = CAS_DMA_READ(sc->sc_rxcomps[i].cc_word[3])((__uint64_t)(sc->sc_control_data->ccd_rxcomps[i].cc_word
[3]))
;
1184
1185 /* Stop if the hardware still owns the descriptor. */
1186 if ((word[0] & CAS_RC0_TYPE0xc000000000000000ULL) == 0 || word[3] & CAS_RC3_OWN0x0000080000000000ULL)
1187 break;
1188
1189 len = CAS_RC1_HDR_LEN(word[1])(((word[1]) & 0x00000ff800000000ULL) >> 35);
1190 if (len > 0) {
1191 off = CAS_RC1_HDR_OFF(word[1])(((word[1]) & 0x0003f00000000000ULL) >> 44);
1192 idx = CAS_RC1_HDR_IDX(word[1])(((word[1]) & 0xfffc000000000000ULL) >> 50);
1193 rxs = &sc->sc_rxsoft[idx];
1194
1195 DPRINTF(sc, ("hdr at idx %d, off %d, len %d\n",
1196 idx, off, len));
1197
1198 bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0,
1199 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1200
1201 cp = rxs->rxs_kva + off * 256 + ETHER_ALIGN;
1202 m = m_devget(cp, len, ETHER_ALIGN);
1203
1204 if (word[0] & CAS_RC0_RELEASE_HDR)
1205 cas_add_rxbuf(sc, idx);
1206
1207 if (m != NULL) {
1208 ml_enqueue(&ml, m);
1209 } else
1210 ifp->if_ierrors++;
1211 }
1212
1213 len = CAS_RC0_DATA_LEN(word[0]);
1214 if (len > 0) {
1215 off = CAS_RC0_DATA_OFF(word[0]);
1216 idx = CAS_RC0_DATA_IDX(word[0]);
1217 rxs = &sc->sc_rxsoft[idx];
1218
1219 DPRINTF(sc, ("data at idx %d, off %d, len %d\n",
1220 idx, off, len));
1221
1222 bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0,
1223 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1224
1225 /* XXX We should not be copying the packet here. */
1226 cp = rxs->rxs_kva + off + ETHER_ALIGN;
1227 m = m_devget(cp, len, ETHER_ALIGN);
1228
1229 if (word[0] & CAS_RC0_RELEASE_DATA)
1230 cas_add_rxbuf(sc, idx);
1231
1232 if (m != NULL) {
1233 ml_enqueue(&ml, m);
1234 } else
1235 ifp->if_ierrors++;
1236 }
1237
1238 if (word[0] & CAS_RC0_SPLIT)
1239 printf("split packet\n");
1240
1241 skip = CAS_RC0_SKIP(word[0]);
1242 }
1243
1244 while (sc->sc_rxptr != i) {
1245 sc->sc_rxcomps[sc->sc_rxptr].cc_word[0] = 0;
1246 sc->sc_rxcomps[sc->sc_rxptr].cc_word[1] = 0;
1247 sc->sc_rxcomps[sc->sc_rxptr].cc_word[2] = 0;
1248 sc->sc_rxcomps[sc->sc_rxptr].cc_word[3] =
1249 CAS_DMA_WRITE(CAS_RC3_OWN);
1250 CAS_CDRXCSYNC(sc, sc->sc_rxptr,
1251 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1252
1253 sc->sc_rxptr = CAS_NEXTRX(sc->sc_rxptr);
1254 }
1255
1256 bus_space_write_4(t, h, CAS_RX_COMP_TAIL, sc->sc_rxptr);
1257
1258 DPRINTF(sc, ("cas_rint: done sc->rxptr %d, complete %d\n",
1259 sc->sc_rxptr, bus_space_read_4(t, h, CAS_RX_COMPLETION)));
1260
1261 if_input(ifp, &ml);
1262
1263 return (1);
1264}
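Each RX completion entry above packs a header part and a data part into 64-bit words, and cas_rint() recovers length, offset and buffer index with the mask-and-shift macros shown in the listing. A small host-side sketch of the same data-word extraction, using the expanded constants from the listing and a fabricated completion word (illustrative only, not driver code):

#include <stdio.h>
#include <stdint.h>

/* CAS_RC0_* field accessors, as expanded in the listing above. */
#define RC0_DATA_LEN(w)	(((w) & 0x0000000007ffe000ULL) >> 13)
#define RC0_DATA_OFF(w)	(((w) & 0x000001fff8000000ULL) >> 27)
#define RC0_DATA_IDX(w)	(((w) & 0x007ffe0000000000ULL) >> 41)

int
main(void)
{
	/* Fabricated word[0]: data length 64, offset 2, buffer index 5. */
	uint64_t w0 = (64ULL << 13) | (2ULL << 27) | (5ULL << 41);

	printf("len=%llu off=%llu idx=%llu\n",
	    (unsigned long long)RC0_DATA_LEN(w0),
	    (unsigned long long)RC0_DATA_OFF(w0),
	    (unsigned long long)RC0_DATA_IDX(w0));
	return 0;
}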
1265
1266/*
1267 * cas_add_rxbuf:
1268 *
1269 * Add a receive buffer to the indicated descriptor.
1270 */
1271int
1272cas_add_rxbuf(struct cas_softc *sc, int idx)
1273{
1274 bus_space_tag_t t = sc->sc_memt;
1275 bus_space_handle_t h = sc->sc_memh;
1276
1277 CAS_INIT_RXDESC(sc, sc->sc_rxdptr, idx);
1278
1279 if ((sc->sc_rxdptr % 4) == 0)
1280 bus_space_write_4(t, h, CAS_RX_KICK, sc->sc_rxdptr);
1281
1282 if (++sc->sc_rxdptr == CAS_NRXDESC)
1283 sc->sc_rxdptr = 0;
1284
1285 return (0);
1286}
1287
1288int
1289cas_eint(struct cas_softc *sc, u_int status)
1290{
1291 if ((status & CAS_INTR_MIF) != 0) {
1292#ifdef CAS_DEBUG
1293 printf("%s: link status changed\n", sc->sc_dev.dv_xname);
1294#endif
1295 return (1);
1296 }
1297
1298 printf("%s: status=%b\n", sc->sc_dev.dv_xname, status, CAS_INTR_BITS"\020" "\1INTME\2TXEMPTY\3TXDONE\4TX_TAG_ERR" "\5RXDONE\6RXNOBUF\7RX_TAG_ERR"
"\10RX_COMP_FULL" "\16PCS\17TXMAC\20RXMAC" "\21MACCONTROL\22MIF\23BERR"
);
1299 return (1);
1300}
1301
1302int
1303cas_pint(struct cas_softc *sc)
1304{
1305 bus_space_tag_t t = sc->sc_memt;
1306 bus_space_handle_t seb = sc->sc_memh;
1307 u_int32_t status;
1308
1309 status = bus_space_read_4(t, seb, CAS_MII_INTERRUP_STATUS);
1310 status |= bus_space_read_4(t, seb, CAS_MII_INTERRUP_STATUS);
1311#ifdef CAS_DEBUG
1312 if (status)
1313 printf("%s: link status changed\n", sc->sc_dev.dv_xname);
1314#endif
1315 return (1);
1316}
1317
1318int
1319cas_intr(void *v)
1320{
1321 struct cas_softc *sc = (struct cas_softc *)v;
1322 struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1323 bus_space_tag_t t = sc->sc_memt;
1324 bus_space_handle_t seb = sc->sc_memh;
1325 u_int32_t status;
1326 int r = 0;
1327
1328 status = bus_space_read_4(t, seb, CAS_STATUS);
1329 DPRINTF(sc, ("%s: cas_intr: cplt %xstatus %b\n",
1330 sc->sc_dev.dv_xname, (status>>19), status, CAS_INTR_BITS));
1331
1332 if ((status & CAS_INTR_PCS) != 0)
1333 r |= cas_pint(sc);
1334
1335 if ((status & (CAS_INTR_TX_TAG_ERR | CAS_INTR_RX_TAG_ERR |
1336 CAS_INTR_RX_COMP_FULL | CAS_INTR_BERR)) != 0)
1337 r |= cas_eint(sc, status);
1338
1339 if ((status & (CAS_INTR_TX_EMPTY | CAS_INTR_TX_INTME)) != 0)
1340 r |= cas_tint(sc, status);
1341
1342 if ((status & (CAS_INTR_RX_DONE | CAS_INTR_RX_NOBUF)) != 0)
1343 r |= cas_rint(sc);
1344
1345 /* We should eventually do more than just print out error stats. */
1346 if (status & CAS_INTR_TX_MAC) {
1347 int txstat = bus_space_read_4(t, seb, CAS_MAC_TX_STATUS);
1348#ifdef CAS_DEBUG
1349 if (txstat & ~CAS_MAC_TX_XMIT_DONE)
1350 printf("%s: MAC tx fault, status %x\n",
1351 sc->sc_dev.dv_xname, txstat);
1352#endif
1353 if (txstat & (CAS_MAC_TX_UNDERRUN | CAS_MAC_TX_PKT_TOO_LONG)) {
1354 KERNEL_LOCK();
1355 cas_init(ifp);
1356 KERNEL_UNLOCK();
1357 }
1358 }
1359 if (status & CAS_INTR_RX_MAC) {
1360 int rxstat = bus_space_read_4(t, seb, CAS_MAC_RX_STATUS);
1361#ifdef CAS_DEBUG
1362 if (rxstat & ~CAS_MAC_RX_DONE)
1363 printf("%s: MAC rx fault, status %x\n",
1364 sc->sc_dev.dv_xname, rxstat);
1365#endif
1366 /*
1367 * On some chip revisions CAS_MAC_RX_OVERFLOW happens often
1368 * due to a silicon bug, so handle it silently.
1369 */
1370 if (rxstat & CAS_MAC_RX_OVERFLOW) {
1371 KERNEL_LOCK();
1372 ifp->if_ierrors++;
1373 cas_init(ifp);
1374 KERNEL_UNLOCK();
1375 }
1376#ifdef CAS_DEBUG
1377 else if (rxstat & ~(CAS_MAC_RX_DONE | CAS_MAC_RX_FRAME_CNT))
1378 printf("%s: MAC rx fault, status %x\n",
1379 sc->sc_dev.dv_xname, rxstat);
1380#endif
1381 }
1382 return (r);
1383}
1384
1385
1386void
1387cas_watchdog(struct ifnet *ifp)
1388{
1389 struct cas_softc *sc = ifp->if_softc;
1390
1391 DPRINTF(sc, ("cas_watchdog: CAS_RX_CONFIG %x CAS_MAC_RX_STATUS %x "
1392 "CAS_MAC_RX_CONFIG %x\n",
1393 bus_space_read_4(sc->sc_memt, sc->sc_memh, CAS_RX_CONFIG),
1394 bus_space_read_4(sc->sc_memt, sc->sc_memh, CAS_MAC_RX_STATUS),
1395 bus_space_read_4(sc->sc_memt, sc->sc_memh, CAS_MAC_RX_CONFIG)));
1396
1397 log(LOG_ERR, "%s: device timeout\n", sc->sc_dev.dv_xname);
1398 ++ifp->if_oerrors;
1399
1400 /* Try to get more packets going. */
1401 cas_init(ifp);
1402}
1403
1404/*
1405 * Initialize the MII Management Interface
1406 */
1407void
1408cas_mifinit(struct cas_softc *sc)
1409{
1410 bus_space_tag_t t = sc->sc_memt;
1411 bus_space_handle_t mif = sc->sc_memh;
1412
1413 /* Configure the MIF in frame mode */
1414 sc->sc_mif_config = bus_space_read_4(t, mif, CAS_MIF_CONFIG);
1415 sc->sc_mif_config &= ~CAS_MIF_CONFIG_BB_ENA;
1416 bus_space_write_4(t, mif, CAS_MIF_CONFIG, sc->sc_mif_config);
1417}
1418
1419/*
1420 * MII interface
1421 *
1422 * The Cassini MII interface supports at least three different operating modes:
1423 *
1424 * Bitbang mode is implemented using data, clock and output enable registers.
1425 *
1426 * Frame mode is implemented by loading a complete frame into the frame
1427 * register and polling the valid bit for completion.
1428 *
1429 * Polling mode uses the frame register but completion is indicated by
1430 * an interrupt.
1431 *
1432 */
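As the comment above describes, frame mode loads a complete management frame into CAS_MIF_FRAME and then polls the turnaround bit. A minimal sketch of the frame composition used by cas_mii_readreg() below, built from the expanded constants in the listing; the pretend PHY reply is made up and no hardware access is performed:

#include <stdio.h>
#include <stdint.h>

#define MIF_FRAME_READ	0x60020000	/* read opcode and start bits */
#define MIF_PHY_SHIFT	23
#define MIF_REG_SHIFT	18
#define MIF_FRAME_TA0	0x00010000	/* turnaround bit, set when done */
#define MIF_FRAME_DATA	0x0000ffff

int
main(void)
{
	int phy = 1, reg = 2;		/* example PHY address and register */
	uint32_t v;

	/* Compose the read frame the same way cas_mii_readreg() does. */
	v = (reg << MIF_REG_SHIFT) | (phy << MIF_PHY_SHIFT) | MIF_FRAME_READ;
	printf("frame word: 0x%08x\n", v);

	/* The driver writes v to CAS_MIF_FRAME and polls until TA0 is set;
	 * here we just pretend the PHY answered with 0x1234. */
	v |= MIF_FRAME_TA0 | 0x1234;
	if (v & MIF_FRAME_TA0)
		printf("data: 0x%04x\n", v & MIF_FRAME_DATA);
	return 0;
}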
1433int
1434cas_mii_readreg(struct device *self, int phy, int reg)
1435{
1436 struct cas_softc *sc = (void *)self;
1437 bus_space_tag_t t = sc->sc_memt;
1438 bus_space_handle_t mif = sc->sc_memh;
1439 int n;
1440 u_int32_t v;
1441
1442#ifdef CAS_DEBUG
1443 if (sc->sc_debug)
1444 printf("cas_mii_readreg: phy %d reg %d\n", phy, reg);
1445#endif
1446
1447 /* Construct the frame command */
1448 v = (reg << CAS_MIF_REG_SHIFT) | (phy << CAS_MIF_PHY_SHIFT) |
1449 CAS_MIF_FRAME_READ;
1450
1451 bus_space_write_4(t, mif, CAS_MIF_FRAME, v);
1452 for (n = 0; n < 100; n++) {
1453 DELAY(1);
1454 v = bus_space_read_4(t, mif, CAS_MIF_FRAME);
1455 if (v & CAS_MIF_FRAME_TA0)
1456 return (v & CAS_MIF_FRAME_DATA);
1457 }
1458
1459 printf("%s: mii_read timeout\n", sc->sc_dev.dv_xname);
1460 return (0);
1461}
1462
1463void
1464cas_mii_writereg(struct device *self, int phy, int reg, int val)
1465{
1466 struct cas_softc *sc = (void *)self;
1467 bus_space_tag_t t = sc->sc_memt;
1468 bus_space_handle_t mif = sc->sc_memh;
1469 int n;
1470 u_int32_t v;
1471
1472#ifdef CAS_DEBUG
1473 if (sc->sc_debug)
1474 printf("cas_mii_writereg: phy %d reg %d val %x\n",
1475 phy, reg, val);
1476#endif
1477
1478 /* Construct the frame command */
1479 v = CAS_MIF_FRAME_WRITE |
1480 (phy << CAS_MIF_PHY_SHIFT) |
1481 (reg << CAS_MIF_REG_SHIFT) |
1482 (val & CAS_MIF_FRAME_DATA);
1483
1484 bus_space_write_4(t, mif, CAS_MIF_FRAME, v);
1485 for (n = 0; n < 100; n++) {
1486 DELAY(1);
1487 v = bus_space_read_4(t, mif, CAS_MIF_FRAME);
1488 if (v & CAS_MIF_FRAME_TA0)
1489 return;
1490 }
1491
1492 printf("%s: mii_write timeout\n", sc->sc_dev.dv_xname);
1493}
1494
1495void
1496cas_mii_statchg(struct device *dev)
1497{
1498 struct cas_softc *sc = (void *)dev;
1499#ifdef CAS_DEBUG
1500 uint64_t instance = IFM_INST(sc->sc_mii.mii_media.ifm_cur->ifm_media);
1501#endif
1502 bus_space_tag_t t = sc->sc_memt;
1503 bus_space_handle_t mac = sc->sc_memh;
1504 u_int32_t v;
1505
1506#ifdef CAS_DEBUG
1507 if (sc->sc_debug)
1508 printf("cas_mii_statchg: status change: phy = %d\n",
1509 sc->sc_phys[instance]);
1510#endif
1511
1512 /* Set tx full duplex options */
1513 bus_space_write_4(t, mac, CAS_MAC_TX_CONFIG, 0);
1514 delay(10000); /* reg must be cleared and delay before changing. */
1515 v = CAS_MAC_TX_ENA_IPG0|CAS_MAC_TX_NGU|CAS_MAC_TX_NGU_LIMIT|
1516 CAS_MAC_TX_ENABLE;
1517 if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX) != 0) {
1518 v |= CAS_MAC_TX_IGN_CARRIER|CAS_MAC_TX_IGN_COLLIS;
1519 }
1520 bus_space_write_4(t, mac, CAS_MAC_TX_CONFIG, v);
1521
1522 /* XIF Configuration */
1523 v = CAS_MAC_XIF_TX_MII_ENA;
1524 v |= CAS_MAC_XIF_LINK_LED;
1525
1526 /* MII needs echo disable if half duplex. */
1527 if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX) != 0)
1528 /* turn on full duplex LED */
1529 v |= CAS_MAC_XIF_FDPLX_LED;
1530 else
1531 /* half duplex -- disable echo */
1532 v |= CAS_MAC_XIF_ECHO_DISABL;
1533
1534 switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
1535 case IFM_1000_T: /* Gigabit using GMII interface */
1536 case IFM_1000_SX:
1537 v |= CAS_MAC_XIF_GMII_MODE;
1538 break;
1539 default:
1540 v &= ~CAS_MAC_XIF_GMII_MODE;
1541 }
1542 bus_space_write_4(t, mac, CAS_MAC_XIF_CONFIG, v);
1543}
1544
1545int
1546cas_pcs_readreg(struct device *self, int phy, int reg)
1547{
1548 struct cas_softc *sc = (void *)self;
1549 bus_space_tag_t t = sc->sc_memt;
1550 bus_space_handle_t pcs = sc->sc_memh;
1551
1552#ifdef CAS_DEBUG
1553 if (sc->sc_debug)
1554 printf("cas_pcs_readreg: phy %d reg %d\n", phy, reg);
1555#endif
1556
1557 if (phy != CAS_PHYAD_EXTERNAL)
1558 return (0);
1559
1560 switch (reg) {
1561 case MII_BMCR:
1562 reg = CAS_MII_CONTROL;
1563 break;
1564 case MII_BMSR:
1565 reg = CAS_MII_STATUS;
1566 break;
1567 case MII_ANAR:
1568 reg = CAS_MII_ANAR;
1569 break;
1570 case MII_ANLPAR:
1571 reg = CAS_MII_ANLPAR;
1572 break;
1573 case MII_EXTSR:
1574 return (EXTSR_1000XFDX|EXTSR_1000XHDX);
1575 default:
1576 return (0);
1577 }
1578
1579 return bus_space_read_4(t, pcs, reg);
1580}
1581
1582void
1583cas_pcs_writereg(struct device *self, int phy, int reg, int val)
1584{
1585 struct cas_softc *sc = (void *)self;
1586 bus_space_tag_t t = sc->sc_memt;
1587 bus_space_handle_t pcs = sc->sc_memh;
1588 int reset = 0;
1589
1590#ifdef CAS_DEBUG
1591 if (sc->sc_debug)
1592 printf("cas_pcs_writereg: phy %d reg %d val %x\n",
1593 phy, reg, val);
1594#endif
1595
1596 if (phy != CAS_PHYAD_EXTERNAL)
1597 return;
1598
1599 if (reg == MII_ANAR)
1600 bus_space_write_4(t, pcs, CAS_MII_CONFIG, 0);
1601
1602 switch (reg) {
1603 case MII_BMCR:
1604 reset = (val & CAS_MII_CONTROL_RESET);
1605 reg = CAS_MII_CONTROL;
1606 break;
1607 case MII_BMSR:
1608 reg = CAS_MII_STATUS;
1609 break;
1610 case MII_ANAR:
1611 reg = CAS_MII_ANAR;
1612 break;
1613 case MII_ANLPAR:
1614 reg = CAS_MII_ANLPAR;
1615 break;
1616 default:
1617 return;
1618 }
1619
1620 bus_space_write_4(t, pcs, reg, val);
1621
1622 if (reset)
1623 cas_bitwait(sc, pcs, CAS_MII_CONTROL, CAS_MII_CONTROL_RESET, 0);
1624
1625 if (reg == CAS_MII_ANAR || reset)
1626 bus_space_write_4(t, pcs, CAS_MII_CONFIG,
1627 CAS_MII_CONFIG_ENABLE);
1628}
1629
1630int
1631cas_mediachange(struct ifnet *ifp)
1632{
1633 struct cas_softc *sc = ifp->if_softc;
1634 struct mii_data *mii = &sc->sc_mii;
1635
1636 if (mii->mii_instance) {
1637 struct mii_softc *miisc;
1638 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
1639 mii_phy_reset(miisc);
1640 }
1641
1642 return (mii_mediachg(&sc->sc_mii));
1643}
1644
1645void
1646cas_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
1647{
1648 struct cas_softc *sc = ifp->if_softc;
1649
1650 mii_pollstat(&sc->sc_mii);
1651 ifmr->ifm_active = sc->sc_mii.mii_media_active;
1652 ifmr->ifm_status = sc->sc_mii.mii_media_status;
1653}
1654
1655/*
1656 * Process an ioctl request.
1657 */
1658int
1659cas_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1660{
1661 struct cas_softc *sc = ifp->if_softc;
1662 struct ifreq *ifr = (struct ifreq *)data;
1663 int s, error = 0;
1664
1665 s = splnet();
1666
1667 switch (cmd) {
1668 case SIOCSIFADDR:
1669 ifp->if_flags |= IFF_UP;
1670 if ((ifp->if_flags & IFF_RUNNING) == 0)
1671 cas_init(ifp);
1672 break;
1673
1674 case SIOCSIFFLAGS:
1675 if (ifp->if_flags & IFF_UP) {
1676 if (ifp->if_flags & IFF_RUNNING)
1677 error = ENETRESET;
1678 else
1679 cas_init(ifp);
1680 } else {
1681 if (ifp->if_flags & IFF_RUNNING)
1682 cas_stop(ifp, 1);
1683 }
1684#ifdef CAS_DEBUG
1685 sc->sc_debug = (ifp->if_flags & IFF_DEBUG) != 0 ? 1 : 0;
1686#endif
1687 break;
1688
1689 case SIOCGIFMEDIA:
1690 case SIOCSIFMEDIA:
1691 error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
1692 break;
1693
1694 default:
1695 error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
1696 }
1697
1698 if (error == ENETRESET) {
1699 if (ifp->if_flags & IFF_RUNNING)
1700 cas_iff(sc);
1701 error = 0;
1702 }
1703
1704 splx(s);
1705 return (error);
1706}
1707
1708void
1709cas_iff(struct cas_softc *sc)
1710{
1711 struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1712 struct arpcom *ac = &sc->sc_arpcom;
1713 struct ether_multi *enm;
1714 struct ether_multistep step;
1715 bus_space_tag_t t = sc->sc_memt;
1716 bus_space_handle_t h = sc->sc_memh;
1717 u_int32_t crc, hash[16], rxcfg;
1718 int i;
1719
1720 rxcfg = bus_space_read_4(t, h, CAS_MAC_RX_CONFIG);
1721 rxcfg &= ~(CAS_MAC_RX_HASH_FILTER | CAS_MAC_RX_PROMISCUOUS |
1722 CAS_MAC_RX_PROMISC_GRP);
1723 ifp->if_flags &= ~IFF_ALLMULTI;
1724
1725 if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
1726 ifp->if_flags |= IFF_ALLMULTI;
1727 if (ifp->if_flags & IFF_PROMISC)
1728 rxcfg |= CAS_MAC_RX_PROMISCUOUS;
1729 else
1730 rxcfg |= CAS_MAC_RX_PROMISC_GRP;
1731 } else {
1732 /*
1733 * Set up multicast address filter by passing all multicast
1734 * addresses through a crc generator, and then using the
1735 * high order 8 bits as an index into the 256 bit logical
1736 * address filter. The high order 4 bits selects the word,
1737 * while the other 4 bits select the bit within the word
1738 * (where bit 0 is the MSB).
1739 */
1740
1741 rxcfg |= CAS_MAC_RX_HASH_FILTER;
1742
1743 /* Clear hash table */
1744 for (i = 0; i < 16; i++)
1745 hash[i] = 0;
1746
1747 ETHER_FIRST_MULTI(step, ac, enm);
1748 while (enm != NULL) {
1749 crc = ether_crc32_le(enm->enm_addrlo,
1750 ETHER_ADDR_LEN);
1751
1752 /* Just want the 8 most significant bits. */
1753 crc >>= 24;
1754
1755 /* Set the corresponding bit in the filter. */
1756 hash[crc >> 4] |= 1 << (15 - (crc & 15));
1757
1758 ETHER_NEXT_MULTI(step, enm);
1759 }
1760
1761 /* Now load the hash table into the chip (if we are using it) */
1762 for (i = 0; i < 16; i++) {
1763 bus_space_write_4(t, h,
1764 CAS_MAC_HASH0 + i * (CAS_MAC_HASH1 - CAS_MAC_HASH0),
1765 hash[i]);
1766 }
1767 }
1768
1769 bus_space_write_4(t, h, CAS_MAC_RX_CONFIG, rxcfg);
1770}
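The comment in cas_iff() above maps the top eight CRC bits into a 256-bit filter: the upper four bits select one of sixteen hash words and the lower four select the bit within that word, counted from the MSB. A short host-side sketch of that mapping; the CRC value is a fixed stand-in for the output of ether_crc32_le():

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint32_t hash[16] = { 0 };
	uint32_t crc = 0xa7c00000;	/* stand-in for ether_crc32_le() */

	crc >>= 24;			/* keep the 8 most significant bits */
	hash[crc >> 4] |= 1 << (15 - (crc & 15));

	printf("crc byte 0x%02x -> word %u, bit %u, hash word 0x%04x\n",
	    (unsigned)crc, (unsigned)(crc >> 4),
	    (unsigned)(15 - (crc & 15)), (unsigned)hash[crc >> 4]);
	return 0;
}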
1771
1772int
1773cas_encap(struct cas_softc *sc, struct mbuf *m, int *used)
1774{
1775 u_int64_t flags;
1776 u_int32_t first, cur, frag, i;
1777 bus_dmamap_t map;
1778
1779 cur = frag = (sc->sc_tx_prod + *used) % CAS_NTXDESC;
1780 map = sc->sc_txd[cur].sd_map;
1781
1782 switch (bus_dmamap_load_mbuf(sc->sc_dmatag, map, m, BUS_DMA_NOWAIT)) {
1783 case 0:
1784 break;
1785 case EFBIG:
1786 if (m_defrag(m, M_DONTWAIT) == 0 &&
1787 bus_dmamap_load_mbuf(sc->sc_dmatag, map, m,
1788 BUS_DMA_NOWAIT) == 0)
1789 break;
1790 /* FALLTHROUGH */
1791 default:
1792 return (ENOBUFS);
1793 }
1794
1795 bus_dmamap_sync(sc->sc_dmatag, map, 0, map->dm_mapsize,
1796 BUS_DMASYNC_PREWRITE);
1797
1798 first = cur;
1799 for (i = 0; i < map->dm_nsegs; i++) {
1800 sc->sc_txdescs[frag].cd_addr =
1801 CAS_DMA_WRITE(map->dm_segs[i].ds_addr);
1802 flags = (map->dm_segs[i].ds_len & CAS_TD_BUFSIZE) |
1803 (i == 0 ? CAS_TD_START_OF_PACKET : 0) |
1804 ((i == (map->dm_nsegs - 1)) ? CAS_TD_END_OF_PACKET : 0);
1805 sc->sc_txdescs[frag].cd_flags = CAS_DMA_WRITE(flags);
1806 bus_dmamap_sync(sc->sc_dmatag, sc->sc_cddmamap,
1807 CAS_CDTXOFF(frag), sizeof(struct cas_desc),
1808 BUS_DMASYNC_PREWRITE);
1809 cur = frag;
1810 if (++frag == CAS_NTXDESC)
1811 frag = 0;
1812 }
1813
1814 sc->sc_txd[first].sd_map = sc->sc_txd[cur].sd_map;
1815 sc->sc_txd[cur].sd_map = map;
1816 sc->sc_txd[cur].sd_mbuf = m;
1817
1818 bus_space_write_4(sc->sc_memt, sc->sc_memh, CAS_TX_KICK, frag);
1819
1820 *used += map->dm_nsegs;
1821
1822 /* sync descriptors */
1823
1824 return (0);
1825}
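cas_encap() above tags the first DMA segment with CAS_TD_START_OF_PACKET and the last with CAS_TD_END_OF_PACKET while keeping the segment length in the low bits of the flag word. A minimal sketch of that composition for a hypothetical three-segment packet, using the expanded constants from the listing; the segment lengths are invented, since a real map comes from bus_dmamap_load_mbuf():

#include <stdio.h>
#include <stdint.h>

#define TD_BUFSIZE		0x0000000000007fffULL
#define TD_START_OF_PACKET	0x0000000080000000ULL
#define TD_END_OF_PACKET	0x0000000040000000ULL

int
main(void)
{
	uint64_t seglen[3] = { 64, 1024, 418 };	/* invented segment sizes */
	int nsegs = 3, i;

	for (i = 0; i < nsegs; i++) {
		/* Same flag composition as the loop in cas_encap(). */
		uint64_t flags = (seglen[i] & TD_BUFSIZE) |
		    (i == 0 ? TD_START_OF_PACKET : 0) |
		    (i == nsegs - 1 ? TD_END_OF_PACKET : 0);
		printf("seg %d: flags 0x%016llx\n", i,
		    (unsigned long long)flags);
	}
	return 0;
}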
1826
1827/*
1828 * Transmit interrupt.
1829 */
1830int
1831cas_tint(struct cas_softc *sc, u_int32_t status)
1832{
1833 struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1834 struct cas_sxd *sd;
1835 u_int32_t cons, comp;
1836 int freed, used;
1837
1838 comp = bus_space_read_4(sc->sc_memt, sc->sc_memh, CAS_TX_COMPLETION);
1839 cons = sc->sc_tx_cons;
1840 freed = 0;
1841 while (cons != comp) {
1842 sd = &sc->sc_txd[cons];
1843 if (sd->sd_mbuf != NULL) {
1844 bus_dmamap_sync(sc->sc_dmatag, sd->sd_map, 0,
1845 sd->sd_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1846 bus_dmamap_unload(sc->sc_dmatag, sd->sd_map);
1847 m_freem(sd->sd_mbuf);
1848 sd->sd_mbuf = NULL;
1849 }
1850 freed++;
1851 if (++cons == CAS_NTXDESC)
1852 cons = 0;
1853 }
1854 sc->sc_tx_cons = cons;
1855
1856 used = atomic_sub_int_nv(&sc->sc_tx_cnt, freed);
1857 if (used < CAS_NTXDESC - 2)
1858 ifq_clr_oactive(&ifp->if_snd);
1859 if (used == 0)
1860 ifp->if_timer = 0;
1861
1862 if (!ifq_empty(&ifp->if_snd)) {
1863 KERNEL_LOCK();
1864 cas_start(ifp);
1865 KERNEL_UNLOCK();
1866 }
1867
1868 return (1);
1869}
1870
1871void
1872cas_start(struct ifnet *ifp)
1873{
1874 struct cas_softc *sc = ifp->if_softc;
1875 struct mbuf *m = NULL;
1876 int used;
1877
1878 if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
1879 return;
1880
1881 used = 0;
1882 while (1) {
1883 if ((sc->sc_tx_cnt + used + CAS_NTXSEGS) >= (CAS_NTXDESC - 2)) {
1884 ifq_set_oactive(&ifp->if_snd);
1885 break;
1886 }
1887
1888 m = ifq_dequeue(&ifp->if_snd);
1889 if (m == NULL)
1890 break;
1891
1892 if (cas_encap(sc, m, &used)) {
1893 m_freem(m);
1894 continue;
1895 }
1896
1897#if NBPFILTER > 0
1898 if (ifp->if_bpf)
1899 bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
1900#endif
1901 }
1902
1903 if (used != 0) {
1904 ifp->if_timer = 5;
1905 sc->sc_tx_prod = (sc->sc_tx_prod + used) % CAS_NTXDESC;
1906 atomic_add_int(&sc->sc_tx_cnt, used);
1907 }
1908}