Bug Summary

File: dev/ic/hme.c
Warning: line 464, column 2 (dead store, reported by checker deadcode.DeadStores)
Value stored to 'p' is never read

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name hme.c -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/lib/clang/13.0.0 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I 
/usr/src/sys/dev/pci/drm/amd/pm/powerplay -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -D CONFIG_DRM_AMD_DC_DCN3_0 -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant -fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /usr/obj/sys/arch/amd64/compile/GENERIC.MP/scan-build/2022-01-12-131800-47421-1 -x c /usr/src/sys/dev/ic/hme.c
1/* $OpenBSD: hme.c,v 1.83 2020/12/12 11:48:52 jan Exp $ */
2/* $NetBSD: hme.c,v 1.21 2001/07/07 15:59:37 thorpej Exp $ */
3
4/*-
5 * Copyright (c) 1999 The NetBSD Foundation, Inc.
6 * All rights reserved.
7 *
8 * This code is derived from software contributed to The NetBSD Foundation
9 * by Paul Kranenburg.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 * POSSIBILITY OF SUCH DAMAGE.
31 */
32
33/*
34 * HME Ethernet module driver.
35 */
36
37#include "bpfilter.h"
38
39#undef HMEDEBUG
40
41#include <sys/param.h>
42#include <sys/systm.h>
43#include <sys/kernel.h>
44#include <sys/mbuf.h>
45#include <sys/syslog.h>
46#include <sys/socket.h>
47#include <sys/device.h>
48#include <sys/malloc.h>
49#include <sys/ioctl.h>
50#include <sys/errno.h>
51
52#include <net/if.h>
53#include <net/if_media.h>
54
55#include <netinet/in.h>
56#include <netinet/if_ether.h>
57
58#if NBPFILTER1 > 0
59#include <net/bpf.h>
60#endif
61
62#include <dev/mii/mii.h>
63#include <dev/mii/miivar.h>
64
65#include <machine/bus.h>
66
67#include <dev/ic/hmereg.h>
68#include <dev/ic/hmevar.h>
69
70struct cfdriver hme_cd = {
71 NULL((void *)0), "hme", DV_IFNET
72};
73
74#define HME_RX_OFFSET2 2
75
76void hme_start(struct ifnet *);
77void hme_stop(struct hme_softc *, int);
78int hme_ioctl(struct ifnet *, u_long, caddr_t);
79void hme_tick(void *);
80void hme_watchdog(struct ifnet *);
81void hme_init(struct hme_softc *);
82void hme_meminit(struct hme_softc *);
83void hme_mifinit(struct hme_softc *);
84void hme_reset(struct hme_softc *);
85void hme_iff(struct hme_softc *);
86void hme_fill_rx_ring(struct hme_softc *);
87int hme_newbuf(struct hme_softc *, struct hme_sxd *);
88
89/* MII methods & callbacks */
90static int hme_mii_readreg(struct device *, int, int);
91static void hme_mii_writereg(struct device *, int, int, int);
92static void hme_mii_statchg(struct device *);
93
94int hme_mediachange(struct ifnet *);
95void hme_mediastatus(struct ifnet *, struct ifmediareq *);
96
97int hme_eint(struct hme_softc *, u_int);
98int hme_rint(struct hme_softc *);
99int hme_tint(struct hme_softc *);
100
101void
102hme_config(struct hme_softc *sc)
103{
104 struct ifnet *ifp = &sc->sc_arpcom.ac_if;
105 struct mii_data *mii = &sc->sc_mii;
106 struct mii_softc *child;
107 bus_dma_tag_t dmatag = sc->sc_dmatag;
108 bus_dma_segment_t seg;
109 bus_size_t size;
110 int rseg, error, i;
111
112 /*
113 * HME common initialization.
114 *
115 * hme_softc fields that must be initialized by the front-end:
116 *
117 * the bus tag:
118 * sc_bustag
119 *
120 * the dma bus tag:
121 * sc_dmatag
122 *
123 * the bus handles:
124 * sc_seb (Shared Ethernet Block registers)
125 * sc_erx (Receiver Unit registers)
126 * sc_etx (Transmitter Unit registers)
127 * sc_mac (MAC registers)
128 * sc_mif (Management Interface registers)
129 *
130 * the maximum bus burst size:
131 * sc_burst
132 *
133 * the local Ethernet address:
134 * sc_arpcom.ac_enaddr
135 *
136 */
137
138 /* Make sure the chip is stopped. */
139 hme_stop(sc, 0);
140
141 for (i = 0; i < HME_TX_RING_SIZE64; i++) {
142 if (bus_dmamap_create(sc->sc_dmatag, MCLBYTES, HME_TX_NSEGS,(*(sc->sc_dmatag)->_dmamap_create)((sc->sc_dmatag), (
(1 << 11)), (16), ((1 << 11)), (0), (0x0001 | 0x0002
), (&sc->sc_txd[i].sd_map))
143 MCLBYTES, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,(*(sc->sc_dmatag)->_dmamap_create)((sc->sc_dmatag), (
(1 << 11)), (16), ((1 << 11)), (0), (0x0001 | 0x0002
), (&sc->sc_txd[i].sd_map))
144 &sc->sc_txd[i].sd_map)(*(sc->sc_dmatag)->_dmamap_create)((sc->sc_dmatag), (
(1 << 11)), (16), ((1 << 11)), (0), (0x0001 | 0x0002
), (&sc->sc_txd[i].sd_map))
!= 0) {
145 sc->sc_txd[i].sd_map = NULL((void *)0);
146 goto fail;
147 }
148 }
149 for (i = 0; i < HME_RX_RING_SIZE64; i++) {
150 if (bus_dmamap_create(sc->sc_dmatag, MCLBYTES, 1,(*(sc->sc_dmatag)->_dmamap_create)((sc->sc_dmatag), (
(1 << 11)), (1), ((1 << 11)), (0), (0x0001 | 0x0002
), (&sc->sc_rxd[i].sd_map))
151 MCLBYTES, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,(*(sc->sc_dmatag)->_dmamap_create)((sc->sc_dmatag), (
(1 << 11)), (1), ((1 << 11)), (0), (0x0001 | 0x0002
), (&sc->sc_rxd[i].sd_map))
152 &sc->sc_rxd[i].sd_map)(*(sc->sc_dmatag)->_dmamap_create)((sc->sc_dmatag), (
(1 << 11)), (1), ((1 << 11)), (0), (0x0001 | 0x0002
), (&sc->sc_rxd[i].sd_map))
!= 0) {
153 sc->sc_rxd[i].sd_map = NULL((void *)0);
154 goto fail;
155 }
156 }
157 if (bus_dmamap_create(sc->sc_dmatag, MCLBYTES, 1, MCLBYTES, 0,(*(sc->sc_dmatag)->_dmamap_create)((sc->sc_dmatag), (
(1 << 11)), (1), ((1 << 11)), (0), (0x0001 | 0x0002
), (&sc->sc_rxmap_spare))
158 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &sc->sc_rxmap_spare)(*(sc->sc_dmatag)->_dmamap_create)((sc->sc_dmatag), (
(1 << 11)), (1), ((1 << 11)), (0), (0x0001 | 0x0002
), (&sc->sc_rxmap_spare))
!= 0) {
159 sc->sc_rxmap_spare = NULL((void *)0);
160 goto fail;
161 }
162
163 /*
164 * Allocate DMA capable memory
165 * Buffer descriptors must be aligned on a 2048 byte boundary;
166 * take this into account when calculating the size. Note that
167 * the maximum number of descriptors (256) occupies 2048 bytes,
168 * so we allocate that much regardless of the number of descriptors.
169 */
170 size = (HME_XD_SIZE8 * HME_RX_RING_MAX256) + /* RX descriptors */
171 (HME_XD_SIZE8 * HME_TX_RING_MAX256); /* TX descriptors */
172
173 /* Allocate DMA buffer */
174 if ((error = bus_dmamem_alloc(dmatag, size, 2048, 0, &seg, 1, &rseg,(*(dmatag)->_dmamem_alloc)((dmatag), (size), (2048), (0), (
&seg), (1), (&rseg), (0x0001))
175 BUS_DMA_NOWAIT)(*(dmatag)->_dmamem_alloc)((dmatag), (size), (2048), (0), (
&seg), (1), (&rseg), (0x0001))
) != 0) {
176 printf("\n%s: DMA buffer alloc error %d\n",
177 sc->sc_dev.dv_xname, error);
178 return;
179 }
180
181 /* Map DMA memory in CPU addressable space */
182 if ((error = bus_dmamem_map(dmatag, &seg, rseg, size,(*(dmatag)->_dmamem_map)((dmatag), (&seg), (rseg), (size
), (&sc->sc_rb.rb_membase), (0x0001|0x0004))
183 &sc->sc_rb.rb_membase, BUS_DMA_NOWAIT|BUS_DMA_COHERENT)(*(dmatag)->_dmamem_map)((dmatag), (&seg), (rseg), (size
), (&sc->sc_rb.rb_membase), (0x0001|0x0004))
) != 0) {
184 printf("\n%s: DMA buffer map error %d\n",
185 sc->sc_dev.dv_xname, error);
186 bus_dmamap_unload(dmatag, sc->sc_dmamap)(*(dmatag)->_dmamap_unload)((dmatag), (sc->sc_dmamap));
187 bus_dmamem_free(dmatag, &seg, rseg)(*(dmatag)->_dmamem_free)((dmatag), (&seg), (rseg));
188 return;
189 }
190
191 if ((error = bus_dmamap_create(dmatag, size, 1, size, 0,(*(dmatag)->_dmamap_create)((dmatag), (size), (1), (size),
(0), (0x0001), (&sc->sc_dmamap))
192 BUS_DMA_NOWAIT, &sc->sc_dmamap)(*(dmatag)->_dmamap_create)((dmatag), (size), (1), (size),
(0), (0x0001), (&sc->sc_dmamap))
) != 0) {
193 printf("\n%s: DMA map create error %d\n",
194 sc->sc_dev.dv_xname, error);
195 return;
196 }
197
198 /* Load the buffer */
199 if ((error = bus_dmamap_load(dmatag, sc->sc_dmamap,(*(dmatag)->_dmamap_load)((dmatag), (sc->sc_dmamap), (sc
->sc_rb.rb_membase), (size), (((void *)0)), (0x0001|0x0004
))
200 sc->sc_rb.rb_membase, size, NULL,(*(dmatag)->_dmamap_load)((dmatag), (sc->sc_dmamap), (sc
->sc_rb.rb_membase), (size), (((void *)0)), (0x0001|0x0004
))
201 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)(*(dmatag)->_dmamap_load)((dmatag), (sc->sc_dmamap), (sc
->sc_rb.rb_membase), (size), (((void *)0)), (0x0001|0x0004
))
) != 0) {
202 printf("\n%s: DMA buffer map load error %d\n",
203 sc->sc_dev.dv_xname, error);
204 bus_dmamem_free(dmatag, &seg, rseg)(*(dmatag)->_dmamem_free)((dmatag), (&seg), (rseg));
205 return;
206 }
207 sc->sc_rb.rb_dmabase = sc->sc_dmamap->dm_segs[0].ds_addr;
208
209 printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));
210
211 /* Initialize ifnet structure. */
212 strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, sizeof ifp->if_xname);
213 ifp->if_softc = sc;
214 ifp->if_start = hme_start;
215 ifp->if_ioctl = hme_ioctl;
216 ifp->if_watchdog = hme_watchdog;
217 ifp->if_flags =
218 IFF_BROADCAST0x2 | IFF_SIMPLEX0x800 | IFF_MULTICAST0x8000;
219 ifp->if_capabilitiesif_data.ifi_capabilities = IFCAP_VLAN_MTU0x00000010;
220
221 /* Initialize ifmedia structures and MII info */
222 mii->mii_ifp = ifp;
223 mii->mii_readreg = hme_mii_readreg;
224 mii->mii_writereg = hme_mii_writereg;
225 mii->mii_statchg = hme_mii_statchg;
226
227 ifmedia_init(&mii->mii_media, IFM_IMASK0xff00000000000000ULL,
228 hme_mediachange, hme_mediastatus);
229
230 hme_mifinit(sc);
231
232 if (sc->sc_tcvr == -1)
233 mii_attach(&sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY-1,
234 MII_OFFSET_ANY-1, 0);
235 else
236 mii_attach(&sc->sc_dev, mii, 0xffffffff, sc->sc_tcvr,
237 MII_OFFSET_ANY-1, 0);
238
239 child = LIST_FIRST(&mii->mii_phys)((&mii->mii_phys)->lh_first);
240 if (child == NULL((void *)0)) {
241 /* No PHY attached */
242 ifmedia_add(&sc->sc_mediasc_mii.mii_media, IFM_ETHER0x0000000000000100ULL|IFM_MANUAL1ULL, 0, NULL((void *)0));
243 ifmedia_set(&sc->sc_mediasc_mii.mii_media, IFM_ETHER0x0000000000000100ULL|IFM_MANUAL1ULL);
244 } else {
245 /*
246 * Walk along the list of attached MII devices and
247 * establish an `MII instance' to `phy number'
248 * mapping. We'll use this mapping in media change
249 * requests to determine which phy to use to program
250 * the MIF configuration register.
251 */
252 for (; child != NULL((void *)0); child = LIST_NEXT(child, mii_list)((child)->mii_list.le_next)) {
253 /*
254 * Note: we support just two PHYs: the built-in
255 * internal device and an external on the MII
256 * connector.
257 */
258 if (child->mii_phy > 1 || child->mii_inst > 1) {
259 printf("%s: cannot accommodate MII device %s"
260 " at phy %d, instance %lld\n",
261 sc->sc_dev.dv_xname,
262 child->mii_dev.dv_xname,
263 child->mii_phy, child->mii_inst);
264 continue;
265 }
266
267 sc->sc_phys[child->mii_inst] = child->mii_phy;
268 }
269
270 /*
271 * XXX - we can really do the following ONLY if the
272 * phy indeed has the auto negotiation capability!!
273 */
274 ifmedia_set(&sc->sc_mediasc_mii.mii_media, IFM_ETHER0x0000000000000100ULL|IFM_AUTO0ULL);
275 }
276
277 /* Attach the interface. */
278 if_attach(ifp);
279 ether_ifattach(ifp);
280
281 timeout_set(&sc->sc_tick_ch, hme_tick, sc);
282 return;
283
284fail:
285 if (sc->sc_rxmap_spare != NULL((void *)0))
286 bus_dmamap_destroy(sc->sc_dmatag, sc->sc_rxmap_spare)(*(sc->sc_dmatag)->_dmamap_destroy)((sc->sc_dmatag),
(sc->sc_rxmap_spare))
;
287 for (i = 0; i < HME_TX_RING_SIZE64; i++)
288 if (sc->sc_txd[i].sd_map != NULL((void *)0))
289 bus_dmamap_destroy(sc->sc_dmatag, sc->sc_txd[i].sd_map)(*(sc->sc_dmatag)->_dmamap_destroy)((sc->sc_dmatag),
(sc->sc_txd[i].sd_map))
;
290 for (i = 0; i < HME_RX_RING_SIZE64; i++)
291 if (sc->sc_rxd[i].sd_map != NULL((void *)0))
292 bus_dmamap_destroy(sc->sc_dmatag, sc->sc_rxd[i].sd_map)(*(sc->sc_dmatag)->_dmamap_destroy)((sc->sc_dmatag),
(sc->sc_rxd[i].sd_map))
;
293}
294
295void
296hme_unconfig(struct hme_softc *sc)
297{
298 struct ifnet *ifp = &sc->sc_arpcom.ac_if;
299 int i;
300
301 hme_stop(sc, 1);
302
303 bus_dmamap_destroy(sc->sc_dmatag, sc->sc_rxmap_spare)(*(sc->sc_dmatag)->_dmamap_destroy)((sc->sc_dmatag),
(sc->sc_rxmap_spare))
;
304 for (i = 0; i < HME_TX_RING_SIZE64; i++)
305 if (sc->sc_txd[i].sd_map != NULL((void *)0))
306 bus_dmamap_destroy(sc->sc_dmatag, sc->sc_txd[i].sd_map)(*(sc->sc_dmatag)->_dmamap_destroy)((sc->sc_dmatag),
(sc->sc_txd[i].sd_map))
;
307 for (i = 0; i < HME_RX_RING_SIZE64; i++)
308 if (sc->sc_rxd[i].sd_map != NULL((void *)0))
309 bus_dmamap_destroy(sc->sc_dmatag, sc->sc_rxd[i].sd_map)(*(sc->sc_dmatag)->_dmamap_destroy)((sc->sc_dmatag),
(sc->sc_rxd[i].sd_map))
;
310
311 /* Detach all PHYs */
312 mii_detach(&sc->sc_mii, MII_PHY_ANY-1, MII_OFFSET_ANY-1);
313
314 /* Delete all remaining media. */
315 ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY((uint64_t) -1));
316
317 ether_ifdetach(ifp);
318 if_detach(ifp);
319}
320
321void
322hme_tick(void *arg)
323{
324 struct hme_softc *sc = arg;
325 struct ifnet *ifp = &sc->sc_arpcom.ac_if;
326 bus_space_tag_t t = sc->sc_bustag;
327 bus_space_handle_t mac = sc->sc_mac;
328 int s;
329
330 s = splnet()splraise(0x7);
331 /*
332 * Unload collision counters
333 */
334 ifp->if_collisionsif_data.ifi_collisions +=
335 bus_space_read_4(t, mac, HME_MACI_NCCNT)((t)->read_4((mac), ((144*4)))) +
336 bus_space_read_4(t, mac, HME_MACI_FCCNT)((t)->read_4((mac), ((145*4)))) +
337 bus_space_read_4(t, mac, HME_MACI_EXCNT)((t)->read_4((mac), ((146*4)))) +
338 bus_space_read_4(t, mac, HME_MACI_LTCNT)((t)->read_4((mac), ((147*4))));
339
340 /*
341 * then clear the hardware counters.
342 */
343 bus_space_write_4(t, mac, HME_MACI_NCCNT, 0)((t)->write_4((mac), ((144*4)), (0)));
344 bus_space_write_4(t, mac, HME_MACI_FCCNT, 0)((t)->write_4((mac), ((145*4)), (0)));
345 bus_space_write_4(t, mac, HME_MACI_EXCNT, 0)((t)->write_4((mac), ((146*4)), (0)));
346 bus_space_write_4(t, mac, HME_MACI_LTCNT, 0)((t)->write_4((mac), ((147*4)), (0)));
347
348 /*
349 * If buffer allocation fails, the receive ring may become
350 * empty. There is no receive interrupt to recover from that.
351 */
352 if (if_rxr_inuse(&sc->sc_rx_ring)((&sc->sc_rx_ring)->rxr_alive) == 0)
353 hme_fill_rx_ring(sc);
354
355 mii_tick(&sc->sc_mii);
356 splx(s)spllower(s);
357
358 timeout_add_sec(&sc->sc_tick_ch, 1);
359}
360
361void
362hme_reset(struct hme_softc *sc)
363{
364 int s;
365
366 s = splnet()splraise(0x7);
367 hme_init(sc);
368 splx(s)spllower(s);
369}
370
371void
372hme_stop(struct hme_softc *sc, int softonly)
373{
374 struct ifnet *ifp = &sc->sc_arpcom.ac_if;
375 bus_space_tag_t t = sc->sc_bustag;
376 bus_space_handle_t seb = sc->sc_seb;
377 int n;
378
379 timeout_del(&sc->sc_tick_ch);
380
381 /*
382 * Mark the interface down and cancel the watchdog timer.
383 */
384 ifp->if_flags &= ~IFF_RUNNING0x40;
385 ifq_clr_oactive(&ifp->if_snd);
386 ifp->if_timer = 0;
387
388 if (!softonly) {
389 mii_down(&sc->sc_mii);
390
391 /* Mask all interrupts */
392 bus_space_write_4(t, seb, HME_SEBI_IMASK, 0xffffffff)((t)->write_4((seb), ((65*4)), (0xffffffff)));
393
394 /* Reset transmitter and receiver */
395 bus_space_write_4(t, seb, HME_SEBI_RESET,((t)->write_4((seb), ((0*4)), ((0x00000001 | 0x00000002)))
)
396 (HME_SEB_RESET_ETX | HME_SEB_RESET_ERX))((t)->write_4((seb), ((0*4)), ((0x00000001 | 0x00000002)))
)
;
397
398 for (n = 0; n < 20; n++) {
399 u_int32_t v = bus_space_read_4(t, seb, HME_SEBI_RESET)((t)->read_4((seb), ((0*4))));
400 if ((v & (HME_SEB_RESET_ETX0x00000001 | HME_SEB_RESET_ERX0x00000002)) == 0)
401 break;
402 DELAY(20)(*delay_func)(20);
403 }
404 if (n >= 20)
405 printf("%s: hme_stop: reset failed\n", sc->sc_dev.dv_xname);
406 }
407
408 for (n = 0; n < HME_TX_RING_SIZE64; n++) {
409 if (sc->sc_txd[n].sd_mbuf != NULL((void *)0)) {
410 bus_dmamap_sync(sc->sc_dmatag, sc->sc_txd[n].sd_map,(*(sc->sc_dmatag)->_dmamap_sync)((sc->sc_dmatag), (sc
->sc_txd[n].sd_map), (0), (sc->sc_txd[n].sd_map->dm_mapsize
), (0x08))
411 0, sc->sc_txd[n].sd_map->dm_mapsize,(*(sc->sc_dmatag)->_dmamap_sync)((sc->sc_dmatag), (sc
->sc_txd[n].sd_map), (0), (sc->sc_txd[n].sd_map->dm_mapsize
), (0x08))
412 BUS_DMASYNC_POSTWRITE)(*(sc->sc_dmatag)->_dmamap_sync)((sc->sc_dmatag), (sc
->sc_txd[n].sd_map), (0), (sc->sc_txd[n].sd_map->dm_mapsize
), (0x08))
;
413 bus_dmamap_unload(sc->sc_dmatag, sc->sc_txd[n].sd_map)(*(sc->sc_dmatag)->_dmamap_unload)((sc->sc_dmatag), (
sc->sc_txd[n].sd_map))
;
414 m_freem(sc->sc_txd[n].sd_mbuf);
415 sc->sc_txd[n].sd_mbuf = NULL((void *)0);
416 }
417 }
418 sc->sc_tx_prod = sc->sc_tx_cons = sc->sc_tx_cnt = 0;
419
420 for (n = 0; n < HME_RX_RING_SIZE64; n++) {
421 if (sc->sc_rxd[n].sd_mbuf != NULL((void *)0)) {
422 bus_dmamap_sync(sc->sc_dmatag, sc->sc_rxd[n].sd_map,(*(sc->sc_dmatag)->_dmamap_sync)((sc->sc_dmatag), (sc
->sc_rxd[n].sd_map), (0), (sc->sc_rxd[n].sd_map->dm_mapsize
), (0x02))
423 0, sc->sc_rxd[n].sd_map->dm_mapsize,(*(sc->sc_dmatag)->_dmamap_sync)((sc->sc_dmatag), (sc
->sc_rxd[n].sd_map), (0), (sc->sc_rxd[n].sd_map->dm_mapsize
), (0x02))
424 BUS_DMASYNC_POSTREAD)(*(sc->sc_dmatag)->_dmamap_sync)((sc->sc_dmatag), (sc
->sc_rxd[n].sd_map), (0), (sc->sc_rxd[n].sd_map->dm_mapsize
), (0x02))
;
425 bus_dmamap_unload(sc->sc_dmatag, sc->sc_rxd[n].sd_map)(*(sc->sc_dmatag)->_dmamap_unload)((sc->sc_dmatag), (
sc->sc_rxd[n].sd_map))
;
426 m_freem(sc->sc_rxd[n].sd_mbuf);
427 sc->sc_rxd[n].sd_mbuf = NULL((void *)0);
428 }
429 }
430 sc->sc_rx_prod = sc->sc_rx_cons = 0;
431}
432
433void
434hme_meminit(struct hme_softc *sc)
435{
436 bus_addr_t dma;
437 caddr_t p;
438 unsigned int i;
439 struct hme_ring *hr = &sc->sc_rb;
440
441 p = hr->rb_membase;
442 dma = hr->rb_dmabase;
443
444 /*
445 * Allocate transmit descriptors
446 */
447 hr->rb_txd = p;
448 hr->rb_txddma = dma;
449 p += HME_TX_RING_SIZE64 * HME_XD_SIZE8;
450 dma += HME_TX_RING_SIZE64 * HME_XD_SIZE8;
451 /* We have reserved descriptor space until the next 2048 byte boundary.*/
452 dma = (bus_addr_t)roundup((u_long)dma, 2048)(((((u_long)dma)+((2048)-1))/(2048))*(2048));
453 p = (caddr_t)roundup((u_long)p, 2048)(((((u_long)p)+((2048)-1))/(2048))*(2048));
454
455 /*
456 * Allocate receive descriptors
457 */
458 hr->rb_rxd = p;
459 hr->rb_rxddma = dma;
460 p += HME_RX_RING_SIZE64 * HME_XD_SIZE8;
461 dma += HME_RX_RING_SIZE64 * HME_XD_SIZE8;
462 /* Again move forward to the next 2048 byte boundary.*/
463 dma = (bus_addr_t)roundup((u_long)dma, 2048)(((((u_long)dma)+((2048)-1))/(2048))*(2048));
464 p = (caddr_t)roundup((u_long)p, 2048)(((((u_long)p)+((2048)-1))/(2048))*(2048));
Value stored to 'p' is never read
465
466 /*
467 * Initialize transmit descriptors
468 */
469 for (i = 0; i < HME_TX_RING_SIZE64; i++) {
470 HME_XD_SETADDR(sc->sc_pci, hr->rb_txd, i, 0)do { *((u_int32_t *)((hr->rb_txd) + ((i) * 8) + 4)) = ((sc
->sc_pci) ? ((__uint32_t)(0)) : (0)); } while(0)
;
471 HME_XD_SETFLAGS(sc->sc_pci, hr->rb_txd, i, 0)do { *((u_int32_t *)((hr->rb_txd) + ((i) * 8) + 0)) = ((sc
->sc_pci) ? ((__uint32_t)(0)) : (0)); } while(0)
;
472 sc->sc_txd[i].sd_mbuf = NULL((void *)0);
473 }
474
475 /*
476 * Initialize receive descriptors
477 */
478 for (i = 0; i < HME_RX_RING_SIZE64; i++) {
479 HME_XD_SETADDR(sc->sc_pci, hr->rb_rxd, i, 0)do { *((u_int32_t *)((hr->rb_rxd) + ((i) * 8) + 4)) = ((sc
->sc_pci) ? ((__uint32_t)(0)) : (0)); } while(0)
;
480 HME_XD_SETFLAGS(sc->sc_pci, hr->rb_rxd, i, 0)do { *((u_int32_t *)((hr->rb_rxd) + ((i) * 8) + 0)) = ((sc
->sc_pci) ? ((__uint32_t)(0)) : (0)); } while(0)
;
481 sc->sc_rxd[i].sd_mbuf = NULL((void *)0);
482 }
483
484 if_rxr_init(&sc->sc_rx_ring, 2, HME_RX_RING_SIZE64);
485 hme_fill_rx_ring(sc);
486}
487
488/*
489 * Initialization of interface; set up initialization block
490 * and transmit/receive descriptor rings.
491 */
492void
493hme_init(struct hme_softc *sc)
494{
495 struct ifnet *ifp = &sc->sc_arpcom.ac_if;
496 bus_space_tag_t t = sc->sc_bustag;
497 bus_space_handle_t seb = sc->sc_seb;
498 bus_space_handle_t etx = sc->sc_etx;
499 bus_space_handle_t erx = sc->sc_erx;
500 bus_space_handle_t mac = sc->sc_mac;
501 u_int8_t *ea;
502 u_int32_t v;
503
504 /*
505 * Initialization sequence. The numbered steps below correspond
506 * to the sequence outlined in section 6.3.5.1 in the Ethernet
507 * Channel Engine manual (part of the PCIO manual).
508 * See also the STP2002-STQ document from Sun Microsystems.
509 */
510
511 /* step 1 & 2. Reset the Ethernet Channel */
512 hme_stop(sc, 0);
513
514 /* Re-initialize the MIF */
515 hme_mifinit(sc);
516
517 /* step 3. Setup data structures in host memory */
518 hme_meminit(sc);
519
520 /* step 4. TX MAC registers & counters */
521 bus_space_write_4(t, mac, HME_MACI_NCCNT, 0)((t)->write_4((mac), ((144*4)), (0)));
522 bus_space_write_4(t, mac, HME_MACI_FCCNT, 0)((t)->write_4((mac), ((145*4)), (0)));
523 bus_space_write_4(t, mac, HME_MACI_EXCNT, 0)((t)->write_4((mac), ((146*4)), (0)));
524 bus_space_write_4(t, mac, HME_MACI_LTCNT, 0)((t)->write_4((mac), ((147*4)), (0)));
525 bus_space_write_4(t, mac, HME_MACI_TXSIZE, ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN)((t)->write_4((mac), ((140*4)), (1518 + 4)));
526
527 /* Load station MAC address */
528 ea = sc->sc_arpcom.ac_enaddr;
529 bus_space_write_4(t, mac, HME_MACI_MACADDR0, (ea[0] << 8) | ea[1])((t)->write_4((mac), ((200*4)), ((ea[0] << 8) | ea[1
])))
;
530 bus_space_write_4(t, mac, HME_MACI_MACADDR1, (ea[2] << 8) | ea[3])((t)->write_4((mac), ((199*4)), ((ea[2] << 8) | ea[3
])))
;
531 bus_space_write_4(t, mac, HME_MACI_MACADDR2, (ea[4] << 8) | ea[5])((t)->write_4((mac), ((198*4)), ((ea[4] << 8) | ea[5
])))
;
532
533 /*
534 * Init seed for backoff
535 * (source suggested by manual: low 10 bits of MAC address)
536 */
537 v = ((ea[4] << 8) | ea[5]) & 0x3fff;
538 bus_space_write_4(t, mac, HME_MACI_RANDSEED, v)((t)->write_4((mac), ((148*4)), (v)));
539
540
541 /* Note: Accepting power-on default for other MAC registers here.. */
542
543
544 /* step 5. RX MAC registers & counters */
545 hme_iff(sc);
546
547 /* step 6 & 7. Program Descriptor Ring Base Addresses */
548 bus_space_write_4(t, etx, HME_ETXI_RING, sc->sc_rb.rb_txddma)((t)->write_4((etx), ((2*4)), (sc->sc_rb.rb_txddma)));
549 bus_space_write_4(t, etx, HME_ETXI_RSIZE, HME_TX_RING_SIZE)((t)->write_4((etx), ((11*4)), (64)));
550
551 bus_space_write_4(t, erx, HME_ERXI_RING, sc->sc_rb.rb_rxddma)((t)->write_4((erx), ((1*4)), (sc->sc_rb.rb_rxddma)));
552 bus_space_write_4(t, mac, HME_MACI_RXSIZE, ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN)((t)->write_4((mac), ((196*4)), (1518 + 4)));
553
554 /* step 8. Global Configuration & Interrupt Mask */
555 bus_space_write_4(t, seb, HME_SEBI_IMASK,((t)->write_4((seb), ((65*4)), (~(0x01000000 | 0x00010000 |
0x02000000 | 0x10000000 | 0x00000002 | (0x80000000 | 0x40000000
| 0x20000000 | 0x10000000 | 0x08000000 | 0x04000000 | 0x00400000
| 0x00200000 | 0x00100000 | 0x00080000 | 0x00040000 | 0x00020000
| 0x00008000 | 0x00004000 | 0x00002000 | 0x00001000 | 0x00000800
| 0x00000400| 0x00000200| 0x00000080 | 0x00000040 | 0x00000010
| 0x00000008| 0x00000004)))))
556 ~(HME_SEB_STAT_HOSTTOTX | HME_SEB_STAT_RXTOHOST |((t)->write_4((seb), ((65*4)), (~(0x01000000 | 0x00010000 |
0x02000000 | 0x10000000 | 0x00000002 | (0x80000000 | 0x40000000
| 0x20000000 | 0x10000000 | 0x08000000 | 0x04000000 | 0x00400000
| 0x00200000 | 0x00100000 | 0x00080000 | 0x00040000 | 0x00020000
| 0x00008000 | 0x00004000 | 0x00002000 | 0x00001000 | 0x00000800
| 0x00000400| 0x00000200| 0x00000080 | 0x00000040 | 0x00000010
| 0x00000008| 0x00000004)))))
557 HME_SEB_STAT_TXALL | HME_SEB_STAT_TXPERR |((t)->write_4((seb), ((65*4)), (~(0x01000000 | 0x00010000 |
0x02000000 | 0x10000000 | 0x00000002 | (0x80000000 | 0x40000000
| 0x20000000 | 0x10000000 | 0x08000000 | 0x04000000 | 0x00400000
| 0x00200000 | 0x00100000 | 0x00080000 | 0x00040000 | 0x00020000
| 0x00008000 | 0x00004000 | 0x00002000 | 0x00001000 | 0x00000800
| 0x00000400| 0x00000200| 0x00000080 | 0x00000040 | 0x00000010
| 0x00000008| 0x00000004)))))
558 HME_SEB_STAT_RCNTEXP | HME_SEB_STAT_ALL_ERRORS))((t)->write_4((seb), ((65*4)), (~(0x01000000 | 0x00010000 |
0x02000000 | 0x10000000 | 0x00000002 | (0x80000000 | 0x40000000
| 0x20000000 | 0x10000000 | 0x08000000 | 0x04000000 | 0x00400000
| 0x00200000 | 0x00100000 | 0x00080000 | 0x00040000 | 0x00020000
| 0x00008000 | 0x00004000 | 0x00002000 | 0x00001000 | 0x00000800
| 0x00000400| 0x00000200| 0x00000080 | 0x00000040 | 0x00000010
| 0x00000008| 0x00000004)))))
;
559
560 switch (sc->sc_burst) {
561 default:
562 v = 0;
563 break;
564 case 16:
565 v = HME_SEB_CFG_BURST160x00000000;
566 break;
567 case 32:
568 v = HME_SEB_CFG_BURST320x00000001;
569 break;
570 case 64:
571 v = HME_SEB_CFG_BURST640x00000002;
572 break;
573 }
574 bus_space_write_4(t, seb, HME_SEBI_CFG, v)((t)->write_4((seb), ((1*4)), (v)));
575
576 /* step 9. ETX Configuration: use mostly default values */
577
578 /* Enable DMA */
579 v = bus_space_read_4(t, etx, HME_ETXI_CFG)((t)->read_4((etx), ((1*4))));
580 v |= HME_ETX_CFG_DMAENABLE0x00000001;
581 bus_space_write_4(t, etx, HME_ETXI_CFG, v)((t)->write_4((etx), ((1*4)), (v)));
582
583 /* Transmit Descriptor ring size: in increments of 16 */
584 bus_space_write_4(t, etx, HME_ETXI_RSIZE, HME_TX_RING_SIZE / 16 - 1)((t)->write_4((etx), ((11*4)), (64 / 16 - 1)));
585
586 /* step 10. ERX Configuration */
587 v = bus_space_read_4(t, erx, HME_ERXI_CFG)((t)->read_4((erx), ((0*4))));
588 v &= ~HME_ERX_CFG_RINGSIZE2560x00000600;
589#if HME_RX_RING_SIZE64 == 32
590 v |= HME_ERX_CFG_RINGSIZE320x00000000;
591#elif HME_RX_RING_SIZE64 == 64
592 v |= HME_ERX_CFG_RINGSIZE640x00000200;
593#elif HME_RX_RING_SIZE64 == 128
594 v |= HME_ERX_CFG_RINGSIZE1280x00000400;
595#elif HME_RX_RING_SIZE64 == 256
596 v |= HME_ERX_CFG_RINGSIZE2560x00000600;
597#else
598# error "RX ring size must be 32, 64, 128, or 256"
599#endif
600 /* Enable DMA */
601 v |= HME_ERX_CFG_DMAENABLE0x00000001 | (HME_RX_OFFSET2 << 3);
602 bus_space_write_4(t, erx, HME_ERXI_CFG, v)((t)->write_4((erx), ((0*4)), (v)));
603
604 /* step 11. XIF Configuration */
605 v = bus_space_read_4(t, mac, HME_MACI_XIF)((t)->read_4((mac), ((0*4))));
606 v |= HME_MAC_XIF_OE0x00000001;
607 bus_space_write_4(t, mac, HME_MACI_XIF, v)((t)->write_4((mac), ((0*4)), (v)));
608
609 /* step 12. RX_MAC Configuration Register */
610 v = bus_space_read_4(t, mac, HME_MACI_RXCFG)((t)->read_4((mac), ((195*4))));
611 v |= HME_MAC_RXCFG_ENABLE0x00000001;
612 bus_space_write_4(t, mac, HME_MACI_RXCFG, v)((t)->write_4((mac), ((195*4)), (v)));
613
614 /* step 13. TX_MAC Configuration Register */
615 v = bus_space_read_4(t, mac, HME_MACI_TXCFG)((t)->read_4((mac), ((131*4))));
616 v |= (HME_MAC_TXCFG_ENABLE0x00000001 | HME_MAC_TXCFG_DGIVEUP0x00000400);
617 bus_space_write_4(t, mac, HME_MACI_TXCFG, v)((t)->write_4((mac), ((131*4)), (v)));
618
619 /* Set the current media. */
620 mii_mediachg(&sc->sc_mii);
621
622 /* Start the one second timer. */
623 timeout_add_sec(&sc->sc_tick_ch, 1);
624
625 ifp->if_flags |= IFF_RUNNING0x40;
626 ifq_clr_oactive(&ifp->if_snd);
627
628 hme_start(ifp);
629}
630
/*
 * hme_start: drain the interface send queue into the TX descriptor ring.
 * For each mbuf: DMA-map it (defragmenting once on EFBIG), reserve ring
 * slots, write the fragment descriptors, then hand the first descriptor
 * to the chip last (HME_XD_OWN) and poke the TX DMA engine.
 */
631void
632hme_start(struct ifnet *ifp)
633{
634 struct hme_softc *sc = (struct hme_softc *)ifp->if_softc;
635 struct hme_ring *hr = &sc->sc_rb;
636 struct mbuf *m;
637 u_int32_t flags;
638 bus_dmamap_t map;
639 u_int32_t frag, cur, i;
640 int error;
641
642 if (!(ifp->if_flags & IFF_RUNNING0x40) || ifq_is_oactive(&ifp->if_snd))
643 return;
644
 /* Loop while the producer slot is free. */
645 while (sc->sc_txd[sc->sc_tx_prod].sd_mbuf == NULL((void *)0)) {
646 m = ifq_deq_begin(&ifp->if_snd);
647 if (m == NULL((void *)0))
648 break;
649
650 /*
651 * Encapsulate this packet and start it going...
652 * or fail...
653 */
654
655 cur = frag = sc->sc_tx_prod;
656 map = sc->sc_txd[cur].sd_map;
657
658 error = bus_dmamap_load_mbuf(sc->sc_dmatag, map, m,(*(sc->sc_dmatag)->_dmamap_load_mbuf)((sc->sc_dmatag
), (map), (m), (0x0001))
659 BUS_DMA_NOWAIT)(*(sc->sc_dmatag)->_dmamap_load_mbuf)((sc->sc_dmatag
), (map), (m), (0x0001))
;
660 if (error != 0 && error != EFBIG27)
661 goto drop;
662 if (error != 0) {
663 /* Too many fragments, linearize. */
664 if (m_defrag(m, M_DONTWAIT0x0002))
665 goto drop;
666 error = bus_dmamap_load_mbuf(sc->sc_dmatag, map, m,(*(sc->sc_dmatag)->_dmamap_load_mbuf)((sc->sc_dmatag
), (map), (m), (0x0001))
667 BUS_DMA_NOWAIT)(*(sc->sc_dmatag)->_dmamap_load_mbuf)((sc->sc_dmatag
), (map), (m), (0x0001))
;
668 if (error != 0)
669 goto drop;
670 }
671
 /* Keep a small reserve of descriptors; back off if the ring is nearly full. */
672 if ((HME_TX_RING_SIZE64 - (sc->sc_tx_cnt + map->dm_nsegs)) < 5) {
673 bus_dmamap_unload(sc->sc_dmatag, map)(*(sc->sc_dmatag)->_dmamap_unload)((sc->sc_dmatag), (
map))
;
674 ifq_deq_rollback(&ifp->if_snd, m);
675 ifq_set_oactive(&ifp->if_snd);
676 break;
677 }
678
679 /* We are now committed to transmitting the packet. */
680 ifq_deq_commit(&ifp->if_snd, m);
681
682#if NBPFILTER1 > 0
683 /*
684 * If BPF is listening on this interface, let it see the
685 * packet before we commit it to the wire.
686 */
687 if (ifp->if_bpf)
688 bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT(1 << 1));
689#endif
690
691 bus_dmamap_sync(sc->sc_dmatag, map, 0, map->dm_mapsize,(*(sc->sc_dmatag)->_dmamap_sync)((sc->sc_dmatag), (map
), (0), (map->dm_mapsize), (0x04))
692 BUS_DMASYNC_PREWRITE)(*(sc->sc_dmatag)->_dmamap_sync)((sc->sc_dmatag), (map
), (0), (map->dm_mapsize), (0x04))
;
693
 /*
  * Fill one descriptor per DMA segment.  The first gets SOP but
  * not OWN yet; later ones get OWN immediately so the chip never
  * sees a partially-built chain.
  */
694 for (i = 0; i < map->dm_nsegs; i++) {
695 flags = HME_XD_ENCODE_TSIZE(map->dm_segs[i].ds_len)(((map->dm_segs[i].ds_len) << 0) & 0x00003fff);
696 if (i == 0)
697 flags |= HME_XD_SOP0x40000000;
698 else
699 flags |= HME_XD_OWN0x80000000;
700
701 HME_XD_SETADDR(sc->sc_pci, hr->rb_txd, frag,do { *((u_int32_t *)((hr->rb_txd) + ((frag) * 8) + 4)) = (
(sc->sc_pci) ? ((__uint32_t)(map->dm_segs[i].ds_addr)) :
(map->dm_segs[i].ds_addr)); } while(0)
702 map->dm_segs[i].ds_addr)do { *((u_int32_t *)((hr->rb_txd) + ((frag) * 8) + 4)) = (
(sc->sc_pci) ? ((__uint32_t)(map->dm_segs[i].ds_addr)) :
(map->dm_segs[i].ds_addr)); } while(0)
;
703 HME_XD_SETFLAGS(sc->sc_pci, hr->rb_txd, frag, flags)do { *((u_int32_t *)((hr->rb_txd) + ((frag) * 8) + 0)) = (
(sc->sc_pci) ? ((__uint32_t)(flags)) : (flags)); } while(0
)
;
704
705 cur = frag;
706 if (++frag == HME_TX_RING_SIZE64)
707 frag = 0;
708 }
709
710 /* Set end of packet on last descriptor. */
711 flags = HME_XD_GETFLAGS(sc->sc_pci, hr->rb_txd, cur)(sc->sc_pci) ? ((__uint32_t)(*((u_int32_t *)((hr->rb_txd
) + ((cur) * 8) + 0)))) : (*((u_int32_t *)((hr->rb_txd) + (
(cur) * 8) + 0)))
;
712 flags |= HME_XD_EOP0x20000000;
713 HME_XD_SETFLAGS(sc->sc_pci, hr->rb_txd, cur, flags)do { *((u_int32_t *)((hr->rb_txd) + ((cur) * 8) + 0)) = ((
sc->sc_pci) ? ((__uint32_t)(flags)) : (flags)); } while(0)
;
714
 /* Bookkeeping: the map/mbuf are tracked on the LAST slot of the chain. */
715 sc->sc_tx_cnt += map->dm_nsegs;
716 sc->sc_txd[sc->sc_tx_prod].sd_map = sc->sc_txd[cur].sd_map;
717 sc->sc_txd[cur].sd_map = map;
718 sc->sc_txd[cur].sd_mbuf = m;
719
720 /* Give first frame over to the hardware. */
721 flags = HME_XD_GETFLAGS(sc->sc_pci, hr->rb_txd, sc->sc_tx_prod)(sc->sc_pci) ? ((__uint32_t)(*((u_int32_t *)((hr->rb_txd
) + ((sc->sc_tx_prod) * 8) + 0)))) : (*((u_int32_t *)((hr->
rb_txd) + ((sc->sc_tx_prod) * 8) + 0)))
;
722 flags |= HME_XD_OWN0x80000000;
723 HME_XD_SETFLAGS(sc->sc_pci, hr->rb_txd, sc->sc_tx_prod, flags)do { *((u_int32_t *)((hr->rb_txd) + ((sc->sc_tx_prod) *
8) + 0)) = ((sc->sc_pci) ? ((__uint32_t)(flags)) : (flags
)); } while(0)
;
724
 /* Wake the TX DMA engine so it fetches the new descriptors. */
725 bus_space_write_4(sc->sc_bustag, sc->sc_etx, HME_ETXI_PENDING,((sc->sc_bustag)->write_4((sc->sc_etx), ((0*4)), (0x00000001
)))
726 HME_ETX_TP_DMAWAKEUP)((sc->sc_bustag)->write_4((sc->sc_etx), ((0*4)), (0x00000001
)))
;
727 sc->sc_tx_prod = frag;
728
 /* Arm the watchdog (seconds) while transmission is outstanding. */
729 ifp->if_timer = 5;
730 }
731
732 return;
733
 /* Unrecoverable mapping failure: count the error and toss the packet. */
734 drop:
735 ifq_deq_commit(&ifp->if_snd, m);
736 m_freem(m);
737 ifp->if_oerrorsif_data.ifi_oerrors++;
738}
739
740/*
741 * Transmit interrupt.
742 */
/*
 * hme_tint: TX completion interrupt.  Walk the ring from the consumer
 * index, reclaiming descriptors the chip has released (OWN bit clear),
 * unloading/freeing the associated mbufs, and restart output.
 * Always returns 1 (interrupt handled).
 */
743int
744hme_tint(struct hme_softc *sc)
745{
746 struct ifnet *ifp = &sc->sc_arpcom.ac_if;
747 unsigned int ri, txflags;
748 struct hme_sxd *sd;
749 int cnt = sc->sc_tx_cnt;
750
751 /* Fetch current position in the transmit ring */
752 ri = sc->sc_tx_cons;
753 sd = &sc->sc_txd[ri];
754
755 for (;;) {
756 if (cnt <= 0)
757 break;
758
759 txflags = HME_XD_GETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri)(sc->sc_pci) ? ((__uint32_t)(*((u_int32_t *)((sc->sc_rb
.rb_txd) + ((ri) * 8) + 0)))) : (*((u_int32_t *)((sc->sc_rb
.rb_txd) + ((ri) * 8) + 0)))
;
760
 /* Descriptor still owned by the chip: nothing more to reclaim. */
761 if (txflags & HME_XD_OWN0x80000000)
762 break;
763
 /* At least one slot freed, so output may proceed again. */
764 ifq_clr_oactive(&ifp->if_snd);
765
 /* Only the last slot of a chain carries the mbuf/map to release. */
766 if (sd->sd_mbuf != NULL((void *)0)) {
767 bus_dmamap_sync(sc->sc_dmatag, sd->sd_map,(*(sc->sc_dmatag)->_dmamap_sync)((sc->sc_dmatag), (sd
->sd_map), (0), (sd->sd_map->dm_mapsize), (0x08))
768 0, sd->sd_map->dm_mapsize, BUS_DMASYNC_POSTWRITE)(*(sc->sc_dmatag)->_dmamap_sync)((sc->sc_dmatag), (sd
->sd_map), (0), (sd->sd_map->dm_mapsize), (0x08))
;
769 bus_dmamap_unload(sc->sc_dmatag, sd->sd_map)(*(sc->sc_dmatag)->_dmamap_unload)((sc->sc_dmatag), (
sd->sd_map))
;
770 m_freem(sd->sd_mbuf);
771 sd->sd_mbuf = NULL((void *)0);
772 }
773
774 if (++ri == HME_TX_RING_SIZE64) {
775 ri = 0;
776 sd = sc->sc_txd;
777 } else
778 sd++;
779
780 --cnt;
781 }
782
 /* Watchdog stays armed only while descriptors remain outstanding. */
783 sc->sc_tx_cnt = cnt;
784 ifp->if_timer = cnt > 0 ? 5 : 0;
785
786 /* Update ring */
787 sc->sc_tx_cons = ri;
788
789 hme_start(ifp);
790
791 return (1);
792}
793
794/*
795 * Receive interrupt.
796 */
/*
 * hme_rint: RX interrupt.  Harvest completed receive descriptors
 * (OWN bit clear), queue good packets on a local mbuf_list for a single
 * ifiq_input() call, and refill the ring.  Always returns 1.
 */
797int
798hme_rint(struct hme_softc *sc)
799{
800 struct ifnet *ifp = &sc->sc_arpcom.ac_if;
801 struct mbuf_list ml = MBUF_LIST_INITIALIZER(){ ((void *)0), ((void *)0), 0 };
802 struct mbuf *m;
803 struct hme_sxd *sd;
804 unsigned int ri, len;
805 u_int32_t flags;
806
807 ri = sc->sc_rx_cons;
808 sd = &sc->sc_rxd[ri];
809
810 /*
811 * Process all buffers with valid data.
812 */
813 while (if_rxr_inuse(&sc->sc_rx_ring)((&sc->sc_rx_ring)->rxr_alive) > 0) {
814 flags = HME_XD_GETFLAGS(sc->sc_pci, sc->sc_rb.rb_rxd, ri)(sc->sc_pci) ? ((__uint32_t)(*((u_int32_t *)((sc->sc_rb
.rb_rxd) + ((ri) * 8) + 0)))) : (*((u_int32_t *)((sc->sc_rb
.rb_rxd) + ((ri) * 8) + 0)))
;
 /* Still owned by the chip: no more completed packets. */
815 if (flags & HME_XD_OWN0x80000000)
816 break;
817
818 bus_dmamap_sync(sc->sc_dmatag, sd->sd_map,(*(sc->sc_dmatag)->_dmamap_sync)((sc->sc_dmatag), (sd
->sd_map), (0), (sd->sd_map->dm_mapsize), (0x02))
819 0, sd->sd_map->dm_mapsize, BUS_DMASYNC_POSTREAD)(*(sc->sc_dmatag)->_dmamap_sync)((sc->sc_dmatag), (sd
->sd_map), (0), (sd->sd_map->dm_mapsize), (0x02))
;
820 bus_dmamap_unload(sc->sc_dmatag, sd->sd_map)(*(sc->sc_dmatag)->_dmamap_unload)((sc->sc_dmatag), (
sd->sd_map))
;
821
822 m = sd->sd_mbuf;
823 sd->sd_mbuf = NULL((void *)0);
824
825 if (++ri == HME_RX_RING_SIZE64) {
826 ri = 0;
827 sd = sc->sc_rxd;
828 } else
829 sd++;
830
 /* Return one slot to the rxring accounting. */
831 if_rxr_put(&sc->sc_rx_ring, 1)do { (&sc->sc_rx_ring)->rxr_alive -= (1); } while (
0)
;
832
 /* Drop frames that overflowed the receive buffer. */
833 if (flags & HME_XD_OFL0x40000000) {
834 ifp->if_ierrorsif_data.ifi_ierrors++;
835 printf("%s: buffer overflow, ri=%d; flags=0x%x\n",
836 sc->sc_dev.dv_xname, ri, flags);
837 m_freem(m);
838 continue;
839 }
840
 /* Received length is encoded in the descriptor flags word. */
841 len = HME_XD_DECODE_RSIZE(flags)(((flags) & 0x3fff0000) >> 16);
842 m->m_pkthdrM_dat.MH.MH_pkthdr.len = m->m_lenm_hdr.mh_len = len;
843
844 ml_enqueue(&ml, m);
845 }
846
847 if (ifiq_input(&ifp->if_rcv, &ml))
848 if_rxr_livelocked(&sc->sc_rx_ring);
849
850 sc->sc_rx_cons = ri;
851 hme_fill_rx_ring(sc);
852 return (1);
853}
854
/*
 * hme_eint: decode/acknowledge error bits from the SEB status register.
 * Known conditions update counters; bits that are handled elsewhere are
 * masked off.  Always returns 1.
 */
855int
856hme_eint(struct hme_softc *sc, u_int status)
857{
858 struct ifnet *ifp = &sc->sc_arpcom.ac_if;
859
860 if (status & HME_SEB_STAT_MIFIRQ0x00800000) {
861 printf("%s: XXXlink status changed\n", sc->sc_dev.dv_xname);
862 status &= ~HME_SEB_STAT_MIFIRQ0x00800000;
863 }
864
 /* Defer timer expired: account as an output error. */
865 if (status & HME_SEB_STAT_DTIMEXP0x00008000) {
866 ifp->if_oerrorsif_data.ifi_oerrors++;
867 status &= ~HME_SEB_STAT_DTIMEXP0x00008000;
868 }
869
 /* Out of receive descriptors: account as an input error. */
870 if (status & HME_SEB_STAT_NORXD0x00020000) {
871 ifp->if_ierrorsif_data.ifi_ierrors++;
872 status &= ~HME_SEB_STAT_NORXD0x00020000;
873 }
874
 /* Mask the normal RX/TX completion bits handled by hme_rint/hme_tint. */
875 status &= ~(HME_SEB_STAT_RXTOHOST0x00010000 | HME_SEB_STAT_GOTFRAME0x00000001 |
876 HME_SEB_STAT_SENTFRAME0x00000100 | HME_SEB_STAT_HOSTTOTX0x01000000 |
877 HME_SEB_STAT_TXALL0x02000000);
878
879 if (status == 0)
880 return (1);
881
882#ifdef HME_DEBUG
883 printf("%s: status=%b\n", sc->sc_dev.dv_xname, status, HME_SEB_STAT_BITS"\020\1RX\2RCNT\3ACNT\4CCNT\5LCNT\6RFIFO\7CVCNT\10STST" "\11TX\12TFIFO\13MAXPKT\14NCNT\15ECNT\16LCCNT\17FCNT"
"\20DTIME\21RXHOST\22NORXD\23RXE\24EXLATE\25RXP\26RXT\27EOP"
"\30MIF\31TXHOST\32TXALL\33TXE\34TXL\35TXP\36TXT\37SLV" "\40SLVP"
);
884#endif
885 return (1);
886}
887
/*
 * hme_intr: main interrupt handler.  Reads the SEB status register and
 * dispatches to the error, TX and RX sub-handlers as indicated.
 * Returns non-zero if the interrupt was ours.
 */
888int
889hme_intr(void *v)
890{
891 struct hme_softc *sc = (struct hme_softc *)v;
892 bus_space_tag_t t = sc->sc_bustag;
893 bus_space_handle_t seb = sc->sc_seb;
894 u_int32_t status;
895 int r = 0;
896
 /* All-ones means the device is gone (e.g. detached/hot-unplugged). */
897 status = bus_space_read_4(t, seb, HME_SEBI_STAT)((t)->read_4((seb), ((64*4))));
898 if (status == 0xffffffff)
899 return (0);
900
901 if ((status & HME_SEB_STAT_ALL_ERRORS(0x80000000 | 0x40000000 | 0x20000000 | 0x10000000 | 0x08000000
| 0x04000000 | 0x00400000 | 0x00200000 | 0x00100000 | 0x00080000
| 0x00040000 | 0x00020000 | 0x00008000 | 0x00004000 | 0x00002000
| 0x00001000 | 0x00000800 | 0x00000400| 0x00000200| 0x00000080
| 0x00000040 | 0x00000010 | 0x00000008| 0x00000004)
) != 0)
902 r |= hme_eint(sc, status);
903
904 if ((status & (HME_SEB_STAT_TXALL0x02000000 | HME_SEB_STAT_HOSTTOTX0x01000000)) != 0)
905 r |= hme_tint(sc);
906
907 if ((status & HME_SEB_STAT_RXTOHOST0x00010000) != 0)
908 r |= hme_rint(sc);
909
910 return (r);
911}
912
913
/*
 * hme_watchdog: transmit watchdog timeout (if_timer expired without a TX
 * completion).  Log it, count an output error and reset the chip.
 */
914void
915hme_watchdog(struct ifnet *ifp)
916{
917 struct hme_softc *sc = ifp->if_softc;
918
919 log(LOG_ERR3, "%s: device timeout\n", sc->sc_dev.dv_xname);
920 ifp->if_oerrorsif_data.ifi_oerrors++;
921
922 hme_reset(sc);
923}
924
925/*
926 * Initialize the MII Management Interface
927 */
/*
 * hme_mifinit: probe which PHY (external/internal) is attached via the
 * MDI sense bits, remember it in sc_tcvr, and program the MIF and MAC
 * XIF registers accordingly.
 */
928void
929hme_mifinit(struct hme_softc *sc)
930{
931 bus_space_tag_t t = sc->sc_bustag;
932 bus_space_handle_t mif = sc->sc_mif;
933 bus_space_handle_t mac = sc->sc_mac;
934 int phy;
935 u_int32_t v;
936
 /* MDI1/MDI0 sense bits tell us which transceiver is present. */
937 v = bus_space_read_4(t, mif, HME_MIFI_CFG)((t)->read_4((mif), ((4*4))));
938 phy = HME_PHYAD_EXTERNAL0;
939 if (v & HME_MIF_CFG_MDI10x00000200)
940 phy = sc->sc_tcvr = HME_PHYAD_EXTERNAL0;
941 else if (v & HME_MIF_CFG_MDI00x00000100)
942 phy = sc->sc_tcvr = HME_PHYAD_INTERNAL1;
943 else
944 sc->sc_tcvr = -1;
945
946 /* Configure the MIF in frame mode, no poll, current phy select */
947 v = 0;
948 if (phy == HME_PHYAD_EXTERNAL0)
949 v |= HME_MIF_CFG_PHY0x00000001;
950 bus_space_write_4(t, mif, HME_MIFI_CFG, v)((t)->write_4((mif), ((4*4)), (v)));
951
952 /* If an external transceiver is selected, enable its MII drivers */
953 v = bus_space_read_4(t, mac, HME_MACI_XIF)((t)->read_4((mac), ((0*4))));
954 v &= ~HME_MAC_XIF_MIIENABLE0x00000008;
955 if (phy == HME_PHYAD_EXTERNAL0)
956 v |= HME_MAC_XIF_MIIENABLE0x00000008;
957 bus_space_write_4(t, mac, HME_MACI_XIF, v)((t)->write_4((mac), ((0*4)), (v)));
958}
959
960/*
961 * MII interface
962 */
/*
 * hme_mii_readreg: read PHY register 'reg' on PHY address 'phy' via a
 * MIF frame operation.  Saves and restores the MIF config and MAC XIF
 * registers around the transaction.  Returns the 16-bit register value,
 * or 0 on timeout/invalid PHY.
 */
963static int
964hme_mii_readreg(struct device *self, int phy, int reg)
965{
966 struct hme_softc *sc = (struct hme_softc *)self;
967 bus_space_tag_t t = sc->sc_bustag;
968 bus_space_handle_t mif = sc->sc_mif;
969 bus_space_handle_t mac = sc->sc_mac;
970 u_int32_t v, xif_cfg, mifi_cfg;
971 int n;
972
 /* Only the two known PHY addresses are valid. */
973 if (phy != HME_PHYAD_EXTERNAL0 && phy != HME_PHYAD_INTERNAL1)
974 return (0);
975
976 /* Select the desired PHY in the MIF configuration register */
977 v = mifi_cfg = bus_space_read_4(t, mif, HME_MIFI_CFG)((t)->read_4((mif), ((4*4))));
978 v &= ~HME_MIF_CFG_PHY0x00000001;
979 if (phy == HME_PHYAD_EXTERNAL0)
980 v |= HME_MIF_CFG_PHY0x00000001;
981 bus_space_write_4(t, mif, HME_MIFI_CFG, v)((t)->write_4((mif), ((4*4)), (v)));
982
983 /* Enable MII drivers on external transceiver */
984 v = xif_cfg = bus_space_read_4(t, mac, HME_MACI_XIF)((t)->read_4((mac), ((0*4))));
985 if (phy == HME_PHYAD_EXTERNAL0)
986 v |= HME_MAC_XIF_MIIENABLE0x00000008;
987 else
988 v &= ~HME_MAC_XIF_MIIENABLE0x00000008;
989 bus_space_write_4(t, mac, HME_MACI_XIF, v)((t)->write_4((mac), ((0*4)), (v)));
990
991 /* Construct the frame command */
992 v = (MII_COMMAND_START0x01 << HME_MIF_FO_ST_SHIFT30) |
993 HME_MIF_FO_TAMSB0x00020000 |
994 (MII_COMMAND_READ0x02 << HME_MIF_FO_OPC_SHIFT28) |
995 (phy << HME_MIF_FO_PHYAD_SHIFT23) |
996 (reg << HME_MIF_FO_REGAD_SHIFT18);
997
 /* Issue the frame and poll (up to ~100us) for TALSB = completion. */
998 bus_space_write_4(t, mif, HME_MIFI_FO, v)((t)->write_4((mif), ((3*4)), (v)));
999 for (n = 0; n < 100; n++) {
1000 DELAY(1)(*delay_func)(1);
1001 v = bus_space_read_4(t, mif, HME_MIFI_FO)((t)->read_4((mif), ((3*4))));
1002 if (v & HME_MIF_FO_TALSB0x00010000) {
1003 v &= HME_MIF_FO_DATA0x0000ffff;
1004 goto out;
1005 }
1006 }
1007
1008 v = 0;
1009 printf("%s: mii_read timeout\n", sc->sc_dev.dv_xname);
1010
1011out:
1012 /* Restore MIFI_CFG register */
1013 bus_space_write_4(t, mif, HME_MIFI_CFG, mifi_cfg)((t)->write_4((mif), ((4*4)), (mifi_cfg)));
1014 /* Restore XIF register */
1015 bus_space_write_4(t, mac, HME_MACI_XIF, xif_cfg)((t)->write_4((mac), ((0*4)), (xif_cfg)));
1016 return (v);
1017}
1018
/*
 * hme_mii_writereg: write 'val' to PHY register 'reg' on PHY address
 * 'phy' via a MIF frame operation; mirror of hme_mii_readreg.  The MIF
 * config and MAC XIF registers are saved and restored around the write.
 */
1019static void
1020hme_mii_writereg(struct device *self, int phy, int reg, int val)
1021{
1022 struct hme_softc *sc = (void *)self;
1023 bus_space_tag_t t = sc->sc_bustag;
1024 bus_space_handle_t mif = sc->sc_mif;
1025 bus_space_handle_t mac = sc->sc_mac;
1026 u_int32_t v, xif_cfg, mifi_cfg;
1027 int n;
1028
1029 /* We can at most have two PHYs */
1030 if (phy != HME_PHYAD_EXTERNAL0 && phy != HME_PHYAD_INTERNAL1)
1031 return;
1032
1033 /* Select the desired PHY in the MIF configuration register */
1034 v = mifi_cfg = bus_space_read_4(t, mif, HME_MIFI_CFG)((t)->read_4((mif), ((4*4))));
1035 v &= ~HME_MIF_CFG_PHY0x00000001;
1036 if (phy == HME_PHYAD_EXTERNAL0)
1037 v |= HME_MIF_CFG_PHY0x00000001;
1038 bus_space_write_4(t, mif, HME_MIFI_CFG, v)((t)->write_4((mif), ((4*4)), (v)));
1039
1040 /* Enable MII drivers on external transceiver */
1041 v = xif_cfg = bus_space_read_4(t, mac, HME_MACI_XIF)((t)->read_4((mac), ((0*4))));
1042 if (phy == HME_PHYAD_EXTERNAL0)
1043 v |= HME_MAC_XIF_MIIENABLE0x00000008;
1044 else
1045 v &= ~HME_MAC_XIF_MIIENABLE0x00000008;
1046 bus_space_write_4(t, mac, HME_MACI_XIF, v)((t)->write_4((mac), ((0*4)), (v)));
1047
1048 /* Construct the frame command */
1049 v = (MII_COMMAND_START0x01 << HME_MIF_FO_ST_SHIFT30) |
1050 HME_MIF_FO_TAMSB0x00020000 |
1051 (MII_COMMAND_WRITE0x01 << HME_MIF_FO_OPC_SHIFT28) |
1052 (phy << HME_MIF_FO_PHYAD_SHIFT23) |
1053 (reg << HME_MIF_FO_REGAD_SHIFT18) |
1054 (val & HME_MIF_FO_DATA0x0000ffff);
1055
 /* Issue the frame and poll (up to ~100us) for TALSB = completion. */
1056 bus_space_write_4(t, mif, HME_MIFI_FO, v)((t)->write_4((mif), ((3*4)), (v)));
1057 for (n = 0; n < 100; n++) {
1058 DELAY(1)(*delay_func)(1);
1059 v = bus_space_read_4(t, mif, HME_MIFI_FO)((t)->read_4((mif), ((3*4))));
1060 if (v & HME_MIF_FO_TALSB0x00010000)
1061 goto out;
1062 }
1063
1064 printf("%s: mii_write timeout\n", sc->sc_dev.dv_xname);
1065out:
1066 /* Restore MIFI_CFG register */
1067 bus_space_write_4(t, mif, HME_MIFI_CFG, mifi_cfg)((t)->write_4((mif), ((4*4)), (mifi_cfg)));
1068 /* Restore XIF register */
1069 bus_space_write_4(t, mac, HME_MACI_XIF, xif_cfg)((t)->write_4((mac), ((0*4)), (xif_cfg)));
1070}
1071
/*
 * hme_mii_statchg: MII status-change callback.  Propagate the negotiated
 * duplex setting into the MAC TX configuration register and the
 * IFF_SIMPLEX interface flag.
 */
1072static void
1073hme_mii_statchg(struct device *dev)
1074{
1075 struct hme_softc *sc = (void *)dev;
1076 bus_space_tag_t t = sc->sc_bustag;
1077 bus_space_handle_t mac = sc->sc_mac;
1078 u_int32_t v;
1079
1080#ifdef HMEDEBUG
1081 if (sc->sc_debug)
1082 printf("hme_mii_statchg: status change\n", phy);
1083#endif
1084
1085 /* Set the MAC Full Duplex bit appropriately */
1086 /* Apparently the hme chip is SIMPLEX if working in full duplex mode,
1087 but not otherwise. */
1088 v = bus_space_read_4(t, mac, HME_MACI_TXCFG)((t)->read_4((mac), ((131*4))));
1089 if ((IFM_OPTIONS(sc->sc_mii.mii_media_active)((sc->sc_mii.mii_media_active) & (0x00000000ffff0000ULL
|0x00ffff0000000000ULL))
& IFM_FDX0x0000010000000000ULL) != 0) {
1090 v |= HME_MAC_TXCFG_FULLDPLX0x00000200;
1091 sc->sc_arpcom.ac_if.if_flags |= IFF_SIMPLEX0x800;
1092 } else {
1093 v &= ~HME_MAC_TXCFG_FULLDPLX0x00000200;
1094 sc->sc_arpcom.ac_if.if_flags &= ~IFF_SIMPLEX0x800;
1095 }
1096 bus_space_write_4(t, mac, HME_MACI_TXCFG, v)((t)->write_4((mac), ((131*4)), (v)));
1097}
1098
/*
 * hme_mediachange: ifmedia change callback.  Select the PHY that belongs
 * to the requested media instance in the MIF/XIF registers, then let the
 * MII layer apply the media setting.  Returns 0 or errno.
 */
1099int
1100hme_mediachange(struct ifnet *ifp)
1101{
1102 struct hme_softc *sc = ifp->if_softc;
1103 bus_space_tag_t t = sc->sc_bustag;
1104 bus_space_handle_t mif = sc->sc_mif;
1105 bus_space_handle_t mac = sc->sc_mac;
1106 uint64_t instance = IFM_INST(sc->sc_mii.mii_media.ifm_cur->ifm_media)(((sc->sc_mii.mii_media.ifm_cur->ifm_media) & 0xff00000000000000ULL
) >> 56)
;
1107 int phy = sc->sc_phys[instance];
1108 u_int32_t v;
1109
1110#ifdef HMEDEBUG
1111 if (sc->sc_debug)
1112 printf("hme_mediachange: phy = %d\n", phy);
1113#endif
1114 if (IFM_TYPE(sc->sc_media.ifm_media)((sc->sc_mii.mii_media.ifm_media) & 0x000000000000ff00ULL
)
!= IFM_ETHER0x0000000000000100ULL)
1115 return (EINVAL22);
1116
1117 /* Select the current PHY in the MIF configuration register */
1118 v = bus_space_read_4(t, mif, HME_MIFI_CFG)((t)->read_4((mif), ((4*4))));
1119 v &= ~HME_MIF_CFG_PHY0x00000001;
1120 if (phy == HME_PHYAD_EXTERNAL0)
1121 v |= HME_MIF_CFG_PHY0x00000001;
1122 bus_space_write_4(t, mif, HME_MIFI_CFG, v)((t)->write_4((mif), ((4*4)), (v)));
1123
1124 /* If an external transceiver is selected, enable its MII drivers */
1125 v = bus_space_read_4(t, mac, HME_MACI_XIF)((t)->read_4((mac), ((0*4))));
1126 v &= ~HME_MAC_XIF_MIIENABLE0x00000008;
1127 if (phy == HME_PHYAD_EXTERNAL0)
1128 v |= HME_MAC_XIF_MIIENABLE0x00000008;
1129 bus_space_write_4(t, mac, HME_MACI_XIF, v)((t)->write_4((mac), ((0*4)), (v)));
1130
1131 return (mii_mediachg(&sc->sc_mii));
1132}
1133
/*
 * hme_mediastatus: ifmedia status callback.  Poll the MII layer and
 * report the active media and link status; no-op if the interface
 * is down.
 */
1134void
1135hme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
1136{
1137 struct hme_softc *sc = ifp->if_softc;
1138
1139 if ((ifp->if_flags & IFF_UP0x1) == 0)
1140 return;
1141
1142 mii_pollstat(&sc->sc_mii);
1143 ifmr->ifm_active = sc->sc_mii.mii_media_active;
1144 ifmr->ifm_status = sc->sc_mii.mii_media_status;
1145}
1146
1147/*
1148 * Process an ioctl request.
1149 */
/*
 * hme_ioctl: interface ioctl handler, run at splnet.  Handles address
 * assignment, up/down transitions, media and rxring queries; everything
 * else falls through to ether_ioctl().  ENETRESET from any path is
 * translated into a multicast-filter reload (hme_iff) when running.
 */
1150int
1151hme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1152{
1153 struct hme_softc *sc = ifp->if_softc;
1154 struct ifreq *ifr = (struct ifreq *)data;
1155 int s, error = 0;
1156
1157 s = splnet()splraise(0x7);
1158
1159 switch (cmd) {
1160 case SIOCSIFADDR((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((12)))
:
1161 ifp->if_flags |= IFF_UP0x1;
1162 if (!(ifp->if_flags & IFF_RUNNING0x40))
1163 hme_init(sc);
1164 break;
1165
1166 case SIOCSIFFLAGS((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((16)))
:
1167 if (ifp->if_flags & IFF_UP0x1) {
1168 if (ifp->if_flags & IFF_RUNNING0x40)
1169 error = ENETRESET52;
1170 else
1171 hme_init(sc);
1172 } else {
1173 if (ifp->if_flags & IFF_RUNNING0x40)
1174 hme_stop(sc, 0);
1175 }
1176#ifdef HMEDEBUG
1177 sc->sc_debug = (ifp->if_flags & IFF_DEBUG0x4) != 0 ? 1 : 0;
1178#endif
1179 break;
1180
1181 case SIOCGIFMEDIA(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifmediareq) & 0x1fff) << 16) | ((('i')) <<
8) | ((56)))
:
1182 case SIOCSIFMEDIA(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((55)))
:
1183 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mediasc_mii.mii_media, cmd);
1184 break;
1185
1186 case SIOCGIFRXR((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((170)))
:
1187 error = if_rxr_ioctl((struct if_rxrinfo *)ifr->ifr_dataifr_ifru.ifru_data,
1188 NULL((void *)0), MCLBYTES(1 << 11), &sc->sc_rx_ring);
1189 break;
1190
1191 default:
1192 error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
1193 }
1194
 /* Reprogram the RX filter instead of a full reset where possible. */
1195 if (error == ENETRESET52) {
1196 if (ifp->if_flags & IFF_RUNNING0x40)
1197 hme_iff(sc);
1198 error = 0;
1199 }
1200
1201 splx(s)spllower(s);
1202 return (error);
1203}
1204
/*
 * hme_iff: program the MAC receive filter.  Promiscuous mode sets PMISC;
 * multicast ranges force an all-ones hash (ALLMULTI); otherwise build a
 * 64-bit multicast hash table from the CRC of each group address.
 */
1205void
1206hme_iff(struct hme_softc *sc)
1207{
1208 struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1209 struct arpcom *ac = &sc->sc_arpcom;
1210 struct ether_multi *enm;
1211 struct ether_multistep step;
1212 bus_space_tag_t t = sc->sc_bustag;
1213 bus_space_handle_t mac = sc->sc_mac;
1214 u_int32_t hash[4];
1215 u_int32_t rxcfg, crc;
1216
1217 rxcfg = bus_space_read_4(t, mac, HME_MACI_RXCFG)((t)->read_4((mac), ((195*4))));
1218 rxcfg &= ~(HME_MAC_RXCFG_HENABLE0x00000800 | HME_MAC_RXCFG_PMISC0x00000040);
1219 ifp->if_flags &= ~IFF_ALLMULTI0x200;
1220 /* Clear hash table */
1221 hash[0] = hash[1] = hash[2] = hash[3] = 0;
1222
1223 if (ifp->if_flags & IFF_PROMISC0x100) {
1224 ifp->if_flags |= IFF_ALLMULTI0x200;
1225 rxcfg |= HME_MAC_RXCFG_PMISC0x00000040;
1226 } else if (ac->ac_multirangecnt > 0) {
1227 ifp->if_flags |= IFF_ALLMULTI0x200;
1228 rxcfg |= HME_MAC_RXCFG_HENABLE0x00000800;
1229 hash[0] = hash[1] = hash[2] = hash[3] = 0xffff;
1230 } else {
1231 rxcfg |= HME_MAC_RXCFG_HENABLE0x00000800;
1232
1233 ETHER_FIRST_MULTI(step, ac, enm)do { (step).e_enm = ((&(ac)->ac_multiaddrs)->lh_first
); do { if ((((enm)) = ((step)).e_enm) != ((void *)0)) ((step
)).e_enm = ((((enm)))->enm_list.le_next); } while ( 0); } while
( 0)
;
1234 while (enm != NULL((void *)0)) {
 /* Top 6 bits of the little-endian CRC select the hash bit. */
1235 crc = ether_crc32_le(enm->enm_addrlo,
1236 ETHER_ADDR_LEN6) >> 26;
1237
1238 /* Set the corresponding bit in the filter. */
1239 hash[crc >> 4] |= 1 << (crc & 0xf);
1240
1241 ETHER_NEXT_MULTI(step, enm)do { if (((enm) = (step).e_enm) != ((void *)0)) (step).e_enm =
(((enm))->enm_list.le_next); } while ( 0)
;
1242 }
1243 }
1244
1245 /* Now load the hash table into the chip */
1246 bus_space_write_4(t, mac, HME_MACI_HASHTAB0, hash[0])((t)->write_4((mac), ((211*4)), (hash[0])));
1247 bus_space_write_4(t, mac, HME_MACI_HASHTAB1, hash[1])((t)->write_4((mac), ((210*4)), (hash[1])));
1248 bus_space_write_4(t, mac, HME_MACI_HASHTAB2, hash[2])((t)->write_4((mac), ((209*4)), (hash[2])));
1249 bus_space_write_4(t, mac, HME_MACI_HASHTAB3, hash[3])((t)->write_4((mac), ((208*4)), (hash[3])));
1250 bus_space_write_4(t, mac, HME_MACI_RXCFG, rxcfg)((t)->write_4((mac), ((195*4)), (rxcfg)));
1251}
1252
/*
 * hme_fill_rx_ring: replenish the receive ring.  Take available slots
 * from the rxring accounting, allocate/map a fresh cluster for each via
 * hme_newbuf(), and give the descriptor to the chip (OWN + buffer size).
 * Unused slots are returned via if_rxr_put().
 */
1253void
1254hme_fill_rx_ring(struct hme_softc *sc)
1255{
1256 struct hme_sxd *sd;
1257 u_int slots;
1258
1259 for (slots = if_rxr_get(&sc->sc_rx_ring, HME_RX_RING_SIZE64);
1260 slots > 0; slots--) {
 /* Stop early if no more clusters can be allocated. */
1261 if (hme_newbuf(sc, &sc->sc_rxd[sc->sc_rx_prod]))
1262 break;
1263
1264 sd = &sc->sc_rxd[sc->sc_rx_prod];
1265 HME_XD_SETADDR(sc->sc_pci, sc->sc_rb.rb_rxd, sc->sc_rx_prod,do { *((u_int32_t *)((sc->sc_rb.rb_rxd) + ((sc->sc_rx_prod
) * 8) + 4)) = ((sc->sc_pci) ? ((__uint32_t)(sd->sd_map
->dm_segs[0].ds_addr)) : (sd->sd_map->dm_segs[0].ds_addr
)); } while(0)
1266 sd->sd_map->dm_segs[0].ds_addr)do { *((u_int32_t *)((sc->sc_rb.rb_rxd) + ((sc->sc_rx_prod
) * 8) + 4)) = ((sc->sc_pci) ? ((__uint32_t)(sd->sd_map
->dm_segs[0].ds_addr)) : (sd->sd_map->dm_segs[0].ds_addr
)); } while(0)
;
1267 HME_XD_SETFLAGS(sc->sc_pci, sc->sc_rb.rb_rxd, sc->sc_rx_prod,do { *((u_int32_t *)((sc->sc_rb.rb_rxd) + ((sc->sc_rx_prod
) * 8) + 0)) = ((sc->sc_pci) ? ((__uint32_t)(0x80000000 | (
((1600) << 16) & 0x3fff0000))) : (0x80000000 | (((1600
) << 16) & 0x3fff0000))); } while(0)
1268 HME_XD_OWN | HME_XD_ENCODE_RSIZE(HME_RX_PKTSIZE))do { *((u_int32_t *)((sc->sc_rb.rb_rxd) + ((sc->sc_rx_prod
) * 8) + 0)) = ((sc->sc_pci) ? ((__uint32_t)(0x80000000 | (
((1600) << 16) & 0x3fff0000))) : (0x80000000 | (((1600
) << 16) & 0x3fff0000))); } while(0)
;
1269
1270 if (++sc->sc_rx_prod == HME_RX_RING_SIZE64)
1271 sc->sc_rx_prod = 0;
1272 }
1273 if_rxr_put(&sc->sc_rx_ring, slots)do { (&sc->sc_rx_ring)->rxr_alive -= (slots); } while
(0)
;
1274}
1275
/*
 * hme_newbuf: allocate a fresh receive cluster for ring slot 'd'.
 * The new mbuf is loaded into the spare DMA map first; only on success
 * are the slot's map and the spare swapped, so the slot is never left
 * half-initialized.  Returns 0 on success, ENOBUFS on failure.
 */
1276int
1277hme_newbuf(struct hme_softc *sc, struct hme_sxd *d)
1278{
1279 struct mbuf *m;
1280 bus_dmamap_t map;
1281
1282 /*
1283 * All operations should be on local variables and/or rx spare map
1284 * until we're sure everything is a success.
1285 */
1286
1287 m = MCLGETL(NULL, M_DONTWAIT, MCLBYTES)m_clget((((void *)0)), (0x0002), ((1 << 11)));
1288 if (!m)
1289 return (ENOBUFS55);
1290
 /* Load into the spare map, leaving room for the RX alignment offset. */
1291 if (bus_dmamap_load(sc->sc_dmatag, sc->sc_rxmap_spare,(*(sc->sc_dmatag)->_dmamap_load)((sc->sc_dmatag), (sc
->sc_rxmap_spare), (((caddr_t)((m)->m_hdr.mh_data))), (
(1 << 11) - 2), (((void *)0)), (0x0001))
1292 mtod(m, caddr_t), MCLBYTES - HME_RX_OFFSET, NULL,(*(sc->sc_dmatag)->_dmamap_load)((sc->sc_dmatag), (sc
->sc_rxmap_spare), (((caddr_t)((m)->m_hdr.mh_data))), (
(1 << 11) - 2), (((void *)0)), (0x0001))
1293 BUS_DMA_NOWAIT)(*(sc->sc_dmatag)->_dmamap_load)((sc->sc_dmatag), (sc
->sc_rxmap_spare), (((caddr_t)((m)->m_hdr.mh_data))), (
(1 << 11) - 2), (((void *)0)), (0x0001))
!= 0) {
1294 m_freem(m);
1295 return (ENOBUFS55);
1296 }
1297
1298 /*
1299 * At this point we have a new buffer loaded into the spare map.
1300 * Just need to clear out the old mbuf/map and put the new one
1301 * in place.
1302 */
1303
1304 map = d->sd_map;
1305 d->sd_map = sc->sc_rxmap_spare;
1306 sc->sc_rxmap_spare = map;
1307
1308 bus_dmamap_sync(sc->sc_dmatag, d->sd_map, 0, d->sd_map->dm_mapsize,(*(sc->sc_dmatag)->_dmamap_sync)((sc->sc_dmatag), (d
->sd_map), (0), (d->sd_map->dm_mapsize), (0x01))
1309 BUS_DMASYNC_PREREAD)(*(sc->sc_dmatag)->_dmamap_sync)((sc->sc_dmatag), (d
->sd_map), (0), (d->sd_map->dm_mapsize), (0x01))
;
1310
 /* Skip the 2-byte offset so the IP header lands 4-byte aligned. */
1311 m->m_datam_hdr.mh_data += HME_RX_OFFSET2;
1312 d->sd_mbuf = m;
1313 return (0);
1314}