Bug Summary

File: dev/pci/if_pcn.c
Warning: line 564, column 16
Value stored to 'ifp' during its initialization is never read
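
The report flags a dead store: pcn_attach() initializes 'ifp' at line 564 but unconditionally reassigns it at line 764, before the initial value is ever read. A minimal sketch of one possible fix, assuming no intervening use of 'ifp' (the annotated source below shows none):

    /* sketch only, not a committed patch */
    struct ifnet *ifp;              /* line 564: drop the dead initializer */
    /* ... register mapping, DMA and interrupt setup, any of which may return early ... */
    ifp = &sc->sc_arpcom.ac_if;     /* line 764: the store that is actually read */

Alternatively, keep the initializer at line 564 and delete the reassignment at line 764; either way only a single store to 'ifp' remains.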

Annotated Source Code


clang -cc1 -cc1 -triple amd64-unknown-openbsd7.4 -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name if_pcn.c -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -ffp-contract=on -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -target-feature +retpoline-external-thunk -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/llvm16/lib/clang/16 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/legacy-dpm -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu13 -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/inc -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc/pmfw_if -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D SUSPEND -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant 
-fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fcf-protection=branch -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /home/ben/Projects/scan/2024-01-11-110808-61670-1 -x c /usr/src/sys/dev/pci/if_pcn.c
1/* $OpenBSD: if_pcn.c,v 1.49 2023/11/10 15:51:20 bluhm Exp $ */
2/* $NetBSD: if_pcn.c,v 1.26 2005/05/07 09:15:44 is Exp $ */
3
4/*
5 * Copyright (c) 2001 Wasabi Systems, Inc.
6 * All rights reserved.
7 *
8 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed for the NetBSD Project by
21 * Wasabi Systems, Inc.
22 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
23 * or promote products derived from this software without specific prior
24 * written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
37 */
38
39/*
40 * Device driver for the AMD PCnet-PCI series of Ethernet
41 * chips:
42 *
43 * * Am79c970 PCnet-PCI Single-Chip Ethernet Controller for PCI
44 * Local Bus
45 *
46 * * Am79c970A PCnet-PCI II Single-Chip Full-Duplex Ethernet Controller
47 * for PCI Local Bus
48 *
49 * * Am79c971 PCnet-FAST Single-Chip Full-Duplex 10/100Mbps
50 * Ethernet Controller for PCI Local Bus
51 *
52 * * Am79c972 PCnet-FAST+ Enhanced 10/100Mbps PCI Ethernet Controller
53 * with OnNow Support
54 *
55 * * Am79c973/Am79c975 PCnet-FAST III Single-Chip 10/100Mbps PCI
56 * Ethernet Controller with Integrated PHY
57 *
58 * This also supports the virtual PCnet-PCI Ethernet interface found
59 * in VMware.
60 *
61 * TODO:
62 *
63 * * Split this into bus-specific and bus-independent portions.
64 * The core could also be used for the ILACC (Am79900) 32-bit
65 * Ethernet chip (XXX only if we use an ILACC-compatible SWSTYLE).
66 */
67
68#include "bpfilter.h"
69
70#include <sys/param.h>
71#include <sys/systm.h>
72#include <sys/timeout.h>
73#include <sys/mbuf.h>
74#include <sys/malloc.h>
75#include <sys/kernel.h>
76#include <sys/socket.h>
77#include <sys/ioctl.h>
78#include <sys/errno.h>
79#include <sys/device.h>
80#include <sys/queue.h>
81#include <sys/endian.h>
82
83#include <net/if.h>
84#include <net/if_dl.h>
85
86#include <netinet/in.h>
87#include <netinet/if_ether.h>
88
89#include <net/if_media.h>
90
91#if NBPFILTER > 0
92#include <net/bpf.h>
93#endif
94
95#include <machine/bus.h>
96#include <machine/intr.h>
97
98#include <dev/mii/miivar.h>
99
100#include <dev/ic/am79900reg.h>
101#include <dev/ic/lancereg.h>
102
103#include <dev/pci/pcireg.h>
104#include <dev/pci/pcivar.h>
105#include <dev/pci/pcidevs.h>
106
107/*
108 * Register definitions for the AMD PCnet-PCI series of Ethernet
109 * chips.
110 *
111 * These are only the registers that we access directly from PCI
112 * space. Everything else (accessed via the RAP + RDP/BDP) is
113 * defined in <dev/ic/lancereg.h>.
114 */
115
116/*
117 * PCI configuration space.
118 */
119
120#define PCN_PCI_CBIO (PCI_MAPREG_START + 0x00)
121#define PCN_PCI_CBMEM (PCI_MAPREG_START + 0x04)
122
123/*
124 * I/O map in Word I/O mode.
125 */
126
127#define PCN16_APROM 0x00
128#define PCN16_RDP 0x10
129#define PCN16_RAP 0x12
130#define PCN16_RESET 0x14
131#define PCN16_BDP 0x16
132
133/*
134 * I/O map in DWord I/O mode.
135 */
136
137#define PCN32_APROM 0x00
138#define PCN32_RDP 0x10
139#define PCN32_RAP 0x14
140#define PCN32_RESET 0x18
141#define PCN32_BDP 0x1c
142
143/*
144 * Transmit descriptor list size. This is arbitrary, but allocate
145 * enough descriptors for 128 pending transmissions, and 4 segments
146 * per packet. This MUST work out to a power of 2.
147 *
148 * NOTE: We can't have any more than 512 Tx descriptors, SO BE CAREFUL!
149 *
150 * So we play a little trick here. We give each packet up to 16
151 * DMA segments, but only allocate the max of 512 descriptors. The
152 * transmit logic can deal with this, we just are hoping to sneak by.
153 */
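[Editor's note: the arithmetic behind the "trick" above: 128 queued packets x 16 DMA segments each could require 2048 descriptors, yet only 512 are allocated. The transmit logic copes because pcn_start() (lines 834-835 below) refuses to commit a packet unless at least PCN_NTXSEGS + 1 descriptors are free, so the ring is never oversubscribed.]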
154#define PCN_NTXSEGS 16
155
156#define PCN_TXQUEUELEN 128
157#define PCN_TXQUEUELEN_MASK (PCN_TXQUEUELEN - 1)
158#define PCN_NTXDESC 512
159#define PCN_NTXDESC_MASK (PCN_NTXDESC - 1)
160#define PCN_NEXTTX(x) (((x) + 1) & PCN_NTXDESC_MASK)
161#define PCN_NEXTTXS(x) (((x) + 1) & PCN_TXQUEUELEN_MASK)
162
163/* Tx interrupt every N + 1 packets. */
164#define PCN_TXINTR_MASK 7
165
166/*
167 * Receive descriptor list size. We have one Rx buffer per incoming
168 * packet, so this logic is a little simpler.
169 */
170#define PCN_NRXDESC 128
171#define PCN_NRXDESC_MASK (PCN_NRXDESC - 1)
172#define PCN_NEXTRX(x) (((x) + 1) & PCN_NRXDESC_MASK)
173
174/*
175 * Control structures are DMA'd to the PCnet chip. We allocate them in
176 * a single clump that maps to a single DMA segment to make several things
177 * easier.
178 */
179struct pcn_control_data {
180 /* The transmit descriptors. */
181 struct letmd pcd_txdescs[PCN_NTXDESC];
182
183 /* The receive descriptors. */
184 struct lermd pcd_rxdescs[PCN_NRXDESC];
185
186 /* The init block. */
187 struct leinit pcd_initblock;
188};
189
190#define PCN_CDOFF(x) offsetof(struct pcn_control_data, x)
191#define PCN_CDTXOFF(x) PCN_CDOFF(pcd_txdescs[(x)])
192#define PCN_CDRXOFF(x) PCN_CDOFF(pcd_rxdescs[(x)])
193#define PCN_CDINITOFF PCN_CDOFF(pcd_initblock)
194
195/*
196 * Software state for transmit jobs.
197 */
198struct pcn_txsoft {
199 struct mbuf *txs_mbuf; /* head of our mbuf chain */
200 bus_dmamap_t txs_dmamap; /* our DMA map */
201 int txs_firstdesc; /* first descriptor in packet */
202 int txs_lastdesc; /* last descriptor in packet */
203};
204
205/*
206 * Software state for receive jobs.
207 */
208struct pcn_rxsoft {
209 struct mbuf *rxs_mbuf; /* head of our mbuf chain */
210 bus_dmamap_t rxs_dmamap; /* our DMA map */
211};
212
213/*
214 * Description of Rx FIFO watermarks for various revisions.
215 */
216static const char * const pcn_79c970_rcvfw[] = {
217 "16 bytes",
218 "64 bytes",
219 "128 bytes",
220 NULL,
221};
222
223static const char * const pcn_79c971_rcvfw[] = {
224 "16 bytes",
225 "64 bytes",
226 "112 bytes",
227 NULL,
228};
229
230/*
231 * Description of Tx start points for various revisions.
232 */
233static const char * const pcn_79c970_xmtsp[] = {
234 "8 bytes",
235 "64 bytes",
236 "128 bytes",
237 "248 bytes",
238};
239
240static const char * const pcn_79c971_xmtsp[] = {
241 "20 bytes",
242 "64 bytes",
243 "128 bytes",
244 "248 bytes",
245};
246
247static const char * const pcn_79c971_xmtsp_sram[] = {
248 "44 bytes",
249 "64 bytes",
250 "128 bytes",
251 "store-and-forward",
252};
253
254/*
255 * Description of Tx FIFO watermarks for various revisions.
256 */
257static const char * const pcn_79c970_xmtfw[] = {
258 "16 bytes",
259 "64 bytes",
260 "128 bytes",
261 NULL,
262};
263
264static const char * const pcn_79c971_xmtfw[] = {
265 "16 bytes",
266 "64 bytes",
267 "108 bytes",
268 NULL,
269};
270
271/*
272 * Software state per device.
273 */
274struct pcn_softc {
275 struct device sc_dev; /* generic device information */
276 bus_space_tag_t sc_st; /* bus space tag */
277 bus_space_handle_t sc_sh; /* bus space handle */
278 bus_dma_tag_t sc_dmat; /* bus DMA tag */
279 struct arpcom sc_arpcom; /* Ethernet common data */
280
281 /* Points to our media routines, etc. */
282 const struct pcn_variant *sc_variant;
283
284 void *sc_ih; /* interrupt cookie */
285
286 struct mii_data sc_mii; /* MII/media information */
287
288 struct timeout sc_tick_timeout; /* tick timeout */
289
290 bus_dmamap_t sc_cddmamap; /* control data DMA map */
291#define sc_cddma sc_cddmamap->dm_segs[0].ds_addr
292
293 /* Software state for transmit and receive descriptors. */
294 struct pcn_txsoft sc_txsoft[PCN_TXQUEUELEN];
295 struct pcn_rxsoft sc_rxsoft[PCN_NRXDESC];
296
297 /* Control data structures */
298 struct pcn_control_data *sc_control_data;
299#define sc_txdescs sc_control_data->pcd_txdescs
300#define sc_rxdescs sc_control_data->pcd_rxdescs
301#define sc_initblock sc_control_data->pcd_initblock
302
303 const char * const *sc_rcvfw_desc; /* Rx FIFO watermark info */
304 int sc_rcvfw;
305
306 const char * const *sc_xmtsp_desc; /* Tx start point info */
307 int sc_xmtsp;
308
309 const char * const *sc_xmtfw_desc; /* Tx FIFO watermark info */
310 int sc_xmtfw;
311
312 int sc_flags; /* misc. flags; see below */
313 int sc_swstyle; /* the software style in use */
314
315 int sc_txfree; /* number of free Tx descriptors */
316 int sc_txnext; /* next ready Tx descriptor */
317
318 int sc_txsfree; /* number of free Tx jobs */
319 int sc_txsnext; /* next free Tx job */
320 int sc_txsdirty; /* dirty Tx jobs */
321
322 int sc_rxptr; /* next ready Rx descriptor/job */
323
324 uint32_t sc_csr5; /* prototype CSR5 register */
325 uint32_t sc_mode; /* prototype MODE register */
326};
327
328/* sc_flags */
329#define PCN_F_HAS_MII 0x0001 /* has MII */
330
331#define PCN_CDTXADDR(sc, x) ((sc)->sc_cddma + PCN_CDTXOFF((x)))
332#define PCN_CDRXADDR(sc, x) ((sc)->sc_cddma + PCN_CDRXOFF((x)))
333#define PCN_CDINITADDR(sc) ((sc)->sc_cddma + PCN_CDINITOFF)
334
335#define PCN_CDTXSYNC(sc, x, n, ops) \
336do { \
337 int __x, __n; \
338 \
339 __x = (x); \
340 __n = (n); \
341 \
342 /* If it will wrap around, sync to the end of the ring. */ \
343 if ((__x + __n) > PCN_NTXDESC) { \
344 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
345 PCN_CDTXOFF(__x), sizeof(struct letmd) * \
346 (PCN_NTXDESC - __x), (ops)); \
347 __n -= (PCN_NTXDESC - __x); \
348 __x = 0; \
349 } \
350 \
351 /* Now sync whatever is left. */ \
352 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
353 PCN_CDTXOFF(__x), sizeof(struct letmd) * __n, (ops)); \
354} while (/*CONSTCOND*/0)
355
356#define PCN_CDRXSYNC(sc, x, ops) \
357 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
358 PCN_CDRXOFF((x)), sizeof(struct lermd), (ops))
359
360#define PCN_CDINITSYNC(sc, ops) \
361 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
362 PCN_CDINITOFF, sizeof(struct leinit), (ops))
363
364#define PCN_INIT_RXDESC(sc, x) \
365do { \
366 struct pcn_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)]; \
367 struct lermd *__rmd = &(sc)->sc_rxdescs[(x)]; \
368 struct mbuf *__m = __rxs->rxs_mbuf; \
369 \
370 /* \
371 * Note: We scoot the packet forward 2 bytes in the buffer \
372 * so that the payload after the Ethernet header is aligned \
373 * to a 4-byte boundary. \
374 */ \
375 __m->m_data = __m->m_ext.ext_buf + 2; \
376 \
377 if ((sc)->sc_swstyle == LE_B20_SSTYLE_PCNETPCI3) { \
378 __rmd->rmd2 = \
379 htole32(__rxs->rxs_dmamap->dm_segs[0].ds_addr + 2); \
380 __rmd->rmd0 = 0; \
381 } else { \
382 __rmd->rmd2 = 0; \
383 __rmd->rmd0 = \
384 htole32(__rxs->rxs_dmamap->dm_segs[0].ds_addr + 2); \
385 } \
386 __rmd->rmd1 = htole32(LE_R1_OWN|LE_R1_ONES| \
387 (LE_BCNT(MCLBYTES - 2) & LE_R1_BCNT_MASK)); \
388 PCN_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);\
389} while(/*CONSTCOND*/0)
390
391void pcn_start(struct ifnet *);
392void pcn_watchdog(struct ifnet *);
393int pcn_ioctl(struct ifnet *, u_long, caddr_t);
394int pcn_init(struct ifnet *);
395void pcn_stop(struct ifnet *, int);
396
397void pcn_reset(struct pcn_softc *);
398void pcn_rxdrain(struct pcn_softc *);
399int pcn_add_rxbuf(struct pcn_softc *, int);
400void pcn_tick(void *);
401
402void pcn_spnd(struct pcn_softc *);
403
404void pcn_set_filter(struct pcn_softc *);
405
406int pcn_intr(void *);
407void pcn_txintr(struct pcn_softc *);
408int pcn_rxintr(struct pcn_softc *);
409
410int pcn_mii_readreg(struct device *, int, int);
411void pcn_mii_writereg(struct device *, int, int, int);
412void pcn_mii_statchg(struct device *);
413
414void pcn_79c970_mediainit(struct pcn_softc *);
415int pcn_79c970_mediachange(struct ifnet *);
416void pcn_79c970_mediastatus(struct ifnet *, struct ifmediareq *);
417
418void pcn_79c971_mediainit(struct pcn_softc *);
419int pcn_79c971_mediachange(struct ifnet *);
420void pcn_79c971_mediastatus(struct ifnet *, struct ifmediareq *);
421
422/*
423 * Description of a PCnet-PCI variant. Used to select media access
424 * method, mostly, and to print a nice description of the chip.
425 */
426static const struct pcn_variant {
427 const char *pcv_desc;
428 void (*pcv_mediainit)(struct pcn_softc *);
429 uint16_t pcv_chipid;
430} pcn_variants[] = {
431 { "Am79c970",
432 pcn_79c970_mediainit,
433 PARTID_Am79c970 },
434
435 { "Am79c970A",
436 pcn_79c970_mediainit,
437 PARTID_Am79c970A },
438
439 { "Am79c971",
440 pcn_79c971_mediainit,
441 PARTID_Am79c971 },
442
443 { "Am79c972",
444 pcn_79c971_mediainit,
445 PARTID_Am79c972 },
446
447 { "Am79c973",
448 pcn_79c971_mediainit,
449 PARTID_Am79c973 },
450
451 { "Am79c975",
452 pcn_79c971_mediainit,
453 PARTID_Am79c975 },
454
455 { "Am79c976",
456 pcn_79c971_mediainit,
457 PARTID_Am79c976 },
458
459 { "Am79c978",
460 pcn_79c971_mediainit,
461 PARTID_Am79c978 },
462
463 { "Unknown",
464 pcn_79c971_mediainit,
465 0 },
466};
467
468int pcn_copy_small = 0;
469
470int pcn_match(struct device *, void *, void *);
471void pcn_attach(struct device *, struct device *, void *);
472
473const struct cfattach pcn_ca = {
474 sizeof(struct pcn_softc), pcn_match, pcn_attach,
475};
476
477const struct pci_matchid pcn_devices[] = {
478 { PCI_VENDOR_AMD, PCI_PRODUCT_AMD_PCNET_PCI },
479 { PCI_VENDOR_AMD, PCI_PRODUCT_AMD_PCHOME_PCI }
480};
481
482struct cfdriver pcn_cd = {
483 NULL, "pcn", DV_IFNET
484};
485
486/*
487 * Routines to read and write the PCnet-PCI CSR/BCR space.
488 */
489
490static __inline uint32_t
491pcn_csr_read(struct pcn_softc *sc, int reg)
492{
493
494 bus_space_write_4(sc->sc_st, sc->sc_sh, PCN32_RAP, reg);
495 return (bus_space_read_4(sc->sc_st, sc->sc_sh, PCN32_RDP));
496}
497
498static __inline void
499pcn_csr_write(struct pcn_softc *sc, int reg, uint32_t val)
500{
501
502 bus_space_write_4(sc->sc_st, sc->sc_sh, PCN32_RAP, reg);
503 bus_space_write_4(sc->sc_st, sc->sc_sh, PCN32_RDP, val);
504}
505
506static __inline uint32_t
507pcn_bcr_read(struct pcn_softc *sc, int reg)
508{
509
510 bus_space_write_4(sc->sc_st, sc->sc_sh, PCN32_RAP, reg);
511 return (bus_space_read_4(sc->sc_st, sc->sc_sh, PCN32_BDP));
512}
513
514static __inline void
515pcn_bcr_write(struct pcn_softc *sc, int reg, uint32_t val)
516{
517
518 bus_space_write_4(sc->sc_st, sc->sc_sh, PCN32_RAP, reg);
519 bus_space_write_4(sc->sc_st, sc->sc_sh, PCN32_BDP, val);
520}
521
522static const struct pcn_variant *
523pcn_lookup_variant(uint16_t chipid)
524{
525 const struct pcn_variant *pcv;
526
527 for (pcv = pcn_variants; pcv->pcv_chipid != 0; pcv++) {
528 if (chipid == pcv->pcv_chipid)
529 return (pcv);
530 }
531
532 /*
533 * This covers unknown chips, which we simply treat like
534 * a generic PCnet-FAST.
535 */
536 return (pcv);
537}
538
539int
540pcn_match(struct device *parent, void *match, void *aux)
541{
542 struct pci_attach_args *pa = aux;
543
544 /*
545 * IBM makes a PCI variant of this card which shows up as a
546 * Trident Microsystems 4DWAVE DX (ethernet network, revision 0x25)
547 * this card is truly a pcn card, so we have a special case match for
548 * it.
549 */
550 if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_TRIDENT &&
551 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_TRIDENT_4DWAVE_DX &&
552 PCI_CLASS(pa->pa_class) == PCI_CLASS_NETWORK)
553 return(1);
554
555 return (pci_matchbyid((struct pci_attach_args *)aux, pcn_devices,
556 nitems(pcn_devices)));
557}
558
559void
560pcn_attach(struct device *parent, struct device *self, void *aux)
561{
562 struct pcn_softc *sc = (struct pcn_softc *) self;
563 struct pci_attach_args *pa = aux;
564 struct ifnet *ifp = &sc->sc_arpcom.ac_if;
Value stored to 'ifp' during its initialization is never read
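[Editor's note: 'ifp' is unconditionally reassigned at line 764 below before any read of the value stored here, so this initializer is a dead store; see the sketch under Bug Summary for a possible fix.]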
565 pci_chipset_tag_t pc = pa->pa_pc;
566 pci_intr_handle_t ih;
567 const char *intrstr = NULL;
568 bus_space_tag_t iot, memt;
569 bus_space_handle_t ioh, memh;
570 bus_dma_segment_t seg;
571 int ioh_valid, memh_valid;
572 int i, rseg, error;
573 uint32_t chipid, reg;
574 uint8_t enaddr[ETHER_ADDR_LEN];
575
576 timeout_set(&sc->sc_tick_timeout, pcn_tick, sc);
577
578 /*
579 * Map the device.
580 */
581 ioh_valid = (pci_mapreg_map(pa, PCN_PCI_CBIO, PCI_MAPREG_TYPE_IO, 0,
582 &iot, &ioh, NULL, NULL, 0) == 0);
583 memh_valid = (pci_mapreg_map(pa, PCN_PCI_CBMEM,
584 PCI_MAPREG_TYPE_MEM|PCI_MAPREG_MEM_TYPE_32BIT, 0,
585 &memt, &memh, NULL, NULL, 0) == 0);
586
587 if (memh_valid) {
588 sc->sc_st = memt;
589 sc->sc_sh = memh;
590 } else if (ioh_valid) {
591 sc->sc_st = iot;
592 sc->sc_sh = ioh;
593 } else {
594 printf(": unable to map device registers\n");
595 return;
596 }
597
598 sc->sc_dmat = pa->pa_dmat;
599
600 /* Get it out of power save mode, if needed. */
601 pci_set_powerstate(pc, pa->pa_tag, PCI_PMCSR_STATE_D0);
602
603 /*
604 * Reset the chip to a known state. This also puts the
605 * chip into 32-bit mode.
606 */
607 pcn_reset(sc);
608
609#if !defined(PCN_NO_PROM)
610
611 /*
612 * Read the Ethernet address from the EEPROM.
613 */
614 for (i = 0; i < ETHER_ADDR_LEN; i++)
615 enaddr[i] = bus_space_read_1(sc->sc_st, sc->sc_sh,
616 PCN32_APROM + i);
617#else
618 /*
619 * The PROM is not used; instead we assume that the MAC address
620 * has been programmed into the device's physical address
621 * registers by the boot firmware.
622 */
623
624 for (i=0; i < 3; i++) {
625 uint32_t val;
626 val = pcn_csr_read(sc, LE_CSR12 + i);
627 enaddr[2*i] = val & 0x0ff;
628 enaddr[2*i+1] = (val >> 8) & 0x0ff;
629 }
630#endif
631
632 /*
633 * Now that the device is mapped, attempt to figure out what
634 * kind of chip we have. Note that IDL has all 32 bits of
635 * the chip ID when we're in 32-bit mode.
636 */
637 chipid = pcn_csr_read(sc, LE_CSR88);
638 sc->sc_variant = pcn_lookup_variant(CHIPID_PARTID(chipid));
639
640 /*
641 * Map and establish our interrupt.
642 */
643 if (pci_intr_map(pa, &ih)) {
644 printf(": unable to map interrupt\n");
645 return;
646 }
647 intrstr = pci_intr_string(pc, ih);
648 sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, pcn_intr, sc,
649 self->dv_xname);
650 if (sc->sc_ih == NULL) {
651 printf(": unable to establish interrupt");
652 if (intrstr != NULL)
653 printf(" at %s", intrstr);
654 printf("\n");
655 return;
656 }
657
658 /*
659 * Allocate the control data structures, and create and load the
660 * DMA map for it.
661 */
662 if ((error = bus_dmamem_alloc(sc->sc_dmat,
663 sizeof(struct pcn_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
664 0)) != 0) {
665 printf(": unable to allocate control data, error = %d\n",
666 error);
667 return;
668 }
669
670 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
671 sizeof(struct pcn_control_data), (caddr_t *)&sc->sc_control_data,
672 BUS_DMA_COHERENT)) != 0) {
673 printf(": unable to map control data, error = %d\n",
674 error);
675 goto fail_1;
676 }
677
678 if ((error = bus_dmamap_create(sc->sc_dmat,
679 sizeof(struct pcn_control_data), 1,
680 sizeof(struct pcn_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
681 printf(": unable to create control data DMA map, "
682 "error = %d\n", error);
683 goto fail_2;
684 }
685
686 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
687 sc->sc_control_data, sizeof(struct pcn_control_data), NULL,
688 0)) != 0) {
689 printf(": unable to load control data DMA map, error = %d\n",
690 error);
691 goto fail_3;
692 }
693
694 /* Create the transmit buffer DMA maps. */
695 for (i = 0; i < PCN_TXQUEUELEN; i++) {
696 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
697 PCN_NTXSEGS, MCLBYTES, 0, 0,
698 &sc->sc_txsoft[i].txs_dmamap)) != 0) {
699 printf(": unable to create tx DMA map %d, "
700 "error = %d\n", i, error);
701 goto fail_4;
702 }
703 }
704
705 /* Create the receive buffer DMA maps. */
706 for (i = 0; i < PCN_NRXDESC; i++) {
707 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
708 MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
709 printf(": unable to create rx DMA map %d, "
710 "error = %d\n", i, error);
711 goto fail_5;
712 }
713 sc->sc_rxsoft[i].rxs_mbuf = NULL;
714 }
715
716 printf(", %s, rev %d: %s, address %s\n", sc->sc_variant->pcv_desc,
717 CHIPID_VER(chipid), intrstr, ether_sprintf(enaddr));
718
719 /* Initialize our media structures. */
720 (*sc->sc_variant->pcv_mediainit)(sc);
721
722 /*
723 * Initialize FIFO watermark info.
724 */
725 switch (sc->sc_variant->pcv_chipid) {
726 case PARTID_Am79c970:
727 case PARTID_Am79c970A:
728 sc->sc_rcvfw_desc = pcn_79c970_rcvfw;
729 sc->sc_xmtsp_desc = pcn_79c970_xmtsp;
730 sc->sc_xmtfw_desc = pcn_79c970_xmtfw;
731 break;
732
733 default:
734 sc->sc_rcvfw_desc = pcn_79c971_rcvfw;
735 /*
736 * Read BCR25 to determine how much SRAM is
737 * on the board. If > 0, then the chip
738 * uses different Start Point thresholds.
739 *
740 * Note BCR25 and BCR26 are loaded from the
741 * EEPROM on RST, and unaffected by S_RESET,
742 * so we don't really have to worry about
743 * them except for this.
744 */
745 reg = pcn_bcr_read(sc, LE_BCR25) & 0x00ff;
746 if (reg != 0)
747 sc->sc_xmtsp_desc = pcn_79c971_xmtsp_sram;
748 else
749 sc->sc_xmtsp_desc = pcn_79c971_xmtsp;
750 sc->sc_xmtfw_desc = pcn_79c971_xmtfw;
751 break;
752 }
753
754 /*
755 * Set up defaults -- see the tables above for what these
756 * values mean.
757 *
758 * XXX How should we tune RCVFW and XMTFW?
759 */
760 sc->sc_rcvfw = 1; /* minimum for full-duplex */
761 sc->sc_xmtsp = 1;
762 sc->sc_xmtfw = 0;
763
764 ifp = &sc->sc_arpcom.ac_if;
765 bcopy(enaddr, sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN);
766 bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
767 ifp->if_softc = sc;
768 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
769 ifp->if_ioctl = pcn_ioctl;
770 ifp->if_start = pcn_start;
771 ifp->if_watchdog = pcn_watchdog;
772 ifq_init_maxlen(&ifp->if_snd, PCN_NTXDESC - 1);
773
774 /* Attach the interface. */
775 if_attach(ifp);
776 ether_ifattach(ifp);
777 return;
778
779 /*
780 * Free any resources we've allocated during the failed attach
781 * attempt. Do this in reverse order and fall through.
782 */
783 fail_5:
784 for (i = 0; i < PCN_NRXDESC; i++) {
785 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
786 bus_dmamap_destroy(sc->sc_dmat,
787 sc->sc_rxsoft[i].rxs_dmamap);
788 }
789 fail_4:
790 for (i = 0; i < PCN_TXQUEUELEN; i++) {
791 if (sc->sc_txsoft[i].txs_dmamap != NULL)
792 bus_dmamap_destroy(sc->sc_dmat,
793 sc->sc_txsoft[i].txs_dmamap);
794 }
795 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
796 fail_3:
797 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
798 fail_2:
799 bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
800 sizeof(struct pcn_control_data));
801 fail_1:
802 bus_dmamem_free(sc->sc_dmat, &seg, rseg);
803}
804
805/*
806 * pcn_start: [ifnet interface function]
807 *
808 * Start packet transmission on the interface.
809 */
810void
811pcn_start(struct ifnet *ifp)
812{
813 struct pcn_softc *sc = ifp->if_softc;
814 struct mbuf *m0;
815 struct pcn_txsoft *txs;
816 bus_dmamap_t dmamap;
817 int nexttx, lasttx = -1, ofree, seg;
818
819 if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
820 return;
821
822 /*
823 * Remember the previous number of free descriptors and
824 * the first descriptor we'll use.
825 */
826 ofree = sc->sc_txfree;
827
828 /*
829 * Loop through the send queue, setting up transmit descriptors
830 * until we drain the queue, or use up all available transmit
831 * descriptors.
832 */
833 for (;;) {
834 if (sc->sc_txsfree == 0 ||
835 sc->sc_txfree < (PCN_NTXSEGS + 1)) {
836 ifq_set_oactive(&ifp->if_snd);
837 break;
838 }
839
840 /* Grab a packet off the queue. */
841 m0 = ifq_dequeue(&ifp->if_snd);
842 if (m0 == NULL)
843 break;
844
845 txs = &sc->sc_txsoft[sc->sc_txsnext];
846 dmamap = txs->txs_dmamap;
847
848 switch (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
849 BUS_DMA_NOWAIT)) {
850 case 0:
851 break;
852 case EFBIG:
853 if (m_defrag(m0, M_DONTWAIT) == 0 &&
854 bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
855 BUS_DMA_NOWAIT) == 0)
856 break;
857
858 /* FALLTHROUGH */
859 default:
860 m_freem(m0);
861 continue;
862 }
863
864 /*
865 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
866 */
867
868 /* Sync the DMA map. */
869 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
870 BUS_DMASYNC_PREWRITE);
871
872 /*
873 * Initialize the transmit descriptors.
874 */
875 if (sc->sc_swstyle == LE_B20_SSTYLE_PCNETPCI3) {
876 for (nexttx = sc->sc_txnext, seg = 0;
877 seg < dmamap->dm_nsegs;
878 seg++, nexttx = PCN_NEXTTX(nexttx)) {
879 /*
880 * If this is the first descriptor we're
881 * enqueueing, don't set the OWN bit just
882 * yet. That could cause a race condition.
883 * We'll do it below.
884 */
885 sc->sc_txdescs[nexttx].tmd0 = 0;
886 sc->sc_txdescs[nexttx].tmd2 =
887 htole32(dmamap->dm_segs[seg].ds_addr);
888 sc->sc_txdescs[nexttx].tmd1 =
889 htole32(LE_T1_ONES |
890 (nexttx == sc->sc_txnext ? 0 : LE_T1_OWN) |
891 (LE_BCNT(dmamap->dm_segs[seg].ds_len) &
892 LE_T1_BCNT_MASK));
893 lasttx = nexttx;
894 }
895 } else {
896 for (nexttx = sc->sc_txnext, seg = 0;
897 seg < dmamap->dm_nsegs;
898 seg++, nexttx = PCN_NEXTTX(nexttx)) {
899 /*
900 * If this is the first descriptor we're
901 * enqueueing, don't set the OWN bit just
902 * yet. That could cause a race condition.
903 * We'll do it below.
904 */
905 sc->sc_txdescs[nexttx].tmd0 =
906 htole32(dmamap->dm_segs[seg].ds_addr);
907 sc->sc_txdescs[nexttx].tmd2 = 0;
908 sc->sc_txdescs[nexttx].tmd1 =
909 htole32(LE_T1_ONES |
910 (nexttx == sc->sc_txnext ? 0 : LE_T1_OWN) |
911 (LE_BCNT(dmamap->dm_segs[seg].ds_len) &
912 LE_T1_BCNT_MASK));
913 lasttx = nexttx;
914 }
915 }
916
917 KASSERT(lasttx != -1);
918 /* Interrupt on the packet, if appropriate. */
919 if ((sc->sc_txsnext & PCN_TXINTR_MASK) == 0)
920 sc->sc_txdescs[lasttx].tmd1 |= htole32(LE_T1_LTINT);
921
922 /* Set `start of packet' and `end of packet' appropriately. */
923 sc->sc_txdescs[lasttx].tmd1 |= htole32(LE_T1_ENP);
924 sc->sc_txdescs[sc->sc_txnext].tmd1 |=
925 htole32(LE_T1_OWN|LE_T1_STP);
926
927 /* Sync the descriptors we're using. */
928 PCN_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
929 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
930
931 /* Kick the transmitter. */
932 pcn_csr_write(sc, LE_CSR0, LE_C0_INEA|LE_C0_TDMD);
933
934 /*
935 * Store a pointer to the packet so we can free it later,
936 * and remember what txdirty will be once the packet is
937 * done.
938 */
939 txs->txs_mbuf = m0;
940 txs->txs_firstdesc = sc->sc_txnext;
941 txs->txs_lastdesc = lasttx;
942
943 /* Advance the tx pointer. */
944 sc->sc_txfree -= dmamap->dm_nsegs;
945 sc->sc_txnext = nexttx;
946
947 sc->sc_txsfree--;
948 sc->sc_txsnext = PCN_NEXTTXS(sc->sc_txsnext);
949
950#if NBPFILTER > 0
951 /* Pass the packet to any BPF listeners. */
952 if (ifp->if_bpf)
953 bpf_mtap(ifp->if_bpf, m0, BPF_DIRECTION_OUT);
954#endif /* NBPFILTER > 0 */
955 }
956
957 if (sc->sc_txfree != ofree) {
958 /* Set a watchdog timer in case the chip flakes out. */
959 ifp->if_timer = 5;
960 }
961}
962
963/*
964 * pcn_watchdog: [ifnet interface function]
965 *
966 * Watchdog timer handler.
967 */
968void
969pcn_watchdog(struct ifnet *ifp)
970{
971 struct pcn_softc *sc = ifp->if_softc;
972
973 /*
974 * Since we're not interrupting every packet, sweep
975 * up before we report an error.
976 */
977 pcn_txintr(sc);
978
979 if (sc->sc_txfree != PCN_NTXDESC) {
980 printf("%s: device timeout (txfree %d txsfree %d)\n",
981 sc->sc_dev.dv_xname, sc->sc_txfree, sc->sc_txsfree);
982 ifp->if_oerrors++;
983
984 /* Reset the interface. */
985 (void) pcn_init(ifp);
986 }
987
988 /* Try to get more packets going. */
989 pcn_start(ifp);
990}
991
992/*
993 * pcn_ioctl: [ifnet interface function]
994 *
995 * Handle control requests from the operator.
996 */
997int
998pcn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
999{
1000 struct pcn_softc *sc = ifp->if_softc;
1001 struct ifreq *ifr = (struct ifreq *) data;
1002 int s, error = 0;
1003
1004 s = splnet();
1005
1006 switch (cmd) {
1007 case SIOCSIFADDR:
1008 ifp->if_flags |= IFF_UP;
1009 if (!(ifp->if_flags & IFF_RUNNING))
1010 pcn_init(ifp);
1011 break;
1012
1013 case SIOCSIFFLAGS:
1014 if (ifp->if_flags & IFF_UP) {
1015 if (ifp->if_flags & IFF_RUNNING)
1016 error = ENETRESET;
1017 else
1018 pcn_init(ifp);
1019 } else {
1020 if (ifp->if_flags & IFF_RUNNING)
1021 pcn_stop(ifp, 1);
1022 }
1023 break;
1024
1025 case SIOCSIFMEDIA:
1026 case SIOCGIFMEDIA:
1027 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
1028 break;
1029
1030 default:
1031 error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
1032 }
1033
1034 if (error == ENETRESET) {
1035 if (ifp->if_flags & IFF_RUNNING)
1036 error = pcn_init(ifp);
1037 else
1038 error = 0;
1039 }
1040
1041 splx(s);
1042 return (error);
1043}
1044
1045/*
1046 * pcn_intr:
1047 *
1048 * Interrupt service routine.
1049 */
1050int
1051pcn_intr(void *arg)
1052{
1053 struct pcn_softc *sc = arg;
1054 struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1055 uint32_t csr0;
1056 int wantinit, handled = 0;
1057
1058 for (wantinit = 0; wantinit == 0;) {
1059 csr0 = pcn_csr_read(sc, LE_CSR0);
1060 if ((csr0 & LE_C0_INTR) == 0)
1061 break;
1062
1063 /* ACK the bits and re-enable interrupts. */
1064 pcn_csr_write(sc, LE_CSR0, csr0 &
1065 (LE_C0_INEA|LE_C0_BABL|LE_C0_MISS|LE_C0_MERR|LE_C0_RINT|
1066 LE_C0_TINT|LE_C0_IDON));
1067
1068 handled = 1;
1069
1070 if (csr0 & LE_C0_RINT)
1071 wantinit = pcn_rxintr(sc);
1072
1073 if (csr0 & LE_C0_TINT)
1074 pcn_txintr(sc);
1075
1076 if (csr0 & LE_C0_ERR) {
1077 if (csr0 & LE_C0_BABL)
1078 ifp->if_oerrors++;
1079 if (csr0 & LE_C0_MISS)
1080 ifp->if_ierrors++;
1081 if (csr0 & LE_C0_MERR) {
1082 printf("%s: memory error\n",
1083 sc->sc_dev.dv_xname);
1084 wantinit = 1;
1085 break;
1086 }
1087 }
1088
1089 if ((csr0 & LE_C0_RXON) == 0) {
1090 printf("%s: receiver disabled\n",
1091 sc->sc_dev.dv_xname);
1092 ifp->if_ierrors++;
1093 wantinit = 1;
1094 }
1095
1096 if ((csr0 & LE_C0_TXON) == 0) {
1097 printf("%s: transmitter disabled\n",
1098 sc->sc_dev.dv_xname);
1099 ifp->if_oerrors++;
1100 wantinit = 1;
1101 }
1102 }
1103
1104 if (handled) {
1105 if (wantinit)
1106 pcn_init(ifp);
1107
1108 /* Try to get more packets going. */
1109 pcn_start(ifp);
1110 }
1111
1112 return (handled);
1113}
1114
1115/*
1116 * pcn_spnd:
1117 *
1118 * Suspend the chip.
1119 */
1120void
1121pcn_spnd(struct pcn_softc *sc)
1122{
1123 int i;
1124
1125 pcn_csr_write(sc, LE_CSR5, sc->sc_csr5 | LE_C5_SPND);
1126
1127 for (i = 0; i < 10000; i++) {
1128 if (pcn_csr_read(sc, LE_CSR5) & LE_C5_SPND)
1129 return;
1130 delay(5);
1131 }
1132
1133 printf("%s: WARNING: chip failed to enter suspended state\n",
1134 sc->sc_dev.dv_xname);
1135}
1136
1137/*
1138 * pcn_txintr:
1139 *
1140 * Helper; handle transmit interrupts.
1141 */
1142void
1143pcn_txintr(struct pcn_softc *sc)
1144{
1145 struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1146 struct pcn_txsoft *txs;
1147 uint32_t tmd1, tmd2, tmd;
1148 int i, j;
1149
1150 /*
1151 * Go through our Tx list and free mbufs for those
1152 * frames which have been transmitted.
1153 */
1154 for (i = sc->sc_txsdirty; sc->sc_txsfree != PCN_TXQUEUELEN;
1155 i = PCN_NEXTTXS(i), sc->sc_txsfree++) {
1156 txs = &sc->sc_txsoft[i];
1157
1158 PCN_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_dmamap->dm_nsegs,
1159 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1160
1161 tmd1 = letoh32(sc->sc_txdescs[txs->txs_lastdesc].tmd1);
1162 if (tmd1 & LE_T1_OWN)
1163 break;
1164
1165 /*
1166 * Slightly annoying -- we have to loop through the
1167 * descriptors we've used looking for ERR, since it
1168 * can appear on any descriptor in the chain.
1169 */
1170 for (j = txs->txs_firstdesc;; j = PCN_NEXTTX(j)) {
1171 tmd = letoh32(sc->sc_txdescs[j].tmd1);
1172 if (tmd & LE_T1_ERR) {
1173 ifp->if_oerrors++;
1174 if (sc->sc_swstyle == LE_B20_SSTYLE_PCNETPCI3)
1175 tmd2 = letoh32(sc->sc_txdescs[j].tmd0);
1176 else
1177 tmd2 = letoh32(sc->sc_txdescs[j].tmd2);
1178 if (tmd2 & LE_T2_UFLO) {
1179 if (sc->sc_xmtsp < LE_C80_XMTSP_MAX) {
1180 sc->sc_xmtsp++;
1181 printf("%s: transmit "
1182 "underrun; new threshold: "
1183 "%s\n",
1184 sc->sc_dev.dv_xname,
1185 sc->sc_xmtsp_desc[
1186 sc->sc_xmtsp]);
1187 pcn_spnd(sc);
1188 pcn_csr_write(sc, LE_CSR80,
1189 LE_C80_RCVFW(sc->sc_rcvfw) |
1190 LE_C80_XMTSP(sc->sc_xmtsp) |
1191 LE_C80_XMTFW(sc->sc_xmtfw));
1192 pcn_csr_write(sc, LE_CSR5,
1193 sc->sc_csr5);
1194 } else {
1195 printf("%s: transmit "
1196 "underrun\n",
1197 sc->sc_dev.dv_xname);
1198 }
1199 } else if (tmd2 & LE_T2_BUFF) {
1200 printf("%s: transmit buffer error\n",
1201 sc->sc_dev.dv_xname);
1202 }
1203 if (tmd2 & LE_T2_LCOL)
1204 ifp->if_collisions++;
1205 if (tmd2 & LE_T2_RTRY)
1206 ifp->if_collisions += 16;
1207 goto next_packet;
1208 }
1209 if (j == txs->txs_lastdesc)
1210 break;
1211 }
1212 if (tmd1 & LE_T1_ONE)
1213 ifp->if_collisions++;
1214 else if (tmd & LE_T1_MORE) {
1215 /* Real number is unknown. */
1216 ifp->if_collisions += 2;
1217 }
1218 next_packet:
1219 sc->sc_txfree += txs->txs_dmamap->dm_nsegs;
1220 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
1221 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1222 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
1223 m_freem(txs->txs_mbuf);
1224 txs->txs_mbuf = NULL;
1225 }
1226
1227 /* Update the dirty transmit buffer pointer. */
1228 sc->sc_txsdirty = i;
1229
1230 /*
1231 * If there are no more pending transmissions, cancel the watchdog
1232 * timer.
1233 */
1234 if (sc->sc_txsfree == PCN_TXQUEUELEN)
1235 ifp->if_timer = 0;
1236
1237 if (ifq_is_oactive(&ifp->if_snd))
1238 ifq_restart(&ifp->if_snd);
1239}
1240
1241/*
1242 * pcn_rxintr:
1243 *
1244 * Helper; handle receive interrupts.
1245 */
1246int
1247pcn_rxintr(struct pcn_softc *sc)
1248{
1249 struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1250 struct pcn_rxsoft *rxs;
1251 struct mbuf *m;
1252 struct mbuf_list ml = MBUF_LIST_INITIALIZER();
1253 uint32_t rmd1;
1254 int i, len;
1255 int rv = 0;
1256
1257 for (i = sc->sc_rxptr;; i = PCN_NEXTRX(i)) {
1258 rxs = &sc->sc_rxsoft[i];
1259
1260 PCN_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1261
1262 rmd1 = letoh32(sc->sc_rxdescs[i].rmd1);
1263
1264 if (rmd1 & LE_R1_OWN)
1265 break;
1266
1267 /*
1268 * Check for errors and make sure the packet fit into
1269 * a single buffer. We have structured this block of
1270 * code the way it is in order to compress it into
1271 * one test in the common case (no error).
1272 */
1273 if (__predict_false((rmd1 & (LE_R1_STP|LE_R1_ENP|LE_R1_ERR)) !=
1274 (LE_R1_STP|LE_R1_ENP))) {
1275 /* Make sure the packet is in a single buffer. */
1276 if ((rmd1 & (LE_R1_STP|LE_R1_ENP)) !=
1277 (LE_R1_STP|LE_R1_ENP)) {
1278 printf("%s: packet spilled into next buffer\n",
1279 sc->sc_dev.dv_xname);
1280 rv = 1; /* pcn_intr() will re-init */
1281 goto done;
1282 }
1283
1284 /*
1285 * If the packet had an error, simply recycle the
1286 * buffer.
1287 */
1288 if (rmd1 & LE_R1_ERR) {
1289 ifp->if_ierrors++;
1290 /*
1291 * If we got an overflow error, chances
1292 * are there will be a CRC error. In
1293 * this case, just print the overflow
1294 * error, and skip the others.
1295 */
1296 if (rmd1 & LE_R1_OFLO)
1297 printf("%s: overflow error\n",
1298 sc->sc_dev.dv_xname);
1299 else {
1300#define PRINTIT(x, str) \
1301 if (rmd1 & (x)) \
1302 printf("%s: %s\n", \
1303 sc->sc_dev.dv_xname, str);
1304 PRINTIT(LE_R1_FRAM, "framing error");
1305 PRINTIT(LE_R1_CRC, "CRC error");
1306 PRINTIT(LE_R1_BUFF, "buffer error");
1307 }
1308#undef PRINTIT
1309 PCN_INIT_RXDESC(sc, i);
1310 continue;
1311 }
1312 }
1313
1314 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1315 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1316
1317 /*
1318 * No errors; receive the packet.
1319 */
1320 if (sc->sc_swstyle == LE_B20_SSTYLE_PCNETPCI3)
1321 len = letoh32(sc->sc_rxdescs[i].rmd0) & LE_R1_BCNT_MASK;
1322 else
1323 len = letoh32(sc->sc_rxdescs[i].rmd2) & LE_R1_BCNT_MASK;
1324
1325 /*
1326 * The LANCE family includes the CRC with every packet;
1327 * trim it off here.
1328 */
1329 len -= ETHER_CRC_LEN;
1330
1331 /*
1332 * If the packet is small enough to fit in a
1333 * single header mbuf, allocate one and copy
1334 * the data into it. This greatly reduces
1335 * memory consumption when we receive lots
1336 * of small packets.
1337 *
1338 * Otherwise, we add a new buffer to the receive
1339 * chain. If this fails, we drop the packet and
1340 * recycle the old buffer.
1341 */
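/*
 * Annotation (not in the original source): MHLEN is the data area of
 * a header mbuf; the 2-byte slack matches the m_data += 2 adjustment
 * below, which keeps the IP header 32-bit aligned behind the 14-byte
 * Ethernet header.
 */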
1342 if (pcn_copy_small != 0 && len <= (MHLEN - 2)) {
1343 MGETHDR(m, M_DONTWAIT, MT_DATA);
1344 if (m == NULL)
1345 goto dropit;
1346 m->m_data += 2;
1347 memcpy(mtod(m, caddr_t),
1348 mtod(rxs->rxs_mbuf, caddr_t), len);
1349 PCN_INIT_RXDESC(sc, i);
1350 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1351 rxs->rxs_dmamap->dm_mapsize,
1352 BUS_DMASYNC_PREREAD);
1353 } else {
1354 m = rxs->rxs_mbuf;
1355 if (pcn_add_rxbuf(sc, i) != 0) {
1356 dropit:
1357 ifp->if_ierrors++;
1358 PCN_INIT_RXDESC(sc, i);
1359 bus_dmamap_sync(sc->sc_dmat,
1360 rxs->rxs_dmamap, 0,
1361 rxs->rxs_dmamap->dm_mapsize,
1362 BUS_DMASYNC_PREREAD);
1363 continue;
1364 }
1365 }
1366
1367 m->m_pkthdr.len = m->m_len = len;
1368
1369 ml_enqueue(&ml, m);
1370 }
1371
1372 /* Update the receive pointer. */
1373 sc->sc_rxptr = i;
1374done:
1375 if_input(ifp, &ml);
1376 return (rv);
1377}
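
The "one test in the common case" trick in pcn_rxintr() collapses the
error and buffer-spill checks into a single branch. A minimal standalone
sketch (local names; the bit positions mirror the RMD1 layout used in
this file, and the sketch is not part of the driver):

    #include <stdint.h>

    #define R1_ENP (1U << 24) /* end of packet */
    #define R1_STP (1U << 25) /* start of packet */
    #define R1_ERR (1U << 30) /* error summary */

    /* Nonzero when the descriptor is errored or spans buffers. */
    static inline int
    rmd1_slow_path(uint32_t rmd1)
    {
        return (rmd1 & (R1_STP | R1_ENP | R1_ERR)) != (R1_STP | R1_ENP);
    }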
1378
1379/*
1380 * pcn_tick:
1381 *
1382 * One second timer, used to tick the MII.
1383 */
1384void
1385pcn_tick(void *arg)
1386{
1387 struct pcn_softc *sc = arg;
1388 int s;
1389
1390 s = splnet();
1391 mii_tick(&sc->sc_mii);
1392 splx(s);
1393
1394 timeout_add_sec(&sc->sc_tick_timeout, 1);
1395}
1396
1397/*
1398 * pcn_reset:
1399 *
1400 * Perform a soft reset on the PCnet-PCI.
1401 */
1402void
1403pcn_reset(struct pcn_softc *sc)
1404{
1405
1406 /*
1407 * The PCnet-PCI chip is reset by reading from the
1408 * RESET register. Note that while the NE2100 LANCE
1409 * boards require a write after the read, the PCnet-PCI
1410 * chips do not require this.
1411 *
1412 * Since we don't know if we're in 16-bit or 32-bit
1413 * mode right now, issue both (it's safe) in the
1414 * hopes that one will succeed.
1415 */
1416 (void) bus_space_read_2(sc->sc_st, sc->sc_sh, PCN16_RESET);
1417 (void) bus_space_read_4(sc->sc_st, sc->sc_sh, PCN32_RESET);
1418
1419 /* Wait 1ms for it to finish. */
1420 delay(1000);
1421
1422 /*
1423 * Select 32-bit I/O mode by issuing a 32-bit write to the
1424 * RDP. Since the RAP is 0 after a reset, writing a 0
1425 * to RDP is safe (since it simply clears CSR0).
1426 */
1427 bus_space_write_4(sc->sc_st, sc->sc_sh, PCN32_RDP, 0);
1428}
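
pcn_reset() leans on a chip quirk: a read of the RESET register triggers
the reset, and issuing the read at both access widths covers whichever
mode the chip is currently in. A standalone sketch (read16()/read32()
are hypothetical register accessors; the offsets come from this file's
PCN16_RESET and PCN32_RESET):

    #include <stdint.h>

    extern uint16_t read16(uint32_t off);  /* hypothetical accessor */
    extern uint32_t read32(uint32_t off);  /* hypothetical accessor */

    static void
    soft_reset(void)
    {
        (void)read16(0x14);    /* PCN16_RESET: resets a 16-bit mode chip */
        (void)read32(0x18);    /* PCN32_RESET: resets a 32-bit mode chip */
    }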
1429
1430/*
1431 * pcn_init: [ifnet interface function]
1432 *
1433 * Initialize the interface. Must be called at splnet().
1434 */
1435int
1436pcn_init(struct ifnet *ifp)
1437{
1438 struct pcn_softc *sc = ifp->if_softc;
1439 struct pcn_rxsoft *rxs;
1440 uint8_t *enaddr = LLADDR(ifp->if_sadl);
1441 int i, error = 0;
1442 uint32_t reg;
1443
1444 /* Cancel any pending I/O. */
1445 pcn_stop(ifp, 0);
1446
1447 /* Reset the chip to a known state. */
1448 pcn_reset(sc);
1449
1450 /*
1451 * On the Am79c970, select SSTYLE 2, and SSTYLE 3 on everything
1452 * else.
1453 *
1454 * XXX It'd be really nice to use SSTYLE 2 on all the chips,
1455 * because the structure layout is compatible with ILACC,
1456 * but the burst mode is only available in SSTYLE 3, and
1457 * burst mode should provide some performance enhancement.
1458 */
1459 if (sc->sc_variant->pcv_chipid == PARTID_Am79c970)
1460 sc->sc_swstyle = LE_B20_SSTYLE_PCNETPCI2;
1461 else
1462 sc->sc_swstyle = LE_B20_SSTYLE_PCNETPCI3;
1463 pcn_bcr_write(sc, LE_BCR20, sc->sc_swstyle);
1464
1465 /* Initialize the transmit descriptor ring. */
1466 memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
1467 PCN_CDTXSYNC(sc, 0, PCN_NTXDESC,
1468 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1469 sc->sc_txfree = PCN_NTXDESC;
1470 sc->sc_txnext = 0;
1471
1472 /* Initialize the transmit job descriptors. */
1473 for (i = 0; i < PCN_TXQUEUELEN; i++)
1474 sc->sc_txsoft[i].txs_mbuf = NULL;
1475 sc->sc_txsfree = PCN_TXQUEUELEN;
1476 sc->sc_txsnext = 0;
1477 sc->sc_txsdirty = 0;
1478
1479 /*
1480 * Initialize the receive descriptor and receive job
1481 * descriptor rings.
1482 */
1483 for (i = 0; i < PCN_NRXDESC; i++) {
1484 rxs = &sc->sc_rxsoft[i];
1485 if (rxs->rxs_mbuf == NULL) {
1486 if ((error = pcn_add_rxbuf(sc, i)) != 0) {
1487 printf("%s: unable to allocate or map rx "
1488 "buffer %d, error = %d\n",
1489 sc->sc_dev.dv_xname, i, error);
1490 /*
1491 * XXX Should attempt to run with fewer receive
1492 * XXX buffers instead of just failing.
1493 */
1494 pcn_rxdrain(sc);
1495 goto out;
1496 }
1497 } else
1498 PCN_INIT_RXDESC(sc, i);
1499 }
1500 sc->sc_rxptr = 0;
1501
1502 /* Initialize MODE for the initialization block. */
1503 sc->sc_mode = 0;
1504
1505 /*
1506 * If we have MII, simply select MII in the MODE register,
1507 * and clear ASEL. Otherwise, let ASEL stand (for now),
1508 * and leave PORTSEL alone (it is ignored when ASEL is set).
1509 */
1510 if (sc->sc_flags & PCN_F_HAS_MII) {
1511 pcn_bcr_write(sc, LE_BCR2,
1512 pcn_bcr_read(sc, LE_BCR2) & ~LE_B2_ASEL);
1513 sc->sc_mode |= LE_C15_PORTSEL(PORTSEL_MII);
1514
1515 /*
1516 * Disable MII auto-negotiation. We handle that in
1517 * our own MII layer.
1518 */
1519 pcn_bcr_write(sc, LE_BCR32,
1520 pcn_bcr_read(sc, LE_BCR32) | LE_B32_DANAS);
1521 }
1522
1523 /* Set the multicast filter in the init block. */
1524 pcn_set_filter(sc);
1525
1526 /*
1527 * Set the Tx and Rx descriptor ring addresses in the init
1528 * block, and the TLEN and RLEN fields of the init block
1529 * MODE register.
1530 */
1531 sc->sc_initblock.init_rdra = htole32(PCN_CDRXADDR(sc, 0));
1532 sc->sc_initblock.init_tdra = htole32(PCN_CDTXADDR(sc, 0));
1533 sc->sc_initblock.init_mode = htole32(sc->sc_mode |
1534 ((ffs(PCN_NTXDESC) - 1) << 28) |
1535 ((ffs(PCN_NRXDESC) - 1) << 20));
1536
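/*
 * Annotation (not in the original source): TLEN and RLEN hold log2 of
 * the ring sizes, so with PCN_NTXDESC == 512 and PCN_NRXDESC == 128
 * this computes (ffs(512) - 1) == 9 and (ffs(128) - 1) == 7, i.e.
 * (9 << 28) | (7 << 20) in the init block MODE word.
 */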
1537 /* Set the station address in the init block. */
1538 sc->sc_initblock.init_padr[0] = htole32(enaddr[0] |
1539 (enaddr[1] << 8) | (enaddr[2] << 16) | (enaddr[3] << 24));
1540 sc->sc_initblock.init_padr[1] = htole32(enaddr[4] |
1541 (enaddr[5] << 8));
1542
1543 /* Initialize CSR3. */
1544 pcn_csr_write(sc, LE_CSR3, LE_C3_MISSM|LE_C3_IDONM|LE_C3_DXSUFLO);
1545
1546 /* Initialize CSR4. */
1547 pcn_csr_write(sc, LE_CSR4, LE_C4_DMAPLUS|LE_C4_APAD_XMT|
1548 LE_C4_MFCOM|LE_C4_RCVCCOM|LE_C4_TXSTRTM);
1549
1550 /* Initialize CSR5. */
1551 sc->sc_csr5 = LE_C5_LTINTEN|LE_C5_SINTE;
1552 pcn_csr_write(sc, LE_CSR5, sc->sc_csr5);
1553
1554 /*
1555 * If we have an Am79c971 or greater, initialize CSR7.
1556 *
1557 * XXX Might be nice to use the MII auto-poll interrupt someday.
1558 */
1559 switch (sc->sc_variant->pcv_chipid) {
1560 case PARTID_Am79c970:
1561 case PARTID_Am79c970A:
1562 /* Not available on these chips. */
1563 break;
1564
1565 default:
1566 pcn_csr_write(sc, LE_CSR7, LE_C7_FASTSPNDE);
1567 break;
1568 }
1569
1570 /*
1571 * On the Am79c970A and greater, initialize BCR18 to
1572 * enable burst mode.
1573 *
1574 * Also enable the "no underflow" option on the Am79c971 and
1575 * higher, which prevents the chip from generating transmit
1576 * underflows, yet still provides decent performance. Note that if
1577 * the chip is not connected to external SRAM, we still have
1578 * to handle underflow errors (the NOUFLO bit is ignored in
1579 * that case).
1580 */
1581 reg = pcn_bcr_read(sc, LE_BCR18);
1582 switch (sc->sc_variant->pcv_chipid) {
1583 case PARTID_Am79c970:
1584 break;
1585
1586 case PARTID_Am79c970A:
1587 reg |= LE_B18_BREADE|LE_B18_BWRITE;
1588 break;
1589
1590 default:
1591 reg |= LE_B18_BREADE|LE_B18_BWRITE|LE_B18_NOUFLO;
1592 break;
1593 }
1594 pcn_bcr_write(sc, LE_BCR18, reg);
1595
1596 /*
1597 * Initialize CSR80 (FIFO thresholds for Tx and Rx).
1598 */
1599 pcn_csr_write(sc, LE_CSR80, LE_C80_RCVFW(sc->sc_rcvfw) |
1600 LE_C80_XMTSP(sc->sc_xmtsp) | LE_C80_XMTFW(sc->sc_xmtfw));
1601
1602 /*
1603 * Send the init block to the chip, and wait for it
1604 * to be processed.
1605 */
1606 PCN_CDINITSYNC(sc, BUS_DMASYNC_PREWRITE);
1607 pcn_csr_write(sc, LE_CSR1, PCN_CDINITADDR(sc) & 0xffff);
1608 pcn_csr_write(sc, LE_CSR2, (PCN_CDINITADDR(sc) >> 16) & 0xffff);
1609 pcn_csr_write(sc, LE_CSR0, LE_C0_INIT);
1610 delay(100);
1611 for (i = 0; i < 10000; i++) {
1612 if (pcn_csr_read(sc, LE_CSR0) & LE_C0_IDON)
1613 break;
1614 delay(10);
1615 }
1616 PCN_CDINITSYNC(sc, BUS_DMASYNC_POSTWRITE);
1617 if (i == 10000) {
1618 printf("%s: timeout processing init block\n",
1619 sc->sc_dev.dv_xname);
1620 error = EIO;
1621 goto out;
1622 }
1623
1624 /* Set the media. */
1625 (void) (*sc->sc_mii.mii_media.ifm_change_cb)(ifp);
1626
1627 /* Enable interrupts and external activity (and ACK IDON). */
1628 pcn_csr_write(sc, LE_CSR0, LE_C0_INEA|LE_C0_STRT|LE_C0_IDON);
1629
1630 if (sc->sc_flags & PCN_F_HAS_MII) {
1631 /* Start the one second MII clock. */
1632 timeout_add_sec(&sc->sc_tick_timeout, 1);
1633 }
1634
1635 /* ...all done! */
1636 ifp->if_flags |= IFF_RUNNING;
1637 ifq_clr_oactive(&ifp->if_snd);
1638
1639 out:
1640 if (error)
1641 printf("%s: interface not running\n", sc->sc_dev.dv_xname);
1642 return (error);
1643}
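
The init-block handoff in pcn_init() follows a common bounded-poll
pattern: kick the chip, then poll a status bit a fixed number of times
before declaring EIO. A standalone sketch (check_idon() and pause_10us()
are hypothetical stand-ins for pcn_csr_read(sc, LE_CSR0) & LE_C0_IDON
and delay(10); this is not driver code):

    #include <errno.h>

    extern int check_idon(void);   /* hypothetical: nonzero once IDON is set */
    extern void pause_10us(void);  /* hypothetical: ~10 microsecond delay */

    static int
    wait_for_idon(void)
    {
        int i;

        for (i = 0; i < 10000; i++) {
            if (check_idon())
                return (0);        /* init block processed */
            pause_10us();
        }
        return (EIO);              /* chip never set IDON */
    }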
1644
1645/*
1646 * pcn_rxdrain:
1647 *
1648 * Drain the receive queue.
1649 */
1650void
1651pcn_rxdrain(struct pcn_softc *sc)
1652{
1653 struct pcn_rxsoft *rxs;
1654 int i;
1655
1656 for (i = 0; i < PCN_NRXDESC; i++) {
1657 rxs = &sc->sc_rxsoft[i];
1658 if (rxs->rxs_mbuf != NULL) {
1659 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
1660 m_freem(rxs->rxs_mbuf);
1661 rxs->rxs_mbuf = NULL;
1662 }
1663 }
1664}
1665
1666/*
1667 * pcn_stop: [ifnet interface function]
1668 *
1669 * Stop transmission on the interface.
1670 */
1671void
1672pcn_stop(struct ifnet *ifp, int disable)
1673{
1674 struct pcn_softc *sc = ifp->if_softc;
1675 struct pcn_txsoft *txs;
1676 int i;
1677
1678 if (sc->sc_flags & PCN_F_HAS_MII) {
1679 /* Stop the one second clock. */
1680 timeout_del(&sc->sc_tick_timeout);
1681
1682 /* Down the MII. */
1683 mii_down(&sc->sc_mii);
1684 }
1685
1686 /* Mark the interface as down and cancel the watchdog timer. */
1687 ifp->if_flags &= ~IFF_RUNNING;
1688 ifq_clr_oactive(&ifp->if_snd);
1689 ifp->if_timer = 0;
1690
1691 /* Stop the chip. */
1692 pcn_csr_write(sc, LE_CSR0, LE_C0_STOP);
1693
1694 /* Release any queued transmit buffers. */
1695 for (i = 0; i < PCN_TXQUEUELEN; i++) {
1696 txs = &sc->sc_txsoft[i];
1697 if (txs->txs_mbuf != NULL) {
1698 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
1699 m_freem(txs->txs_mbuf);
1700 txs->txs_mbuf = NULL;
1701 }
1702 }
1703
1704 if (disable)
1705 pcn_rxdrain(sc);
1706}
1707
1708/*
1709 * pcn_add_rxbuf:
1710 *
1711 * Add a receive buffer to the indicated descriptor.
1712 */
1713int
1714pcn_add_rxbuf(struct pcn_softc *sc, int idx)
1715{
1716 struct pcn_rxsoft *rxs = &sc->sc_rxsoft[idx];
1717 struct mbuf *m;
1718 int error;
1719
1720 MGETHDR(m, M_DONTWAIT, MT_DATA);
1721 if (m == NULL)
1722 return (ENOBUFS);
1723
1724 MCLGET(m, M_DONTWAIT);
1725 if ((m->m_flags & M_EXT) == 0) {
1726 m_freem(m);
1727 return (ENOBUFS);
1728 }
1729
1730 if (rxs->rxs_mbuf != NULL)
1731 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
1732
1733 rxs->rxs_mbuf = m;
1734
1735 error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap,
1736 m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
1737 BUS_DMA_READ|BUS_DMA_NOWAIT);
1738 if (error) {
1739 printf("%s: can't load rx DMA map %d, error = %d\n",
1740 sc->sc_dev.dv_xname, idx, error);
1741 panic("pcn_add_rxbuf");
1742 }
1743
1744 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1745 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1746
1747 PCN_INIT_RXDESC(sc, idx);
1748
1749 return (0);
1750}
1751
1752/*
1753 * pcn_set_filter:
1754 *
1755 * Set up the receive filter.
1756 */
1757void
1758pcn_set_filter(struct pcn_softc *sc)
1759{
1760 struct arpcom *ac = &sc->sc_arpcom;
1761 struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1762 struct ether_multi *enm;
1763 struct ether_multistep step;
1764 uint32_t crc;
1765
1766 ifp->if_flags &= ~IFF_ALLMULTI;
1767
1768 if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
1769 ifp->if_flags |= IFF_ALLMULTI;
1770 if (ifp->if_flags & IFF_PROMISC)
1771 sc->sc_mode |= LE_C15_PROM;
1772 sc->sc_initblock.init_ladrf[0] =
1773 sc->sc_initblock.init_ladrf[1] =
1774 sc->sc_initblock.init_ladrf[2] =
1775 sc->sc_initblock.init_ladrf[3] = 0xffff;
1776 } else {
1777 sc->sc_initblock.init_ladrf[0] =
1778 sc->sc_initblock.init_ladrf[1] =
1779 sc->sc_initblock.init_ladrf[2] =
1780 sc->sc_initblock.init_ladrf[3] = 0;
1781
1782 /*
1783 * Set up the multicast address filter by passing all multicast
1784 * addresses through a CRC generator, and then using the high
1785 * order 6 bits as an index into the 64-bit logical address
1786 * filter. The high order bits select the word, while the rest
1787 * of the bits select the bit within the word.
1788 */
1789 ETHER_FIRST_MULTI(step, ac, enm);
1790 while (enm != NULL) {
1791 crc = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);
1792
1793 /* Just want the 6 most significant bits. */
1794 crc >>= 26;
1795
1796 /* Set the corresponding bit in the filter. */
1797 sc->sc_initblock.init_ladrf[crc >> 4] |=
1798 htole16(1 << (crc & 0xf));
1799
1800 ETHER_NEXT_MULTI(step, enm);
1801 }
1802 }
1803}
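
The CRC-indexed filter that pcn_set_filter() builds can be exercised on
its own. A standalone sketch (crc32_le() is a local illustration of the
little-endian Ethernet CRC, not the kernel's ether_crc32_le(); the
driver additionally stores each 16-bit filter word little-endian via
htole16(), which the sketch omits):

    #include <stddef.h>
    #include <stdint.h>

    static uint32_t
    crc32_le(const uint8_t *buf, size_t len)
    {
        uint32_t crc = 0xffffffff;
        size_t i;
        int bit;

        for (i = 0; i < len; i++) {
            crc ^= buf[i];
            for (bit = 0; bit < 8; bit++)
                crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
        }
        return (crc);
    }

    static void
    ladrf_set(uint16_t ladrf[4], const uint8_t addr[6])
    {
        uint32_t idx = crc32_le(addr, 6) >> 26; /* top 6 bits: 0..63 */

        ladrf[idx >> 4] |= 1 << (idx & 0xf);    /* word, then bit */
    }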
1804
1805/*
1806 * pcn_79c970_mediainit:
1807 *
1808 * Initialize media for the Am79c970.
1809 */
1810void
1811pcn_79c970_mediainit(struct pcn_softc *sc)
1812{
1813 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, pcn_79c970_mediachange,
1814 pcn_79c970_mediastatus);
1815
1816 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_10_5,
1817 PORTSEL_AUI, NULL);
1818 if (sc->sc_variant->pcv_chipid == PARTID_Am79c970A)
1819 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_10_5|IFM_FDX,
1820 PORTSEL_AUI, NULL);
1821
1822 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_10_T,
1823 PORTSEL_10T, NULL);
1824 if (sc->sc_variant->pcv_chipid == PARTID_Am79c970A)
1825 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_10_T|IFM_FDX,
1826 PORTSEL_10T, NULL);
1827
1828 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO,
1829 0, NULL);
1830 if (sc->sc_variant->pcv_chipid == PARTID_Am79c970A)
1831 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO|IFM_FDX,
1832 0, NULL);
1833
1834 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
1835}
1836
1837/*
1838 * pcn_79c970_mediastatus: [ifmedia interface function]
1839 *
1840 * Get the current interface media status (Am79c970 version).
1841 */
1842void
1843pcn_79c970_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
1844{
1845 struct pcn_softc *sc = ifp->if_softc;
1846
1847 /*
1848 * The currently selected media is always the active media.
1849 * Note: We have no way to determine what media the AUTO
1850 * process picked.
1851 */
1852 ifmr->ifm_active = sc->sc_mii.mii_media.ifm_media;
1853}
1854
1855/*
1856 * pcn_79c970_mediachange: [ifmedia interface function]
1857 *
1858 * Set hardware to newly-selected media (Am79c970 version).
1859 */
1860int
1861pcn_79c970_mediachange(struct ifnet *ifp)
1862{
1863 struct pcn_softc *sc = ifp->if_softc;
1864 uint32_t reg;
1865
1866 if (IFM_SUBTYPE(sc->sc_mii.mii_media.ifm_media) == IFM_AUTO) {
1867 /*
1868 * CSR15:PORTSEL doesn't matter. Just set BCR2:ASEL.
1869 */
1870 reg = pcn_bcr_read(sc, LE_BCR2);
1871 reg |= LE_B2_ASEL;
1872 pcn_bcr_write(sc, LE_BCR2, reg);
1873 } else {
1874 /*
1875 * Clear BCR2:ASEL and set the new CSR15:PORTSEL value.
1876 */
1877 reg = pcn_bcr_read(sc, LE_BCR2);
1878 reg &= ~LE_B2_ASEL;
1879 pcn_bcr_write(sc, LE_BCR2, reg);
1880
1881 reg = pcn_csr_read(sc, LE_CSR15);
1882 reg = (reg & ~LE_C15_PORTSEL(PORTSEL_MASK)) |
1883 LE_C15_PORTSEL(sc->sc_mii.mii_media.ifm_cur->ifm_data);
1884 pcn_csr_write(sc, LE_CSR15, reg);
1885 }
1886
1887 if ((sc->sc_mii.mii_media.ifm_media & IFM_FDX) != 0) {
1888 reg = LE_B9_FDEN;
1889 if (IFM_SUBTYPE(sc->sc_mii.mii_media.ifm_media) == IFM_10_5)
1890 reg |= LE_B9_AUIFD;
1891 pcn_bcr_write(sc, LE_BCR9, reg);
1892 } else
1893 pcn_bcr_write(sc, LE_BCR9, 0);
1894
1895 return (0);
1896}
1897
1898/*
1899 * pcn_79c971_mediainit:
1900 *
1901 * Initialize media for the Am79c971.
1902 */
1903void
1904pcn_79c971_mediainit(struct pcn_softc *sc)
1905{
1906 struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1907
1908 /* We have MII. */
1909 sc->sc_flags |= PCN_F_HAS_MII;
1910
1911 /*
1912 * The built-in 10BASE-T interface is mapped to the MII
1913 * on the PCNet-FAST. Unfortunately, there's no EEPROM
1914 * word that tells us which PHY to use.
1915 * This driver used to ignore all but the first PHY to
1916 * answer, but this code was removed to support multiple
1917 * external PHYs. As the default instance will be the first
1918 * one to answer, no harm is done by letting the possibly
1919 * non-connected internal PHY show up.
1920 */
1921
1922 /* Initialize our media structures and probe the MII. */
1923 sc->sc_mii.mii_ifp = ifp;
1924 sc->sc_mii.mii_readreg = pcn_mii_readreg;
1925 sc->sc_mii.mii_writereg = pcn_mii_writereg;
1926 sc->sc_mii.mii_statchg = pcn_mii_statchg;
1927 ifmedia_init(&sc->sc_mii.mii_media, 0, pcn_79c971_mediachange,
1928 pcn_79c971_mediastatus);
1929
1930 mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
1931 MII_OFFSET_ANY, 0);
1932 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
1933 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
1934 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
1935 } else
1936 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
1937}
1938
1939/*
1940 * pcn_79c971_mediastatus: [ifmedia interface function]
1941 *
1942 * Get the current interface media status (Am79c971 version).
1943 */
1944void
1945pcn_79c971_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
1946{
1947 struct pcn_softc *sc = ifp->if_softc;
1948
1949 mii_pollstat(&sc->sc_mii);
1950 ifmr->ifm_status = sc->sc_mii.mii_media_status;
1951 ifmr->ifm_active = sc->sc_mii.mii_media_active;
1952}
1953
1954/*
1955 * pcn_79c971_mediachange: [ifmedia interface function]
1956 *
1957 * Set hardware to newly-selected media (Am79c971 version).
1958 */
1959int
1960pcn_79c971_mediachange(struct ifnet *ifp)
1961{
1962 struct pcn_softc *sc = ifp->if_softc;
1963
1964 if (ifp->if_flags & IFF_UP)
1965 mii_mediachg(&sc->sc_mii);
1966 return (0);
1967}
1968
1969/*
1970 * pcn_mii_readreg: [mii interface function]
1971 *
1972 * Read a PHY register on the MII.
1973 */
1974int
1975pcn_mii_readreg(struct device *self, int phy, int reg)
1976{
1977 struct pcn_softc *sc = (void *) self;
1978 uint32_t rv;
1979
1980 pcn_bcr_write(sc, LE_BCR33, reg | (phy << PHYAD_SHIFT));
1981 rv = pcn_bcr_read(sc, LE_BCR34) & LE_B34_MIIMD;
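/*
 * Annotation (not in the original source): an all-ones read means no
 * PHY drove the MII data line, so it is reported as "no device".
 */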
1982 if (rv == 0xffff)
1983 return (0);
1984
1985 return (rv);
1986}
1987
1988/*
1989 * pcn_mii_writereg: [mii interface function]
1990 *
1991 * Write a PHY register on the MII.
1992 */
1993void
1994pcn_mii_writereg(struct device *self, int phy, int reg, int val)
1995{
1996 struct pcn_softc *sc = (void *) self;
1997
1998 pcn_bcr_write(sc, LE_BCR33, reg | (phy << PHYAD_SHIFT));
1999 pcn_bcr_write(sc, LE_BCR34, val);
2000}
2001
2002/*
2003 * pcn_mii_statchg: [mii interface function]
2004 *
2005 * Callback from MII layer when media changes.
2006 */
2007void
2008pcn_mii_statchg(struct device *self)
2009{
2010 struct pcn_softc *sc = (void *) self;
2011
2012 if ((sc->sc_mii.mii_media_active & IFM_FDX) != 0)
2013 pcn_bcr_write(sc, LE_BCR9, LE_B9_FDEN);
2014 else
2015 pcn_bcr_write(sc, LE_BCR9, 0);
2016}