Bug Summary

File: dev/pci/if_msk.c
Warning: line 1974, column 3
Access to field 'sk_softc' results in a dereference of a null pointer (loaded from variable 'sc_if0')
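
The flagged statement dereferences the port A interface softc to reach the shared controller softc. A minimal sketch of the kind of guard the checker is asking for, assuming sc_if0 is loaded from sc->sk_if[SK_PORT_A] and can legitimately be NULL when that port never attached (the code at line 1974 itself lies beyond this excerpt):

    struct sk_if_softc *sc_if0 = sc->sk_if[SK_PORT_A];

    if (sc_if0 != NULL) {
        /* only reach the shared controller softc through an attached port */
        struct sk_softc *ssc = sc_if0->sk_softc;
        /* ... */
    }

Note that msk_attach() resets sc->sk_if[port] to NULL on any attach failure and msk_detach() clears it as well, so code that walks the per-port pointers has to tolerate NULL entries.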

Annotated Source Code


clang -cc1 -cc1 -triple amd64-unknown-openbsd7.4 -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name if_msk.c -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -ffp-contract=on -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -target-feature +retpoline-external-thunk -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/llvm16/lib/clang/16 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/legacy-dpm -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu13 -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/inc -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc/pmfw_if -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D SUSPEND -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant 
-fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fcf-protection=branch -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /home/ben/Projects/scan/2024-01-11-110808-61670-1 -x c /usr/src/sys/dev/pci/if_msk.c
1/* $OpenBSD: if_msk.c,v 1.143 2023/11/10 15:51:20 bluhm Exp $ */
2
3/*
4 * Copyright (c) 1997, 1998, 1999, 2000
5 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by Bill Paul.
18 * 4. Neither the name of the author nor the names of any co-contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32 * THE POSSIBILITY OF SUCH DAMAGE.
33 *
34 * $FreeBSD: /c/ncvs/src/sys/pci/if_sk.c,v 1.20 2000/04/22 02:16:37 wpaul Exp $
35 */
36
37/*
38 * Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu>
39 *
40 * Permission to use, copy, modify, and distribute this software for any
41 * purpose with or without fee is hereby granted, provided that the above
42 * copyright notice and this permission notice appear in all copies.
43 *
44 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
45 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
46 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
47 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
48 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
49 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
50 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
51 */
52
53/*
54 * SysKonnect SK-NET gigabit ethernet driver for FreeBSD. Supports
55 * the SK-984x series adapters, both single port and dual port.
56 * References:
57 * The XaQti XMAC II datasheet,
58 * http://www.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf
59 * The SysKonnect GEnesis manual, http://www.syskonnect.com
60 *
61 * Note: XaQti has been acquired by Vitesse, and Vitesse does not have the
62 * XMAC II datasheet online. I have put my copy at people.freebsd.org as a
63 * convenience to others until Vitesse corrects this problem:
64 *
65 * http://people.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf
66 *
67 * Written by Bill Paul <wpaul@ee.columbia.edu>
68 * Department of Electrical Engineering
69 * Columbia University, New York City
70 */
71
72/*
73 * The SysKonnect gigabit ethernet adapters consist of two main
74 * components: the SysKonnect GEnesis controller chip and the XaQti Corp.
75 * XMAC II gigabit ethernet MAC. The XMAC provides all of the MAC
76 * components and a PHY while the GEnesis controller provides a PCI
77 * interface with DMA support. Each card may have between 512K and
78 * 2MB of SRAM on board depending on the configuration.
79 *
80 * The SysKonnect GEnesis controller can have either one or two XMAC
81 * chips connected to it, allowing single or dual port NIC configurations.
82 * SysKonnect has the distinction of being the only vendor on the market
83 * with a dual port gigabit ethernet NIC. The GEnesis provides dual FIFOs,
84 * dual DMA queues, packet/MAC/transmit arbiters and direct access to the
85 * XMAC registers. This driver takes advantage of these features to allow
86 * both XMACs to operate as independent interfaces.
87 */
88
89#include "bpfilter.h"
90#include "kstat.h"
91
92#include <sys/param.h>
93#include <sys/systm.h>
94#include <sys/sockio.h>
95#include <sys/mbuf.h>
96#include <sys/malloc.h>
97#include <sys/kernel.h>
98#include <sys/socket.h>
99#include <sys/timeout.h>
100#include <sys/device.h>
101#include <sys/queue.h>
102
103#include <net/if.h>
104
105#include <netinet/in.h>
106#include <netinet/if_ether.h>
107
108#include <net/if_media.h>
109
110#if NBPFILTER1 > 0
111#include <net/bpf.h>
112#endif
113
114#if NKSTAT1 > 0
115#include <sys/kstat.h>
116#endif
117
118#include <dev/mii/mii.h>
119#include <dev/mii/miivar.h>
120
121#include <dev/pci/pcireg.h>
122#include <dev/pci/pcivar.h>
123#include <dev/pci/pcidevs.h>
124
125#include <dev/pci/if_skreg.h>
126#include <dev/pci/if_mskvar.h>
127
128#define MSK_STATUS_OWN_SHIFT 63
129#define MSK_STATUS_OWN_MASK 0x1
130#define MSK_STATUS_OPCODE_SHIFT 56
131#define MSK_STATUS_OPCODE_MASK 0x7f
132
133#define MSK_STATUS_OWN(_d) \
134 (((_d) >> MSK_STATUS_OWN_SHIFT) & MSK_STATUS_OWN_MASK)
135#define MSK_STATUS_OPCODE(_d) \
136 (((_d) >> MSK_STATUS_OPCODE_SHIFT) & MSK_STATUS_OPCODE_MASK)
137
138#define MSK_STATUS_OPCODE_RXSTAT 0x60
139#define MSK_STATUS_OPCODE_RXTIMESTAMP 0x61
140#define MSK_STATUS_OPCODE_RXVLAN 0x62
141#define MSK_STATUS_OPCODE_RXCKSUM 0x64
142#define MSK_STATUS_OPCODE_RXCKSUMVLAN \
143 (MSK_STATUS_OPCODE_RXVLAN | MSK_STATUS_OPCODE_RXCKSUM)
144#define MSK_STATUS_OPCODE_RXTIMEVLAN \
145 (MSK_STATUS_OPCODE_RXVLAN | MSK_STATUS_OPCODE_RXTIMESTAMP)
146#define MSK_STATUS_OPCODE_RSS_HASH 0x65
147#define MSK_STATUS_OPCODE_TXIDX 0x68
148#define MSK_STATUS_OPCODE_MACSEC 0x6c
149#define MSK_STATUS_OPCODE_PUTIDX 0x70
150
151#define MSK_STATUS_RXSTAT_PORT_SHIFT 48
152#define MSK_STATUS_RXSTAT_PORT_MASK 0x1
153#define MSK_STATUS_RXSTAT_LEN_SHIFT 32
154#define MSK_STATUS_RXSTAT_LEN_MASK 0xffff
155#define MSK_STATUS_RXSTAT_STATUS_SHIFT 0
156#define MSK_STATUS_RXSTAT_STATUS_MASK 0xffffffff
157
158#define MSK_STATUS_RXSTAT_PORT(_d) \
159 (((_d) >> MSK_STATUS_RXSTAT_PORT_SHIFT) & MSK_STATUS_RXSTAT_PORT_MASK)
160#define MSK_STATUS_RXSTAT_LEN(_d) \
161 (((_d) >> MSK_STATUS_RXSTAT_LEN_SHIFT) & MSK_STATUS_RXSTAT_LEN_MASK)
162#define MSK_STATUS_RXSTAT_STATUS(_d) \
163 (((_d) >> MSK_STATUS_RXSTAT_STATUS_SHIFT) & MSK_STATUS_RXSTAT_STATUS_MASK)
164
165#define MSK_STATUS_TXIDX_PORTA_SHIFT 0
166#define MSK_STATUS_TXIDX_PORTA_MASK 0xfff
167#define MSK_STATUS_TXIDX_PORTB_SHIFT 24
168#define MSK_STATUS_TXIDX_PORTB_MASK 0xfff
169
170#define MSK_STATUS_TXIDX_PORTA(_d) \
171 (((_d) >> MSK_STATUS_TXIDX_PORTA_SHIFT) & MSK_STATUS_TXIDX_PORTA_MASK)
172#define MSK_STATUS_TXIDX_PORTB(_d) \
173 (((_d) >> MSK_STATUS_TXIDX_PORTB_SHIFT) & MSK_STATUS_TXIDX_PORTB_MASK)
174
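A brief standalone illustration of how these field macros take apart one 64-bit status-ring entry once it is in host byte order (the ring itself, sc->sk_status_ring, is set up in mskc_reset() below; the variable idx and this RXSTAT handling are illustrative, not the driver's interrupt code):

    uint64_t d = lemtoh64(&sc->sk_status_ring[idx]);   /* assumed little-endian entry */

    if (MSK_STATUS_OWN(d)) {                            /* bit 63: ownership/valid */
        switch (MSK_STATUS_OPCODE(d)) {                 /* bits 56..62 */
        case MSK_STATUS_OPCODE_RXSTAT: {
            unsigned int port = MSK_STATUS_RXSTAT_PORT(d);    /* bit 48 */
            uint16_t len = MSK_STATUS_RXSTAT_LEN(d);          /* bits 32..47 */
            uint32_t status = MSK_STATUS_RXSTAT_STATUS(d);    /* bits 0..31 */
            /* port/len/status would be handed to msk_rxeof() for sc->sk_if[port] */
            break;
        }
        }
    }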
175int mskc_probe(struct device *, void *, void *);
176void mskc_attach(struct device *, struct device *self, void *aux);
177int mskc_detach(struct device *, int);
178int mskc_activate(struct device *, int);
179void mskc_reset(struct sk_softc *);
180int msk_probe(struct device *, void *, void *);
181void msk_attach(struct device *, struct device *self, void *aux);
182int msk_detach(struct device *, int);
183int msk_activate(struct device *, int);
184void msk_reset(struct sk_if_softc *);
185int mskcprint(void *, const char *);
186int msk_intr(void *);
187void msk_intr_yukon(struct sk_if_softc *);
188static inline int msk_rxvalid(struct sk_softc *, u_int32_t, u_int32_t);
189void msk_rxeof(struct sk_if_softc *, struct mbuf_list *, uint16_t, uint32_t);
190void msk_txeof(struct sk_if_softc *, unsigned int);
191static unsigned int msk_encap(struct sk_if_softc *, struct mbuf *, uint32_t);
192void msk_start(struct ifnet *);
193int msk_ioctl(struct ifnet *, u_long, caddr_t);
194void msk_init(void *);
195void msk_init_yukon(struct sk_if_softc *);
196void msk_stop(struct sk_if_softc *, int);
197void msk_watchdog(struct ifnet *);
198int msk_ifmedia_upd(struct ifnet *);
199void msk_ifmedia_sts(struct ifnet *, struct ifmediareq *);
200static int msk_newbuf(struct sk_if_softc *);
201int msk_init_rx_ring(struct sk_if_softc *);
202int msk_init_tx_ring(struct sk_if_softc *);
203void msk_fill_rx_ring(struct sk_if_softc *);
204
205int msk_miibus_readreg(struct device *, int, int);
206void msk_miibus_writereg(struct device *, int, int, int);
207void msk_miibus_statchg(struct device *);
208
209void msk_iff(struct sk_if_softc *);
210void msk_tick(void *);
211void msk_fill_rx_tick(void *);
212
213#ifdef MSK_DEBUG
214#define DPRINTF(x) if (mskdebug) printf x
215#define DPRINTFN(n,x) if (mskdebug >= (n)) printf x
216int mskdebug = 0;
217
218void msk_dump_txdesc(struct msk_tx_desc *, int);
219void msk_dump_mbuf(struct mbuf *);
220void msk_dump_bytes(const char *, int);
221#else
222#define DPRINTF(x)
223#define DPRINTFN(n,x)
224#endif
225
226#if NKSTAT1 > 0
227struct msk_mib {
228 const char *name;
229 uint32_t reg;
230 enum kstat_kv_type type;
231 enum kstat_kv_unit unit;
232};
233
234#define C32 KSTAT_KV_T_COUNTER32
235#define C64 KSTAT_KV_T_COUNTER64
236
237#define PKTS KSTAT_KV_U_PACKETS
238#define BYTES KSTAT_KV_U_BYTES
239#define NONE KSTAT_KV_U_NONE
240
241static const struct msk_mib msk_mib[] = {
242 { "InUnicasts", 0x100, C32, PKTS },
243 { "InBroadcasts", 0x108, C32, PKTS },
244 { "InPause", 0x110, C32, PKTS },
245 { "InMulticasts", 0x118, C32, PKTS },
246 { "InFCSErr", 0x120, C32, PKTS },
247 { "InGoodOctets", 0x130, C64, BYTES },
248 { "InBadOctets", 0x140, C64, BYTES },
249 { "Undersize", 0x150, C32, PKTS },
250 { "Fragments", 0x158, C32, PKTS },
251 { "In64Octets", 0x160, C32, PKTS },
252 { "In127Octets", 0x168, C32, PKTS },
253 { "In255Octets", 0x170, C32, PKTS },
254 { "In511Octets", 0x178, C32, PKTS },
255 { "In1023Octets", 0x180, C32, PKTS },
256 { "In1518Octets", 0x188, C32, PKTS },
257 { "InMaxOctets", 0x190, C32, PKTS },
258 { "OverSize", 0x198, C32, PKTS },
259 { "Jabber", 0x1a8, C32, PKTS },
260 { "Overflow", 0x1b0, C32, PKTS },
261
262 { "OutUnicasts", 0x1c0, C32, PKTS },
263 { "OutBroadcasts", 0x1c8, C32, PKTS },
264 { "OutPause", 0x1d0, C32, PKTS },
265 { "OutMulticasts", 0x1d8, C32, PKTS },
266 { "OutOctets", 0x1e0, C64, BYTES },
267 { "Out64Octets", 0x1f0, C32, PKTS },
268 { "Out127Octets", 0x1f8, C32, PKTS },
269 { "Out255Octets", 0x200, C32, PKTS },
270 { "Out511Octets", 0x208, C32, PKTS },
271 { "Out1023Octets", 0x210, C32, PKTS },
272 { "Out1518Octets", 0x218, C32, PKTS },
273 { "OutMaxOctets", 0x220, C32, PKTS },
274 { "Collisions", 0x230, C32, NONE },
275 { "Late", 0x238, C32, NONE },
276 { "Excessive", 0x240, C32, PKTS },
277 { "Multiple", 0x248, C32, PKTS },
278 { "Single", 0x250, C32, PKTS },
279 { "Underflow", 0x258, C32, PKTS },
280};
281
282#undef C32
283#undef C64
284
285#undef PKTS
286#undef BYTES
287#undef NONE
288
289struct msk_kstat {
290 struct rwlock lock;
291 struct kstat *ks;
292};
293
294static uint32_t msk_mib_read32(struct sk_if_softc *, uint32_t);
295static uint64_t msk_mib_read64(struct sk_if_softc *, uint32_t);
296
297void msk_kstat_attach(struct sk_if_softc *);
298void msk_kstat_detach(struct sk_if_softc *);
299int msk_kstat_read(struct kstat *ks);
300#endif
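The msk_mib[] table above only describes the counters: a name, a GMAC MIB register offset, a counter width and a unit. A hypothetical sketch of turning such a table into kstat key/value slots; the driver's real msk_kstat_attach()/msk_kstat_read() live further down in the file, outside this excerpt:

    struct kstat_kv kvs[nitems(msk_mib)];
    size_t i;

    for (i = 0; i < nitems(msk_mib); i++) {
        const struct msk_mib *m = &msk_mib[i];
        /* one KV per counter, typed and unit-tagged from the table */
        kstat_kv_unit_init(&kvs[i], m->name, m->type, m->unit);
    }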
301
302/* supported device vendors */
303const struct pci_matchid mskc_devices[] = {
304 { PCI_VENDOR_DLINK0x1186, PCI_PRODUCT_DLINK_DGE550SX0x4001 },
305 { PCI_VENDOR_DLINK0x1186, PCI_PRODUCT_DLINK_DGE550T_B10x4b03 },
306 { PCI_VENDOR_DLINK0x1186, PCI_PRODUCT_DLINK_DGE560SX0x4b02 },
307 { PCI_VENDOR_DLINK0x1186, PCI_PRODUCT_DLINK_DGE560T0x4b00 },
308 { PCI_VENDOR_MARVELL0x11ab, PCI_PRODUCT_MARVELL_YUKON_8021CU0x4340 },
309 { PCI_VENDOR_MARVELL0x11ab, PCI_PRODUCT_MARVELL_YUKON_8021X0x4344 },
310 { PCI_VENDOR_MARVELL0x11ab, PCI_PRODUCT_MARVELL_YUKON_8022CU0x4341 },
311 { PCI_VENDOR_MARVELL0x11ab, PCI_PRODUCT_MARVELL_YUKON_8022X0x4345 },
312 { PCI_VENDOR_MARVELL0x11ab, PCI_PRODUCT_MARVELL_YUKON_80350x4350 },
313 { PCI_VENDOR_MARVELL0x11ab, PCI_PRODUCT_MARVELL_YUKON_80360x4351 },
314 { PCI_VENDOR_MARVELL0x11ab, PCI_PRODUCT_MARVELL_YUKON_80380x4352 },
315 { PCI_VENDOR_MARVELL0x11ab, PCI_PRODUCT_MARVELL_YUKON_80390x4353 },
316 { PCI_VENDOR_MARVELL0x11ab, PCI_PRODUCT_MARVELL_YUKON_80400x4354 },
317 { PCI_VENDOR_MARVELL0x11ab, PCI_PRODUCT_MARVELL_YUKON_8040T0x4355 },
318 { PCI_VENDOR_MARVELL0x11ab, PCI_PRODUCT_MARVELL_YUKON_80420x4357 },
319 { PCI_VENDOR_MARVELL0x11ab, PCI_PRODUCT_MARVELL_YUKON_80480x435a },
320 { PCI_VENDOR_MARVELL0x11ab, PCI_PRODUCT_MARVELL_YUKON_80500x4361 },
321 { PCI_VENDOR_MARVELL0x11ab, PCI_PRODUCT_MARVELL_YUKON_80520x4360 },
322 { PCI_VENDOR_MARVELL0x11ab, PCI_PRODUCT_MARVELL_YUKON_80530x4362 },
323 { PCI_VENDOR_MARVELL0x11ab, PCI_PRODUCT_MARVELL_YUKON_80550x4363 },
324 { PCI_VENDOR_MARVELL0x11ab, PCI_PRODUCT_MARVELL_YUKON_8055_20x436d },
325 { PCI_VENDOR_MARVELL0x11ab, PCI_PRODUCT_MARVELL_YUKON_80560x4364 },
326 { PCI_VENDOR_MARVELL0x11ab, PCI_PRODUCT_MARVELL_YUKON_80570x4380 },
327 { PCI_VENDOR_MARVELL0x11ab, PCI_PRODUCT_MARVELL_YUKON_80580x436a },
328 { PCI_VENDOR_MARVELL0x11ab, PCI_PRODUCT_MARVELL_YUKON_80590x4381 },
329 { PCI_VENDOR_MARVELL0x11ab, PCI_PRODUCT_MARVELL_YUKON_8061CU0x4342 },
330 { PCI_VENDOR_MARVELL0x11ab, PCI_PRODUCT_MARVELL_YUKON_8061X0x4346 },
331 { PCI_VENDOR_MARVELL0x11ab, PCI_PRODUCT_MARVELL_YUKON_8062CU0x4343 },
332 { PCI_VENDOR_MARVELL0x11ab, PCI_PRODUCT_MARVELL_YUKON_8062X0x4347 },
333 { PCI_VENDOR_MARVELL0x11ab, PCI_PRODUCT_MARVELL_YUKON_80700x4365 },
334 { PCI_VENDOR_MARVELL0x11ab, PCI_PRODUCT_MARVELL_YUKON_80710x436b },
335 { PCI_VENDOR_MARVELL0x11ab, PCI_PRODUCT_MARVELL_YUKON_80720x436c },
336 { PCI_VENDOR_MARVELL0x11ab, PCI_PRODUCT_MARVELL_YUKON_80750x4370 },
337 { PCI_VENDOR_MARVELL0x11ab, PCI_PRODUCT_MARVELL_YUKON_80790x4382 },
338 { PCI_VENDOR_MARVELL0x11ab, PCI_PRODUCT_MARVELL_YUKON_C0320x4367 },
339 { PCI_VENDOR_MARVELL0x11ab, PCI_PRODUCT_MARVELL_YUKON_C0330x4356 },
340 { PCI_VENDOR_MARVELL0x11ab, PCI_PRODUCT_MARVELL_YUKON_C0340x4368 },
341 { PCI_VENDOR_MARVELL0x11ab, PCI_PRODUCT_MARVELL_YUKON_C0360x4366 },
342 { PCI_VENDOR_MARVELL0x11ab, PCI_PRODUCT_MARVELL_YUKON_C0420x4369 },
343 { PCI_VENDOR_SCHNEIDERKOCH0x1148, PCI_PRODUCT_SCHNEIDERKOCH_SK9EXX0x9e00 },
344 { PCI_VENDOR_SCHNEIDERKOCH0x1148, PCI_PRODUCT_SCHNEIDERKOCH_SK9SXX0x9000 }
345};
346
347static inline u_int32_t
348sk_win_read_4(struct sk_softc *sc, u_int32_t reg)
349{
350 return CSR_READ_4(sc, reg);
351}
352
353static inline u_int16_t
354sk_win_read_2(struct sk_softc *sc, u_int32_t reg)
355{
356 return CSR_READ_2(sc, reg);
357}
358
359static inline u_int8_t
360sk_win_read_1(struct sk_softc *sc, u_int32_t reg)
361{
362 return CSR_READ_1(sc, reg);
363}
364
365static inline void
366sk_win_write_4(struct sk_softc *sc, u_int32_t reg, u_int32_t x)
367{
368 CSR_WRITE_4(sc, reg, x);
369}
370
371static inline void
372sk_win_write_2(struct sk_softc *sc, u_int32_t reg, u_int16_t x)
373{
374 CSR_WRITE_2(sc, reg, x);
375}
376
377static inline void
378sk_win_write_1(struct sk_softc *sc, u_int32_t reg, u_int8_t x)
379{
380 CSR_WRITE_1(sc, reg, x);
381}
382
383int
384msk_miibus_readreg(struct device *dev, int phy, int reg)
385{
386 struct sk_if_softc *sc_if = (struct sk_if_softc *)dev;
387 u_int16_t val;
388 int i;
389
390 SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) |
391 YU_SMICR_REGAD(reg) | YU_SMICR_OP_READ);
392
393 for (i = 0; i < SK_TIMEOUT; i++) {
394 DELAY(1);
395 val = SK_YU_READ_2(sc_if, YUKON_SMICR);
396 if (val & YU_SMICR_READ_VALID)
397 break;
398 }
399
400 if (i == SK_TIMEOUT) {
401 printf("%s: phy failed to come ready\n",
402 sc_if->sk_dev.dv_xname);
403 return (0);
404 }
405
406 DPRINTFN(9, ("msk_miibus_readreg: i=%d, timeout=%d\n", i,
407 SK_TIMEOUT));
408
409 val = SK_YU_READ_2(sc_if, YUKON_SMIDR);
410
411 DPRINTFN(9, ("msk_miibus_readreg phy=%d, reg=%#x, val=%#x\n",
412 phy, reg, val));
413
414 return (val);
415}
416
417void
418msk_miibus_writereg(struct device *dev, int phy, int reg, int val)
419{
420 struct sk_if_softc *sc_if = (struct sk_if_softc *)dev;
421 int i;
422
423 DPRINTFN(9, ("msk_miibus_writereg phy=%d reg=%#x val=%#x\n",
424 phy, reg, val));
425
426 SK_YU_WRITE_2(sc_if, YUKON_SMIDR, val);
427 SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) |
428 YU_SMICR_REGAD(reg) | YU_SMICR_OP_WRITE);
429
430 for (i = 0; i < SK_TIMEOUT; i++) {
431 DELAY(1);
432 if (!(SK_YU_READ_2(sc_if, YUKON_SMICR) & YU_SMICR_BUSY))
433 break;
434 }
435
436 if (i == SK_TIMEOUT)
437 printf("%s: phy write timed out\n", sc_if->sk_dev.dv_xname);
438}
439
440void
441msk_miibus_statchg(struct device *dev)
442{
443 struct sk_if_softc *sc_if = (struct sk_if_softc *)dev;
444 struct mii_data *mii = &sc_if->sk_mii;
445 struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
446 int gpcr;
447
448 gpcr = SK_YU_READ_2(sc_if, YUKON_GPCR);
449 gpcr &= (YU_GPCR_TXEN | YU_GPCR_RXEN);
450
451 if (IFM_SUBTYPE(ife->ifm_media) != IFM_AUTO ||
452 sc_if->sk_softc->sk_type == SK_YUKON_FE_P) {
453 /* Set speed. */
454 gpcr |= YU_GPCR_SPEED_DIS;
455 switch (IFM_SUBTYPE(mii->mii_media_active)) {
456 case IFM_1000_SX:
457 case IFM_1000_LX:
458 case IFM_1000_CX:
459 case IFM_1000_T:
460 gpcr |= (YU_GPCR_GIG | YU_GPCR_SPEED);
461 break;
462 case IFM_100_TX:
463 gpcr |= YU_GPCR_SPEED;
464 break;
465 }
466
467 /* Set duplex. */
468 gpcr |= YU_GPCR_DPLX_DIS;
469 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
470 gpcr |= YU_GPCR_DUPLEX;
471
472 /* Disable flow control. */
473 gpcr |= YU_GPCR_FCTL_DIS;
474 gpcr |= (YU_GPCR_FCTL_TX_DIS | YU_GPCR_FCTL_RX_DIS);
475 }
476
477 SK_YU_WRITE_2(sc_if, YUKON_GPCR, gpcr);
478
479 DPRINTFN(9, ("msk_miibus_statchg: gpcr=%x\n",
480 SK_YU_READ_2(((struct sk_if_softc *)dev), YUKON_GPCR)));
481}
482
483void
484msk_iff(struct sk_if_softc *sc_if)
485{
486 struct ifnet *ifp = &sc_if->arpcom.ac_if;
487 struct arpcom *ac = &sc_if->arpcom;
488 struct ether_multi *enm;
489 struct ether_multistep step;
490 u_int32_t hashes[2];
491 u_int16_t rcr;
492 int h;
493
494 rcr = SK_YU_READ_2(sc_if, YUKON_RCR);
495 rcr &= ~(YU_RCR_MUFLEN | YU_RCR_UFLEN);
496 ifp->if_flags &= ~IFF_ALLMULTI;
497
498 /*
499 * Always accept frames destined to our station address.
500 */
501 rcr |= YU_RCR_UFLEN;
502
503 if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
504 ifp->if_flags |= IFF_ALLMULTI;
505 if (ifp->if_flags & IFF_PROMISC)
506 rcr &= ~YU_RCR_UFLEN;
507 else
508 rcr |= YU_RCR_MUFLEN;
509 hashes[0] = hashes[1] = 0xFFFFFFFF;
510 } else {
511 rcr |= YU_RCR_MUFLEN;
512 /* Program new filter. */
513 bzero(hashes, sizeof(hashes));
514
515 ETHER_FIRST_MULTI(step, ac, enm);
516 while (enm != NULL) {
517 h = ether_crc32_be(enm->enm_addrlo,
518 ETHER_ADDR_LEN) & ((1 << SK_HASH_BITS) - 1);
519
520 if (h < 32)
521 hashes[0] |= (1 << h);
522 else
523 hashes[1] |= (1 << (h - 32));
524
525 ETHER_NEXT_MULTI(step, enm);
526 }
527 }
528
529 SK_YU_WRITE_2(sc_if, YUKON_MCAH1, hashes[0] & 0xffff);
530 SK_YU_WRITE_2(sc_if, YUKON_MCAH2, (hashes[0] >> 16) & 0xffff);
531 SK_YU_WRITE_2(sc_if, YUKON_MCAH3, hashes[1] & 0xffff);
532 SK_YU_WRITE_2(sc_if, YUKON_MCAH4, (hashes[1] >> 16) & 0xffff);
533 SK_YU_WRITE_2(sc_if, YUKON_RCR, rcr);
534}
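The multicast filter above folds each group address into a 64-bit hash that is split across four 16-bit MCAH registers. A small standalone sketch of the same folding, using a made-up group address:

    /* SK_HASH_BITS is 6, so the CRC is folded into a bucket 0..63 */
    uint8_t group[ETHER_ADDR_LEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };  /* example only */
    uint32_t hashes[2] = { 0, 0 };
    int h;

    h = ether_crc32_be(group, ETHER_ADDR_LEN) & ((1 << SK_HASH_BITS) - 1);
    if (h < 32)
        hashes[0] |= (1U << h);
    else
        hashes[1] |= (1U << (h - 32));
    /* hashes[0] low/high halves -> YUKON_MCAH1/2, hashes[1] -> YUKON_MCAH3/4 */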
535
536int
537msk_init_rx_ring(struct sk_if_softc *sc_if)
538{
539 struct msk_ring_data *rd = sc_if->sk_rdata;
540 struct msk_rx_desc *r;
541
542 memset(rd->sk_rx_ring, 0, sizeof(struct msk_rx_desc) * MSK_RX_RING_CNT)__builtin_memset((rd->sk_rx_ring), (0), (sizeof(struct msk_rx_desc
) * 512))
;
543
544 r = &rd->sk_rx_ring[0];
545 r->sk_addr = htole32(0)((__uint32_t)(0));
546 r->sk_opcode = SK_Y2_RXOPC_OWN0x80 | SK_Y2_RXOPC_ADDR640x21;
547
548 sc_if->sk_cdata.sk_rx_prod = 1;
549 sc_if->sk_cdata.sk_rx_cons = 0;
550 sc_if->sk_cdata.sk_rx_hiaddr = 0;
551
552 /*
553 * up to two ring entries per packet, so the effective ring size is
554 * halved
555 */
556 if_rxr_init(&sc_if->sk_cdata.sk_rx_ring, 2, (MSK_RX_RING_CNT512/2) - 1);
557
558 msk_fill_rx_ring(sc_if);
559 return (0);
560}
561
562int
563msk_init_tx_ring(struct sk_if_softc *sc_if)
564{
565 struct sk_softc *sc = sc_if->sk_softc;
566 struct msk_ring_data *rd = sc_if->sk_rdata;
567 struct msk_tx_desc *t;
568 int i;
569
570 memset(rd->sk_tx_ring, 0, sizeof(struct msk_tx_desc) * MSK_TX_RING_CNT)__builtin_memset((rd->sk_tx_ring), (0), (sizeof(struct msk_tx_desc
) * 512))
;
571
572 for (i = 0; i < MSK_TX_RING_CNT512; i++) {
573 if (bus_dmamap_create(sc->sc_dmatag, sc_if->sk_pktlen,(*(sc->sc_dmatag)->_dmamap_create)((sc->sc_dmatag), (
sc_if->sk_pktlen), (30), (sc_if->sk_pktlen), (0), (0x0001
| 0x0002 | 0x2000), (&sc_if->sk_cdata.sk_tx_maps[i]))
574 SK_NTXSEG, sc_if->sk_pktlen, 0,(*(sc->sc_dmatag)->_dmamap_create)((sc->sc_dmatag), (
sc_if->sk_pktlen), (30), (sc_if->sk_pktlen), (0), (0x0001
| 0x0002 | 0x2000), (&sc_if->sk_cdata.sk_tx_maps[i]))
575 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,(*(sc->sc_dmatag)->_dmamap_create)((sc->sc_dmatag), (
sc_if->sk_pktlen), (30), (sc_if->sk_pktlen), (0), (0x0001
| 0x0002 | 0x2000), (&sc_if->sk_cdata.sk_tx_maps[i]))
576 &sc_if->sk_cdata.sk_tx_maps[i])(*(sc->sc_dmatag)->_dmamap_create)((sc->sc_dmatag), (
sc_if->sk_pktlen), (30), (sc_if->sk_pktlen), (0), (0x0001
| 0x0002 | 0x2000), (&sc_if->sk_cdata.sk_tx_maps[i]))
)
577 return (ENOBUFS55);
578 }
579
580 t = &rd->sk_tx_ring[0];
581 t->sk_addr = htole32(0)((__uint32_t)(0));
582 t->sk_opcode = SK_Y2_TXOPC_OWN0x80 | SK_Y2_TXOPC_ADDR640x21;
583
584 sc_if->sk_cdata.sk_tx_prod = 1;
585 sc_if->sk_cdata.sk_tx_cons = 0;
586 sc_if->sk_cdata.sk_tx_hiaddr = 0;
587
588 MSK_CDTXSYNC(sc_if, 0, MSK_TX_RING_CNT, BUS_DMASYNC_PREWRITE)do { int __x, __n; __x = (0); __n = (512); if ((__x + __n) >
512) { (*((sc_if)->sk_softc->sc_dmatag)->_dmamap_sync
)(((sc_if)->sk_softc->sc_dmatag), ((sc_if)->sk_ring_map
), (__builtin_offsetof(struct msk_ring_data, sk_tx_ring[(__x)
])), (sizeof(struct msk_tx_desc) * (512 - __x)), ((0x04))); __n
-= (512 - __x); __x = 0; } (*((sc_if)->sk_softc->sc_dmatag
)->_dmamap_sync)(((sc_if)->sk_softc->sc_dmatag), ((sc_if
)->sk_ring_map), (__builtin_offsetof(struct msk_ring_data,
sk_tx_ring[((__x))])), (sizeof(struct msk_tx_desc) * __n), (
(0x04))); } while ( 0)
;
589
590 return (0);
591}
592
593static int
594msk_newbuf(struct sk_if_softc *sc_if)
595{
596 struct msk_ring_data *rd = sc_if->sk_rdata;
597 struct msk_rx_desc *r;
598 struct mbuf *m;
599 bus_dmamap_t map;
600 uint64_t addr;
601 uint32_t prod, head;
602 uint32_t hiaddr;
603 unsigned int pktlen = sc_if->sk_pktlen + ETHER_ALIGN2;
604
605 m = MCLGETL(NULL, M_DONTWAIT, pktlen)m_clget((((void *)0)), (0x0002), (pktlen));
606 if (m == NULL((void *)0))
607 return (0);
608 m->m_lenm_hdr.mh_len = m->m_pkthdrM_dat.MH.MH_pkthdr.len = pktlen;
609 m_adj(m, ETHER_ALIGN2);
610
611 prod = sc_if->sk_cdata.sk_rx_prod;
612 map = sc_if->sk_cdata.sk_rx_maps[prod];
613
614 if (bus_dmamap_load_mbuf(sc_if->sk_softc->sc_dmatag, map, m,(*(sc_if->sk_softc->sc_dmatag)->_dmamap_load_mbuf)((
sc_if->sk_softc->sc_dmatag), (map), (m), (0x0200|0x0001
))
615 BUS_DMA_READ|BUS_DMA_NOWAIT)(*(sc_if->sk_softc->sc_dmatag)->_dmamap_load_mbuf)((
sc_if->sk_softc->sc_dmatag), (map), (m), (0x0200|0x0001
))
!= 0) {
616 m_freem(m);
617 return (0);
618 }
619
620 bus_dmamap_sync(sc_if->sk_softc->sc_dmatag, map, 0,(*(sc_if->sk_softc->sc_dmatag)->_dmamap_sync)((sc_if
->sk_softc->sc_dmatag), (map), (0), (map->dm_mapsize
), (0x01))
621 map->dm_mapsize, BUS_DMASYNC_PREREAD)(*(sc_if->sk_softc->sc_dmatag)->_dmamap_sync)((sc_if
->sk_softc->sc_dmatag), (map), (0), (map->dm_mapsize
), (0x01))
;
622
623 head = prod;
624
625 /* high 32 bits of address */
626 addr = map->dm_segs[0].ds_addr;
627 hiaddr = addr >> 32;
628 if (sc_if->sk_cdata.sk_rx_hiaddr != hiaddr) {
629 r = &rd->sk_rx_ring[prod];
630 htolem32(&r->sk_addr, hiaddr)(*(__uint32_t *)(&r->sk_addr) = ((__uint32_t)(hiaddr))
)
;
631 r->sk_len = htole16(0)((__uint16_t)(0));
632 r->sk_ctl = 0;
633 r->sk_opcode = SK_Y2_RXOPC_OWN0x80 | SK_Y2_RXOPC_ADDR640x21;
634
635 sc_if->sk_cdata.sk_rx_hiaddr = hiaddr;
636
637 SK_INC(prod, MSK_RX_RING_CNT)(prod) = (prod + 1) % 512;
638 }
639
640 r = &rd->sk_rx_ring[prod];
641 htolem32(&r->sk_addr, addr)(*(__uint32_t *)(&r->sk_addr) = ((__uint32_t)(addr)));
642 htolem16(&r->sk_len, map->dm_segs[0].ds_len)(*(__uint16_t *)(&r->sk_len) = ((__uint16_t)(map->dm_segs
[0].ds_len)))
;
643 r->sk_ctl = 0;
644 r->sk_opcode = SK_Y2_RXOPC_OWN0x80 | SK_Y2_RXOPC_PACKET0x41;
645
646 sc_if->sk_cdata.sk_rx_maps[head] = sc_if->sk_cdata.sk_rx_maps[prod];
647 sc_if->sk_cdata.sk_rx_maps[prod] = map;
648
649 sc_if->sk_cdata.sk_rx_mbuf[prod] = m;
650
651 SK_INC(prod, MSK_RX_RING_CNT)(prod) = (prod + 1) % 512;
652 sc_if->sk_cdata.sk_rx_prod = prod;
653
654 return (1);
655}
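msk_newbuf() above copes with 64-bit DMA addresses even though a descriptor only carries 32 address bits: whenever the high word of the buffer address differs from the cached sk_rx_hiaddr, an extra SK_Y2_RXOPC_ADDR64 entry holding the new high word is queued ahead of the packet entry, which is why one packet can cost two ring slots. A worked example with a hypothetical bus address:

    uint64_t addr = 0x000000012345a000ULL;   /* made-up cluster address */
    uint32_t hiaddr = addr >> 32;            /* 0x00000001 */
    uint32_t loaddr = addr;                  /* 0x2345a000 (truncated low word) */
    /*
     * hiaddr != sk_rx_hiaddr (still 0) -> queue an ADDR64 descriptor
     * carrying 0x00000001 first, then the PACKET descriptor with
     * loaddr and the segment length: two ring entries for one mbuf.
     */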
656
657/*
658 * Set media options.
659 */
660int
661msk_ifmedia_upd(struct ifnet *ifp)
662{
663 struct sk_if_softc *sc_if = ifp->if_softc;
664
665 mii_mediachg(&sc_if->sk_mii);
666 return (0);
667}
668
669/*
670 * Report current media status.
671 */
672void
673msk_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
674{
675 struct sk_if_softc *sc_if = ifp->if_softc;
676
677 mii_pollstat(&sc_if->sk_mii);
678 ifmr->ifm_active = sc_if->sk_mii.mii_media_active;
679 ifmr->ifm_status = sc_if->sk_mii.mii_media_status;
680}
681
682int
683msk_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
684{
685 struct sk_if_softc *sc_if = ifp->if_softc;
686 struct ifreq *ifr = (struct ifreq *) data;
687 struct mii_data *mii;
688 int s, error = 0;
689
690 s = splnet()splraise(0x4);
691
692 switch(command) {
693 case SIOCSIFADDR((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((12)))
:
694 ifp->if_flags |= IFF_UP0x1;
695 if (!(ifp->if_flags & IFF_RUNNING0x40))
696 msk_init(sc_if);
697 break;
698
699 case SIOCSIFFLAGS((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((16)))
:
700 if (ifp->if_flags & IFF_UP0x1) {
701 if (ifp->if_flags & IFF_RUNNING0x40)
702 error = ENETRESET52;
703 else
704 msk_init(sc_if);
705 } else {
706 if (ifp->if_flags & IFF_RUNNING0x40)
707 msk_stop(sc_if, 0);
708 }
709 break;
710
711 case SIOCGIFMEDIA(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifmediareq) & 0x1fff) << 16) | ((('i')) <<
8) | ((56)))
:
712 case SIOCSIFMEDIA(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((55)))
:
713 mii = &sc_if->sk_mii;
714 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
715 break;
716
717 case SIOCGIFRXR((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((170)))
:
718 error = if_rxr_ioctl((struct if_rxrinfo *)ifr->ifr_dataifr_ifru.ifru_data,
719 NULL((void *)0), sc_if->sk_pktlen, &sc_if->sk_cdata.sk_rx_ring);
720 break;
721
722 default:
723 error = ether_ioctl(ifp, &sc_if->arpcom, command, data);
724 }
725
726 if (error == ENETRESET52) {
727 if (ifp->if_flags & IFF_RUNNING0x40)
728 msk_iff(sc_if);
729 error = 0;
730 }
731
732 splx(s)spllower(s);
733 return (error);
734}
735
736/*
737 * Probe for a SysKonnect GEnesis chip. Check the PCI vendor and device
738 * IDs against our list and return a device name if we find a match.
739 */
740int
741mskc_probe(struct device *parent, void *match, void *aux)
742{
743 return (pci_matchbyid((struct pci_attach_args *)aux, mskc_devices,
744 nitems(mskc_devices)(sizeof((mskc_devices)) / sizeof((mskc_devices)[0]))));
745}
746
747/*
748 * Force the GEnesis into reset, then bring it out of reset.
749 */
750void
751mskc_reset(struct sk_softc *sc)
752{
753 u_int32_t imtimer_ticks, reg1;
754 int reg;
755 unsigned int i;
756
757 DPRINTFN(2, ("mskc_reset\n"));
758
759 CSR_WRITE_1(sc, SK_CSR, SK_CSR_SW_RESET)(((sc)->sk_btag)->write_1(((sc)->sk_bhandle), ((0x0004
)), ((0x0001))))
;
760 CSR_WRITE_1(sc, SK_CSR, SK_CSR_MASTER_RESET)(((sc)->sk_btag)->write_1(((sc)->sk_bhandle), ((0x0004
)), ((0x0004))))
;
761
762 DELAY(1000)(*delay_func)(1000);
763 CSR_WRITE_1(sc, SK_CSR, SK_CSR_SW_UNRESET)(((sc)->sk_btag)->write_1(((sc)->sk_bhandle), ((0x0004
)), ((0x0002))))
;
764 DELAY(2)(*delay_func)(2);
765 CSR_WRITE_1(sc, SK_CSR, SK_CSR_MASTER_UNRESET)(((sc)->sk_btag)->write_1(((sc)->sk_bhandle), ((0x0004
)), ((0x0008))))
;
766
767 sk_win_write_1(sc, SK_TESTCTL10x0158, 2);
768
769 if (sc->sk_type == SK_YUKON_EC_U0xB4 || sc->sk_type == SK_YUKON_EX0xB5 ||
770 sc->sk_type >= SK_YUKON_FE_P0xB8) {
771 /* enable all clocks. */
772 sk_win_write_4(sc, SK_Y2_PCI_REG(SK_PCI_OURREG3)((0x0080) + 0x1c00), 0);
773 reg1 = sk_win_read_4(sc, SK_Y2_PCI_REG(SK_PCI_OURREG4)((0x0084) + 0x1c00));
774 reg1 &= (SK_Y2_REG4_FORCE_ASPM_REQUEST0x00008000|
775 SK_Y2_REG4_ASPM_GPHY_LINK_DOWN0x00004000|
776 SK_Y2_REG4_ASPM_INT_FIFO_EMPTY0x00002000|
777 SK_Y2_REG4_ASPM_CLKRUN_REQUEST0x00001000);
778 sk_win_write_4(sc, SK_Y2_PCI_REG(SK_PCI_OURREG4)((0x0084) + 0x1c00), reg1);
779
780 reg1 = sk_win_read_4(sc, SK_Y2_PCI_REG(SK_PCI_OURREG5)((0x0088) + 0x1c00));
781 reg1 &= SK_Y2_REG5_TIM_VMAIN_AV_MASK0x18000000;
782 sk_win_write_4(sc, SK_Y2_PCI_REG(SK_PCI_OURREG5)((0x0088) + 0x1c00), reg1);
783 sk_win_write_4(sc, SK_Y2_PCI_REG(SK_PCI_CFGREG1)((0x0094) + 0x1c00), 0);
784
785 /*
786 * Disable status race, workaround for Yukon EC Ultra &
787 * Yukon EX.
788 */
789 reg1 = sk_win_read_4(sc, SK_GPIO0x015C);
790 reg1 |= SK_Y2_GPIO_STAT_RACE_DIS0x00002000;
791 sk_win_write_4(sc, SK_GPIO0x015C, reg1);
792 sk_win_read_4(sc, SK_GPIO0x015C);
793 }
794
795 reg1 = sk_win_read_4(sc, SK_Y2_PCI_REG(SK_PCI_OURREG1)((0x0040) + 0x1c00));
796 if (sc->sk_type == SK_YUKON_XL0xB3 && sc->sk_rev > SK_YUKON_XL_REV_A10x1)
797 reg1 |= (SK_Y2_REG1_PHY1_COMA0x10000000 | SK_Y2_REG1_PHY2_COMA0x20000000);
798 else
799 reg1 &= ~(SK_Y2_REG1_PHY1_COMA0x10000000 | SK_Y2_REG1_PHY2_COMA0x20000000);
800 sk_win_write_4(sc, SK_Y2_PCI_REG(SK_PCI_OURREG1)((0x0040) + 0x1c00), reg1);
801
802 if (sc->sk_type == SK_YUKON_XL0xB3 && sc->sk_rev > SK_YUKON_XL_REV_A10x1)
803 sk_win_write_1(sc, SK_Y2_CLKGATE0x011D,
804 SK_Y2_CLKGATE_LINK1_GATE_DIS0x04 |
805 SK_Y2_CLKGATE_LINK2_GATE_DIS0x40 |
806 SK_Y2_CLKGATE_LINK1_CORE_DIS0x02 |
807 SK_Y2_CLKGATE_LINK2_CORE_DIS0x20 |
808 SK_Y2_CLKGATE_LINK1_PCI_DIS0x01 | SK_Y2_CLKGATE_LINK2_PCI_DIS0x10);
809 else
810 sk_win_write_1(sc, SK_Y2_CLKGATE0x011D, 0);
811
812 CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_SET)(((sc)->sk_btag)->write_2(((sc)->sk_bhandle), ((0x0f10
)), ((0x0001))))
;
813 CSR_WRITE_2(sc, SK_LINK_CTRL + SK_WIN_LEN, SK_LINK_RESET_SET)(((sc)->sk_btag)->write_2(((sc)->sk_bhandle), ((0x0f10
+ 0x80)), ((0x0001))))
;
814 DELAY(1000)(*delay_func)(1000);
815 CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_CLEAR)(((sc)->sk_btag)->write_2(((sc)->sk_bhandle), ((0x0f10
)), ((0x0002))))
;
816 CSR_WRITE_2(sc, SK_LINK_CTRL + SK_WIN_LEN, SK_LINK_RESET_CLEAR)(((sc)->sk_btag)->write_2(((sc)->sk_bhandle), ((0x0f10
+ 0x80)), ((0x0002))))
;
817
818 if (sc->sk_type == SK_YUKON_EX0xB5 || sc->sk_type == SK_YUKON_SUPR0xB9) {
819 CSR_WRITE_2(sc, SK_GMAC_CTRL, SK_GMAC_BYP_MACSECRX |(((sc)->sk_btag)->write_2(((sc)->sk_bhandle), ((0x0f00
)), ((0x00002000 | 0x00000800 | 0x00000200))))
820 SK_GMAC_BYP_MACSECTX | SK_GMAC_BYP_RETR_FIFO)(((sc)->sk_btag)->write_2(((sc)->sk_bhandle), ((0x0f00
)), ((0x00002000 | 0x00000800 | 0x00000200))))
;
821 }
822
823 sk_win_write_1(sc, SK_TESTCTL10x0158, 1);
824
825 DPRINTFN(2, ("mskc_reset: sk_csr=%x\n", CSR_READ_1(sc, SK_CSR)));
826 DPRINTFN(2, ("mskc_reset: sk_link_ctrl=%x\n",
827 CSR_READ_2(sc, SK_LINK_CTRL)));
828
829 /* Disable ASF */
830 CSR_WRITE_1(sc, SK_Y2_ASF_CSR, SK_Y2_ASF_RESET)(((sc)->sk_btag)->write_1(((sc)->sk_bhandle), ((0x0e68
)), ((0x08))))
;
831 CSR_WRITE_2(sc, SK_CSR, SK_CSR_ASF_OFF)(((sc)->sk_btag)->write_2(((sc)->sk_bhandle), ((0x0004
)), ((0x1000))))
;
832
833 /* Clear I2C IRQ noise */
834 CSR_WRITE_4(sc, SK_I2CHWIRQ, 1)(((sc)->sk_btag)->write_4(((sc)->sk_bhandle), ((0x0168
)), ((1))))
;
835
836 /* Disable hardware timer */
837 CSR_WRITE_1(sc, SK_TIMERCTL, SK_IMCTL_STOP)(((sc)->sk_btag)->write_1(((sc)->sk_bhandle), ((0x0138
)), ((0x02))))
;
838 CSR_WRITE_1(sc, SK_TIMERCTL, SK_IMCTL_IRQ_CLEAR)(((sc)->sk_btag)->write_1(((sc)->sk_bhandle), ((0x0138
)), ((0x01))))
;
839
840 /* Disable descriptor polling */
841 CSR_WRITE_4(sc, SK_DPT_TIMER_CTRL, SK_DPT_TCTL_STOP)(((sc)->sk_btag)->write_4(((sc)->sk_bhandle), ((0x0e08
)), ((0x0001))))
;
842
843 /* Disable time stamps */
844 CSR_WRITE_1(sc, SK_TSTAMP_CTL, SK_TSTAMP_STOP)(((sc)->sk_btag)->write_1(((sc)->sk_bhandle), ((0x0e18
)), ((0x02))))
;
845 CSR_WRITE_1(sc, SK_TSTAMP_CTL, SK_TSTAMP_IRQ_CLEAR)(((sc)->sk_btag)->write_1(((sc)->sk_bhandle), ((0x0e18
)), ((0x01))))
;
846
847 /* Enable RAM interface */
848 sk_win_write_1(sc, SK_RAMCTL0x01A0, SK_RAMCTL_UNRESET0x0002);
849 for (reg = SK_TO00x0190;reg <= SK_TO110x019B; reg++)
850 sk_win_write_1(sc, reg, 36);
851 sk_win_write_1(sc, SK_RAMCTL0x01A0 + (SK_WIN_LEN0x80 / 2), SK_RAMCTL_UNRESET0x0002);
852 for (reg = SK_TO00x0190;reg <= SK_TO110x019B; reg++)
853 sk_win_write_1(sc, reg + (SK_WIN_LEN0x80 / 2), 36);
854
855 /*
856 * Configure interrupt moderation. The moderation timer
857 * defers interrupts specified in the interrupt moderation
858 * timer mask based on the timeout specified in the interrupt
859 * moderation timer init register. Each bit in the timer
860 * register represents one tick, so to specify a timeout in
861 * microseconds, we have to multiply by the correct number of
862 * ticks-per-microsecond.
863 */
864 switch (sc->sk_type) {
865 case SK_YUKON_EC0xB6:
866 case SK_YUKON_EC_U0xB4:
867 case SK_YUKON_EX0xB5:
868 case SK_YUKON_SUPR0xB9:
869 case SK_YUKON_ULTRA20xBA:
870 case SK_YUKON_OPTIMA0xBC:
871 case SK_YUKON_PRM0xBD:
872 case SK_YUKON_OPTIMA20xBE:
873 imtimer_ticks = SK_IMTIMER_TICKS_YUKON_EC125;
874 break;
875 case SK_YUKON_FE0xB7:
876 imtimer_ticks = SK_IMTIMER_TICKS_YUKON_FE100;
877 break;
878 case SK_YUKON_FE_P0xB8:
879 imtimer_ticks = SK_IMTIMER_TICKS_YUKON_FE_P50;
880 break;
881 case SK_YUKON_XL0xB3:
882 imtimer_ticks = SK_IMTIMER_TICKS_YUKON_XL156;
883 break;
884 default:
885 imtimer_ticks = SK_IMTIMER_TICKS_YUKON78;
886 break;
887 }
888
889 /* Reset status ring. */
890 for (i = 0; i < MSK_STATUS_RING_CNT2048; i++)
891 sc->sk_status_ring[i] = htole64(0)((__uint64_t)(0));
892 sc->sk_status_idx = 0;
893
894 sk_win_write_4(sc, SK_STAT_BMU_CSR0x0e80, SK_STAT_BMU_RESET0x00000001);
895 sk_win_write_4(sc, SK_STAT_BMU_CSR0x0e80, SK_STAT_BMU_UNRESET0x00000002);
896
897 sk_win_write_2(sc, SK_STAT_BMU_LIDX0x0e84, MSK_STATUS_RING_CNT2048 - 1);
898 sk_win_write_4(sc, SK_STAT_BMU_ADDRLO0x0e88,
899 sc->sk_status_map->dm_segs[0].ds_addr);
900 sk_win_write_4(sc, SK_STAT_BMU_ADDRHI0x0e8c,
901 (u_int64_t)sc->sk_status_map->dm_segs[0].ds_addr >> 32);
902 sk_win_write_2(sc, SK_STAT_BMU_TX_THRESH0x0e98, 10);
903 sk_win_write_1(sc, SK_STAT_BMU_FIFOWM0x0eac, 16);
904 sk_win_write_1(sc, SK_STAT_BMU_FIFOIWM0x0ead, 16);
905
906#if 0
907 sk_win_write_4(sc, SK_Y2_LEV_ITIMERINIT0x0eb0, SK_IM_USECS(100)((100) * imtimer_ticks));
908 sk_win_write_4(sc, SK_Y2_TX_ITIMERINIT0x0ec0, SK_IM_USECS(1000)((1000) * imtimer_ticks));
909 sk_win_write_4(sc, SK_Y2_ISR_ITIMERINIT0x0ed0, SK_IM_USECS(20)((20) * imtimer_ticks));
910#else
911 sk_win_write_4(sc, SK_Y2_ISR_ITIMERINIT0x0ed0, SK_IM_USECS(4)((4) * imtimer_ticks));
912#endif
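Working through the timer arithmetic from the comment above: SK_IM_USECS(us) is simply us * imtimer_ticks, so on a chip using the Yukon EC value of 125 ticks per microsecond the 4 µs setting in the #else branch programs 4 * 125 = 500 ticks into SK_Y2_ISR_ITIMERINIT; the disabled #if 0 variant would have used 100 µs (12500 ticks), 1000 µs (125000 ticks) and 20 µs (2500 ticks) for the level, TX and ISR timers respectively.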
913
914 sk_win_write_4(sc, SK_STAT_BMU_CSR0x0e80, SK_STAT_BMU_ON0x00000008);
915
916 sk_win_write_1(sc, SK_Y2_LEV_ITIMERCTL0x0eb8, SK_IMCTL_START0x04);
917 sk_win_write_1(sc, SK_Y2_TX_ITIMERCTL0x0ec8, SK_IMCTL_START0x04);
918 sk_win_write_1(sc, SK_Y2_ISR_ITIMERCTL0x0ed8, SK_IMCTL_START0x04);
919}
920
921int
922msk_probe(struct device *parent, void *match, void *aux)
923{
924 struct skc_attach_args *sa = aux;
925
926 if (sa->skc_port != SK_PORT_A0 && sa->skc_port != SK_PORT_B1)
927 return (0);
928
929 switch (sa->skc_type) {
930 case SK_YUKON_XL0xB3:
931 case SK_YUKON_EC_U0xB4:
932 case SK_YUKON_EX0xB5:
933 case SK_YUKON_EC0xB6:
934 case SK_YUKON_FE0xB7:
935 case SK_YUKON_FE_P0xB8:
936 case SK_YUKON_SUPR0xB9:
937 case SK_YUKON_ULTRA20xBA:
938 case SK_YUKON_OPTIMA0xBC:
939 case SK_YUKON_PRM0xBD:
940 case SK_YUKON_OPTIMA20xBE:
941 return (1);
942 }
943
944 return (0);
945}
946
947void
948msk_reset(struct sk_if_softc *sc_if)
949{
950 /* GMAC and GPHY Reset */
951 SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_SET)sk_win_write_4(sc_if->sk_softc, 0x0f00 + ((sc_if->sk_port
* (0 + 1)) * 0x80), 0x00000001)
;
952 SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, SK_GPHY_RESET_SET)sk_win_write_4(sc_if->sk_softc, 0x0f04 + ((sc_if->sk_port
* (0 + 1)) * 0x80), 0x00000001)
;
953 DELAY(1000)(*delay_func)(1000);
954 SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, SK_GPHY_RESET_CLEAR)sk_win_write_4(sc_if->sk_softc, 0x0f04 + ((sc_if->sk_port
* (0 + 1)) * 0x80), 0x00000002)
;
955 SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_LOOP_OFF |sk_win_write_4(sc_if->sk_softc, 0x0f00 + ((sc_if->sk_port
* (0 + 1)) * 0x80), 0x00000010 | 0x00000008 | 0x00000002)
956 SK_GMAC_PAUSE_ON | SK_GMAC_RESET_CLEAR)sk_win_write_4(sc_if->sk_softc, 0x0f00 + ((sc_if->sk_port
* (0 + 1)) * 0x80), 0x00000010 | 0x00000008 | 0x00000002)
;
957}
958
959/*
960 * Each XMAC chip is attached as a separate logical IP interface.
961 * Single port cards will have only one logical interface of course.
962 */
963void
964msk_attach(struct device *parent, struct device *self, void *aux)
965{
966 struct sk_if_softc *sc_if = (struct sk_if_softc *)self;
967 struct sk_softc *sc = (struct sk_softc *)parent;
968 struct skc_attach_args *sa = aux;
969 struct ifnet *ifp;
970 caddr_t kva;
971 int i;
972 u_int32_t chunk;
973 int mii_flags;
974 int error;
975
976 sc_if->sk_port = sa->skc_port;
977 sc_if->sk_softc = sc;
978 sc->sk_if[sa->skc_port] = sc_if;
979
980 DPRINTFN(2, ("begin msk_attach: port=%d\n", sc_if->sk_port));
981
982 /*
983 * Get station address for this interface. Note that
984 * dual port cards actually come with three station
985 * addresses: one for each port, plus an extra. The
986 * extra one is used by the SysKonnect driver software
987 * as a 'virtual' station address for when both ports
988 * are operating in failover mode. Currently we don't
989 * use this extra address.
990 */
991 for (i = 0; i < ETHER_ADDR_LEN6; i++)
992 sc_if->arpcom.ac_enaddr[i] =
993 sk_win_read_1(sc, SK_MAC0_00x0100 + (sa->skc_port * 8) + i);
994
995 printf(": address %s\n",
996 ether_sprintf(sc_if->arpcom.ac_enaddr));
997
998 /*
999 * Set up RAM buffer addresses. The Yukon2 has a small amount
1000 * of SRAM on it, somewhere between 4K and 48K. We need to
1001 * divide this up between the transmitter and receiver. We
1002 * give the receiver 2/3 of the memory (rounded down), and the
1003 * transmitter whatever remains.
1004 */
1005 chunk = (2 * (sc->sk_ramsize / sizeof(u_int64_t)) / 3) & ~0xff;
1006 sc_if->sk_rx_ramstart = 0;
1007 sc_if->sk_rx_ramend = sc_if->sk_rx_ramstart + chunk - 1;
1008 chunk = (sc->sk_ramsize / sizeof(u_int64_t)) - chunk;
1009 sc_if->sk_tx_ramstart = sc_if->sk_rx_ramend + 1;
1010 sc_if->sk_tx_ramend = sc_if->sk_tx_ramstart + chunk - 1;
1011
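A worked instance of the split above, assuming sk_ramsize holds a byte count and a hypothetical part with 48 KB of packet SRAM: 49152 bytes is 6144 8-byte units; (2 * 6144 / 3) & ~0xff = 4096 (the & ~0xff rounds the receive share down to a multiple of 256 units), so receive gets units 0..4095 and transmit gets the remaining 6144 - 4096 = 2048 units, 4096..6143.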
1012 DPRINTFN(2, ("msk_attach: rx_ramstart=%#x rx_ramend=%#x\n"
1013 " tx_ramstart=%#x tx_ramend=%#x\n",
1014 sc_if->sk_rx_ramstart, sc_if->sk_rx_ramend,
1015 sc_if->sk_tx_ramstart, sc_if->sk_tx_ramend));
1016
1017 /* Allocate the descriptor queues. */
1018 if (bus_dmamem_alloc(sc->sc_dmatag, sizeof(struct msk_ring_data),(*(sc->sc_dmatag)->_dmamem_alloc)((sc->sc_dmatag), (
sizeof(struct msk_ring_data)), ((1 << 12)), (0), (&
sc_if->sk_ring_seg), (1), (&sc_if->sk_ring_nseg), (
0x0001 | 0x1000))
1019 PAGE_SIZE, 0, &sc_if->sk_ring_seg, 1, &sc_if->sk_ring_nseg,(*(sc->sc_dmatag)->_dmamem_alloc)((sc->sc_dmatag), (
sizeof(struct msk_ring_data)), ((1 << 12)), (0), (&
sc_if->sk_ring_seg), (1), (&sc_if->sk_ring_nseg), (
0x0001 | 0x1000))
1020 BUS_DMA_NOWAIT | BUS_DMA_ZERO)(*(sc->sc_dmatag)->_dmamem_alloc)((sc->sc_dmatag), (
sizeof(struct msk_ring_data)), ((1 << 12)), (0), (&
sc_if->sk_ring_seg), (1), (&sc_if->sk_ring_nseg), (
0x0001 | 0x1000))
) {
1021 printf(": can't alloc rx buffers\n");
1022 goto fail;
1023 }
1024 if (bus_dmamem_map(sc->sc_dmatag, &sc_if->sk_ring_seg,(*(sc->sc_dmatag)->_dmamem_map)((sc->sc_dmatag), (&
sc_if->sk_ring_seg), (sc_if->sk_ring_nseg), (sizeof(struct
msk_ring_data)), (&kva), (0x0001))
1025 sc_if->sk_ring_nseg,(*(sc->sc_dmatag)->_dmamem_map)((sc->sc_dmatag), (&
sc_if->sk_ring_seg), (sc_if->sk_ring_nseg), (sizeof(struct
msk_ring_data)), (&kva), (0x0001))
1026 sizeof(struct msk_ring_data), &kva, BUS_DMA_NOWAIT)(*(sc->sc_dmatag)->_dmamem_map)((sc->sc_dmatag), (&
sc_if->sk_ring_seg), (sc_if->sk_ring_nseg), (sizeof(struct
msk_ring_data)), (&kva), (0x0001))
) {
1027 printf(": can't map dma buffers (%lu bytes)\n",
1028 (ulong)sizeof(struct msk_ring_data));
1029 goto fail_1;
1030 }
1031 if (bus_dmamap_create(sc->sc_dmatag, sizeof(struct msk_ring_data), 1,(*(sc->sc_dmatag)->_dmamap_create)((sc->sc_dmatag), (
sizeof(struct msk_ring_data)), (1), (sizeof(struct msk_ring_data
)), (0), (0x0001 | 0x0002 | 0x2000), (&sc_if->sk_ring_map
))
1032 sizeof(struct msk_ring_data), 0,(*(sc->sc_dmatag)->_dmamap_create)((sc->sc_dmatag), (
sizeof(struct msk_ring_data)), (1), (sizeof(struct msk_ring_data
)), (0), (0x0001 | 0x0002 | 0x2000), (&sc_if->sk_ring_map
))
1033 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,(*(sc->sc_dmatag)->_dmamap_create)((sc->sc_dmatag), (
sizeof(struct msk_ring_data)), (1), (sizeof(struct msk_ring_data
)), (0), (0x0001 | 0x0002 | 0x2000), (&sc_if->sk_ring_map
))
1034 &sc_if->sk_ring_map)(*(sc->sc_dmatag)->_dmamap_create)((sc->sc_dmatag), (
sizeof(struct msk_ring_data)), (1), (sizeof(struct msk_ring_data
)), (0), (0x0001 | 0x0002 | 0x2000), (&sc_if->sk_ring_map
))
) {
1035 printf(": can't create dma map\n");
1036 goto fail_2;
1037 }
1038 if (bus_dmamap_load(sc->sc_dmatag, sc_if->sk_ring_map, kva,(*(sc->sc_dmatag)->_dmamap_load)((sc->sc_dmatag), (sc_if
->sk_ring_map), (kva), (sizeof(struct msk_ring_data)), (((
void *)0)), (0x0001))
1039 sizeof(struct msk_ring_data), NULL, BUS_DMA_NOWAIT)(*(sc->sc_dmatag)->_dmamap_load)((sc->sc_dmatag), (sc_if
->sk_ring_map), (kva), (sizeof(struct msk_ring_data)), (((
void *)0)), (0x0001))
) {
1040 printf(": can't load dma map\n");
1041 goto fail_3;
1042 }
1043 sc_if->sk_rdata = (struct msk_ring_data *)kva;
1044
1045 if (sc->sk_type != SK_YUKON_FE0xB7 &&
1046 sc->sk_type != SK_YUKON_FE_P0xB8)
1047 sc_if->sk_pktlen = SK_JLEN(9018 + 2);
1048 else
1049 sc_if->sk_pktlen = MCLBYTES(1 << 11);
1050
1051 for (i = 0; i < MSK_RX_RING_CNT512; i++) {
1052 if ((error = bus_dmamap_create(sc->sc_dmatag,(*(sc->sc_dmatag)->_dmamap_create)((sc->sc_dmatag), (
sc_if->sk_pktlen), (1), (sc_if->sk_pktlen), (0), (0x0001
| 0x0002 | 0x2000), (&sc_if->sk_cdata.sk_rx_maps[i]))
1053 sc_if->sk_pktlen, 1, sc_if->sk_pktlen, 0,(*(sc->sc_dmatag)->_dmamap_create)((sc->sc_dmatag), (
sc_if->sk_pktlen), (1), (sc_if->sk_pktlen), (0), (0x0001
| 0x0002 | 0x2000), (&sc_if->sk_cdata.sk_rx_maps[i]))
1054 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,(*(sc->sc_dmatag)->_dmamap_create)((sc->sc_dmatag), (
sc_if->sk_pktlen), (1), (sc_if->sk_pktlen), (0), (0x0001
| 0x0002 | 0x2000), (&sc_if->sk_cdata.sk_rx_maps[i]))
1055 &sc_if->sk_cdata.sk_rx_maps[i])(*(sc->sc_dmatag)->_dmamap_create)((sc->sc_dmatag), (
sc_if->sk_pktlen), (1), (sc_if->sk_pktlen), (0), (0x0001
| 0x0002 | 0x2000), (&sc_if->sk_cdata.sk_rx_maps[i]))
) != 0) {
1056 printf("\n%s: unable to create rx DMA map %d, "
1057 "error = %d\n", sc->sk_dev.dv_xname, i, error);
1058 goto fail_4;
1059 }
1060 }
1061
1062 ifp = &sc_if->arpcom.ac_if;
1063 ifp->if_softc = sc_if;
1064 ifp->if_flags = IFF_BROADCAST0x2 | IFF_SIMPLEX0x800 | IFF_MULTICAST0x8000;
1065 ifp->if_ioctl = msk_ioctl;
1066 ifp->if_start = msk_start;
1067 ifp->if_watchdog = msk_watchdog;
1068 if (sc->sk_type != SK_YUKON_FE0xB7 &&
1069 sc->sk_type != SK_YUKON_FE_P0xB8)
1070 ifp->if_hardmtu = SK_JUMBO_MTU(9018 - ((6 * 2) + 2) - 4);
1071 ifq_init_maxlen(&ifp->if_snd, MSK_TX_RING_CNT512 - 1);
1072 bcopy(sc_if->sk_dev.dv_xname, ifp->if_xname, IFNAMSIZ16);
1073
1074 ifp->if_capabilitiesif_data.ifi_capabilities = IFCAP_VLAN_MTU0x00000010;
1075
1076 msk_reset(sc_if);
1077
1078 /*
1079 * Do miibus setup.
1080 */
1081 msk_init_yukon(sc_if);
1082
1083 DPRINTFN(2, ("msk_attach: 1\n"));
1084
1085 sc_if->sk_mii.mii_ifp = ifp;
1086 sc_if->sk_mii.mii_readreg = msk_miibus_readreg;
1087 sc_if->sk_mii.mii_writereg = msk_miibus_writereg;
1088 sc_if->sk_mii.mii_statchg = msk_miibus_statchg;
1089
1090 ifmedia_init(&sc_if->sk_mii.mii_media, 0,
1091 msk_ifmedia_upd, msk_ifmedia_sts);
1092 mii_flags = MIIF_DOPAUSE0x0100;
1093 if (sc->sk_fibertype)
1094 mii_flags |= MIIF_HAVEFIBER0x0020;
1095 mii_attach(self, &sc_if->sk_mii, 0xffffffff, 0,
1096 MII_OFFSET_ANY-1, mii_flags);
1097 if (LIST_FIRST(&sc_if->sk_mii.mii_phys)((&sc_if->sk_mii.mii_phys)->lh_first) == NULL((void *)0)) {
1098 printf("%s: no PHY found!\n", sc_if->sk_dev.dv_xname);
1099 ifmedia_add(&sc_if->sk_mii.mii_media, IFM_ETHER0x0000000000000100ULL|IFM_MANUAL1ULL,
1100 0, NULL((void *)0));
1101 ifmedia_set(&sc_if->sk_mii.mii_media, IFM_ETHER0x0000000000000100ULL|IFM_MANUAL1ULL);
1102 } else
1103 ifmedia_set(&sc_if->sk_mii.mii_media, IFM_ETHER0x0000000000000100ULL|IFM_AUTO0ULL);
1104
1105 timeout_set(&sc_if->sk_tick_ch, msk_tick, sc_if);
1106 timeout_set(&sc_if->sk_tick_rx, msk_fill_rx_tick, sc_if);
1107
1108 /*
1109 * Call MI attach routines.
1110 */
1111 if_attach(ifp);
1112 ether_ifattach(ifp);
1113
1114#if NKSTAT1 > 0
1115 msk_kstat_attach(sc_if);
1116#endif
1117
1118 DPRINTFN(2, ("msk_attach: end\n"));
1119 return;
1120
1121fail_4:
1122 for (i = 0; i < MSK_RX_RING_CNT512; i++) {
1123 if (sc_if->sk_cdata.sk_rx_maps[i] != NULL((void *)0))
1124 bus_dmamap_destroy(sc->sc_dmatag,(*(sc->sc_dmatag)->_dmamap_destroy)((sc->sc_dmatag),
(sc_if->sk_cdata.sk_rx_maps[i]))
1125 sc_if->sk_cdata.sk_rx_maps[i])(*(sc->sc_dmatag)->_dmamap_destroy)((sc->sc_dmatag),
(sc_if->sk_cdata.sk_rx_maps[i]))
;
1126 }
1127
1128fail_3:
1129 bus_dmamap_destroy(sc->sc_dmatag, sc_if->sk_ring_map)(*(sc->sc_dmatag)->_dmamap_destroy)((sc->sc_dmatag),
(sc_if->sk_ring_map))
;
1130fail_2:
1131 bus_dmamem_unmap(sc->sc_dmatag, kva, sizeof(struct msk_ring_data))(*(sc->sc_dmatag)->_dmamem_unmap)((sc->sc_dmatag), (
kva), (sizeof(struct msk_ring_data)))
;
1132fail_1:
1133 bus_dmamem_free(sc->sc_dmatag, &sc_if->sk_ring_seg, sc_if->sk_ring_nseg)(*(sc->sc_dmatag)->_dmamem_free)((sc->sc_dmatag), (&
sc_if->sk_ring_seg), (sc_if->sk_ring_nseg))
;
1134fail:
1135 sc->sk_if[sa->skc_port] = NULL((void *)0);
1136}
1137
1138int
1139msk_detach(struct device *self, int flags)
1140{
1141 struct sk_if_softc *sc_if = (struct sk_if_softc *)self;
1142 struct sk_softc *sc = sc_if->sk_softc;
1143 struct ifnet *ifp= &sc_if->arpcom.ac_if;
1144
1145 if (sc->sk_if[sc_if->sk_port] == NULL((void *)0))
1146 return (0);
1147
1148 msk_stop(sc_if, 1);
1149
1150#if NKSTAT1 > 0
1151 msk_kstat_detach(sc_if);
1152#endif
1153
1154 /* Detach any PHYs we might have. */
1155 if (LIST_FIRST(&sc_if->sk_mii.mii_phys)((&sc_if->sk_mii.mii_phys)->lh_first) != NULL((void *)0))
1156 mii_detach(&sc_if->sk_mii, MII_PHY_ANY-1, MII_OFFSET_ANY-1);
1157
1158 /* Delete any remaining media. */
1159 ifmedia_delete_instance(&sc_if->sk_mii.mii_media, IFM_INST_ANY((uint64_t) -1));
1160
1161 ether_ifdetach(ifp);
1162 if_detach(ifp);
1163
1164 bus_dmamem_unmap(sc->sc_dmatag, (caddr_t)sc_if->sk_rdata,(*(sc->sc_dmatag)->_dmamem_unmap)((sc->sc_dmatag), (
(caddr_t)sc_if->sk_rdata), (sizeof(struct msk_ring_data)))
1165 sizeof(struct msk_ring_data))(*(sc->sc_dmatag)->_dmamem_unmap)((sc->sc_dmatag), (
(caddr_t)sc_if->sk_rdata), (sizeof(struct msk_ring_data)))
;
1166 bus_dmamem_free(sc->sc_dmatag,(*(sc->sc_dmatag)->_dmamem_free)((sc->sc_dmatag), (&
sc_if->sk_ring_seg), (sc_if->sk_ring_nseg))
1167 &sc_if->sk_ring_seg, sc_if->sk_ring_nseg)(*(sc->sc_dmatag)->_dmamem_free)((sc->sc_dmatag), (&
sc_if->sk_ring_seg), (sc_if->sk_ring_nseg))
;
1168 bus_dmamap_destroy(sc->sc_dmatag, sc_if->sk_ring_map)(*(sc->sc_dmatag)->_dmamap_destroy)((sc->sc_dmatag),
(sc_if->sk_ring_map))
;
1169 sc->sk_if[sc_if->sk_port] = NULL((void *)0);
1170
1171 return (0);
1172}
1173
1174int
1175msk_activate(struct device *self, int act)
1176{
1177 struct sk_if_softc *sc_if = (void *)self;
1178 struct ifnet *ifp = &sc_if->arpcom.ac_if;
1179 int rv = 0;
1180
1181 switch (act) {
1182 case DVACT_RESUME4:
1183 msk_reset(sc_if);
1184 if (ifp->if_flags & IFF_RUNNING0x40)
1185 msk_init(sc_if);
1186 break;
1187 default:
1188 rv = config_activate_children(self, act);
1189 break;
1190 }
1191 return (rv);
1192}
1193
1194int
1195mskcprint(void *aux, const char *pnp)
1196{
1197 struct skc_attach_args *sa = aux;
1198
1199 if (pnp)
1200 printf("msk port %c at %s",
1201 (sa->skc_port == SK_PORT_A0) ? 'A' : 'B', pnp);
1202 else
1203 printf(" port %c", (sa->skc_port == SK_PORT_A0) ? 'A' : 'B');
1204 return (UNCONF1);
1205}
1206
1207/*
1208 * Attach the interface. Allocate softc structures, do ifmedia
1209 * setup and ethernet/BPF attach.
1210 */
1211void
1212mskc_attach(struct device *parent, struct device *self, void *aux)
1213{
1214 struct sk_softc *sc = (struct sk_softc *)self;
1215 struct pci_attach_args *pa = aux;
1216 struct skc_attach_args skca;
1217 pci_chipset_tag_t pc = pa->pa_pc;
1218 pcireg_t memtype;
1219 pci_intr_handle_t ih;
1220 const char *intrstr = NULL((void *)0);
1221 u_int8_t hw, pmd;
1222 char *revstr = NULL((void *)0);
1223 caddr_t kva;
1224
1225 DPRINTFN(2, ("begin mskc_attach\n"));
1226
1227 pci_set_powerstate(pa->pa_pc, pa->pa_tag, PCI_PMCSR_STATE_D00x0000);
1228
1229 /*
1230 * Map control/status registers.
1231 */
1232 memtype = pci_mapreg_type(pc, pa->pa_tag, SK_PCI_LOMEM0x0010);
1233 if (pci_mapreg_map(pa, SK_PCI_LOMEM0x0010, memtype, 0, &sc->sk_btag,
1234 &sc->sk_bhandle, NULL((void *)0), &sc->sk_bsize, 0)) {
1235 printf(": can't map mem space\n");
1236 return;
1237 }
1238
1239 sc->sc_dmatag = pa->pa_dmat;
1240
1241 sc->sk_type = sk_win_read_1(sc, SK_CHIPVER0x011B);
1242 sc->sk_rev = (sk_win_read_1(sc, SK_CONFIG0x011A) >> 4);
1243
1244 /* bail out here if chip is not recognized */
1245 if (!(SK_IS_YUKON2(sc)((sc)->sk_type >= 0xB3 && (sc)->sk_type <=
0xBE)
)) {
1246 printf(": unknown chip type: %d\n", sc->sk_type);
1247 goto fail_1;
1248 }
1249 DPRINTFN(2, ("mskc_attach: allocate interrupt\n"));
1250
1251 if (PCI_VENDOR(pa->pa_id)(((pa->pa_id) >> 0) & 0xffff) == PCI_VENDOR_MARVELL0x11ab) {
1252 switch (PCI_PRODUCT(pa->pa_id)(((pa->pa_id) >> 16) & 0xffff)) {
1253 case PCI_PRODUCT_MARVELL_YUKON_80360x4351:
1254 case PCI_PRODUCT_MARVELL_YUKON_80530x4362:
1255 pa->pa_flags &= ~PCI_FLAGS_MSI_ENABLED0x20;
1256 }
1257 }
1258
1259 /* Allocate interrupt */
1260 if (pci_intr_map_msi(pa, &ih) != 0 && pci_intr_map(pa, &ih) != 0) {
1261 printf(": couldn't map interrupt\n");
1262 goto fail_1;
1263 }
1264
1265 intrstr = pci_intr_string(pc, ih);
1266 sc->sk_intrhand = pci_intr_establish(pc, ih, IPL_NET0x4, msk_intr, sc,
1267 self->dv_xname);
1268 if (sc->sk_intrhand == NULL((void *)0)) {
1269 printf(": couldn't establish interrupt");
1270 if (intrstr != NULL((void *)0))
1271 printf(" at %s", intrstr);
1272 printf("\n");
1273 goto fail_1;
1274 }
1275 sc->sk_pc = pc;
1276
1277 if (bus_dmamem_alloc(sc->sc_dmatag,(*(sc->sc_dmatag)->_dmamem_alloc)((sc->sc_dmatag), (
2048 * sizeof(uint64_t)), (2048 * sizeof(uint64_t)), (0), (&
sc->sk_status_seg), (1), (&sc->sk_status_nseg), (0x0001
| 0x1000))
1278 MSK_STATUS_RING_CNT * sizeof(uint64_t),(*(sc->sc_dmatag)->_dmamem_alloc)((sc->sc_dmatag), (
2048 * sizeof(uint64_t)), (2048 * sizeof(uint64_t)), (0), (&
sc->sk_status_seg), (1), (&sc->sk_status_nseg), (0x0001
| 0x1000))
1279 MSK_STATUS_RING_CNT * sizeof(uint64_t),(*(sc->sc_dmatag)->_dmamem_alloc)((sc->sc_dmatag), (
2048 * sizeof(uint64_t)), (2048 * sizeof(uint64_t)), (0), (&
sc->sk_status_seg), (1), (&sc->sk_status_nseg), (0x0001
| 0x1000))
1280 0, &sc->sk_status_seg, 1, &sc->sk_status_nseg,(*(sc->sc_dmatag)->_dmamem_alloc)((sc->sc_dmatag), (
2048 * sizeof(uint64_t)), (2048 * sizeof(uint64_t)), (0), (&
sc->sk_status_seg), (1), (&sc->sk_status_nseg), (0x0001
| 0x1000))
1281 BUS_DMA_NOWAIT | BUS_DMA_ZERO)(*(sc->sc_dmatag)->_dmamem_alloc)((sc->sc_dmatag), (
2048 * sizeof(uint64_t)), (2048 * sizeof(uint64_t)), (0), (&
sc->sk_status_seg), (1), (&sc->sk_status_nseg), (0x0001
| 0x1000))
) {
1282 printf(": can't alloc status buffers\n");
1283 goto fail_2;
1284 }
1285
1286 if (bus_dmamem_map(sc->sc_dmatag,(*(sc->sc_dmatag)->_dmamem_map)((sc->sc_dmatag), (&
sc->sk_status_seg), (sc->sk_status_nseg), (2048 * sizeof
(uint64_t)), (&kva), (0x0001))
1287 &sc->sk_status_seg, sc->sk_status_nseg,(*(sc->sc_dmatag)->_dmamem_map)((sc->sc_dmatag), (&
sc->sk_status_seg), (sc->sk_status_nseg), (2048 * sizeof
(uint64_t)), (&kva), (0x0001))
1288 MSK_STATUS_RING_CNT * sizeof(uint64_t),(*(sc->sc_dmatag)->_dmamem_map)((sc->sc_dmatag), (&
sc->sk_status_seg), (sc->sk_status_nseg), (2048 * sizeof
(uint64_t)), (&kva), (0x0001))
1289 &kva, BUS_DMA_NOWAIT)(*(sc->sc_dmatag)->_dmamem_map)((sc->sc_dmatag), (&
sc->sk_status_seg), (sc->sk_status_nseg), (2048 * sizeof
(uint64_t)), (&kva), (0x0001))
) {
1290 printf(": can't map dma buffers (%zu bytes)\n",
1291 MSK_STATUS_RING_CNT2048 * sizeof(uint64_t));
1292 goto fail_3;
1293 }
1294 if (bus_dmamap_create(sc->sc_dmatag,(*(sc->sc_dmatag)->_dmamap_create)((sc->sc_dmatag), (
2048 * sizeof(uint64_t)), (1), (2048 * sizeof(uint64_t)), (0)
, (0x0001 | 0x0002 | 0x2000), (&sc->sk_status_map))
1295 MSK_STATUS_RING_CNT * sizeof(uint64_t), 1,(*(sc->sc_dmatag)->_dmamap_create)((sc->sc_dmatag), (
2048 * sizeof(uint64_t)), (1), (2048 * sizeof(uint64_t)), (0)
, (0x0001 | 0x0002 | 0x2000), (&sc->sk_status_map))
1296 MSK_STATUS_RING_CNT * sizeof(uint64_t), 0,(*(sc->sc_dmatag)->_dmamap_create)((sc->sc_dmatag), (
2048 * sizeof(uint64_t)), (1), (2048 * sizeof(uint64_t)), (0)
, (0x0001 | 0x0002 | 0x2000), (&sc->sk_status_map))
1297 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,(*(sc->sc_dmatag)->_dmamap_create)((sc->sc_dmatag), (
2048 * sizeof(uint64_t)), (1), (2048 * sizeof(uint64_t)), (0)
, (0x0001 | 0x0002 | 0x2000), (&sc->sk_status_map))
1298 &sc->sk_status_map)(*(sc->sc_dmatag)->_dmamap_create)((sc->sc_dmatag), (
2048 * sizeof(uint64_t)), (1), (2048 * sizeof(uint64_t)), (0)
, (0x0001 | 0x0002 | 0x2000), (&sc->sk_status_map))
) {
1299 printf(": can't create dma map\n");
1300 goto fail_4;
1301 }
1302 if (bus_dmamap_load(sc->sc_dmatag, sc->sk_status_map, kva,(*(sc->sc_dmatag)->_dmamap_load)((sc->sc_dmatag), (sc
->sk_status_map), (kva), (2048 * sizeof(uint64_t)), (((void
*)0)), (0x0001))
1303 MSK_STATUS_RING_CNT * sizeof(uint64_t),(*(sc->sc_dmatag)->_dmamap_load)((sc->sc_dmatag), (sc
->sk_status_map), (kva), (2048 * sizeof(uint64_t)), (((void
*)0)), (0x0001))
1304 NULL, BUS_DMA_NOWAIT)(*(sc->sc_dmatag)->_dmamap_load)((sc->sc_dmatag), (sc
->sk_status_map), (kva), (2048 * sizeof(uint64_t)), (((void
*)0)), (0x0001))
) {
1305 printf(": can't load dma map\n");
1306 goto fail_5;
1307 }
1308 sc->sk_status_ring = (uint64_t *)kva;
1309
1310 /* Reset the adapter. */
1311 mskc_reset(sc);
1312
1313 sc->sk_ramsize = sk_win_read_1(sc, SK_EPROM00x011C) * 4096;
1314 DPRINTFN(2, ("mskc_attach: ramsize=%dK\n", sc->sk_ramsize / 1024));
1315
1316 pmd = sk_win_read_1(sc, SK_PMDTYPE0x0119);
1317 if (pmd == 'L' || pmd == 'S' || pmd == 'P')
1318 sc->sk_fibertype = 1;
1319
1320 switch (sc->sk_type) {
1321 case SK_YUKON_XL0xB3:
1322 sc->sk_name = "Yukon-2 XL";
1323 break;
1324 case SK_YUKON_EC_U0xB4:
1325 sc->sk_name = "Yukon-2 EC Ultra";
1326 break;
1327 case SK_YUKON_EX0xB5:
1328 sc->sk_name = "Yukon-2 Extreme";
1329 break;
1330 case SK_YUKON_EC0xB6:
1331 sc->sk_name = "Yukon-2 EC";
1332 break;
1333 case SK_YUKON_FE0xB7:
1334 sc->sk_name = "Yukon-2 FE";
1335 break;
1336 case SK_YUKON_FE_P0xB8:
1337 sc->sk_name = "Yukon-2 FE+";
1338 break;
1339 case SK_YUKON_SUPR0xB9:
1340 sc->sk_name = "Yukon-2 Supreme";
1341 break;
1342 case SK_YUKON_ULTRA20xBA:
1343 sc->sk_name = "Yukon-2 Ultra 2";
1344 break;
1345 case SK_YUKON_OPTIMA0xBC:
1346 sc->sk_name = "Yukon-2 Optima";
1347 break;
1348 case SK_YUKON_PRM0xBD:
1349 sc->sk_name = "Yukon-2 Optima Prime";
1350 break;
1351 case SK_YUKON_OPTIMA20xBE:
1352 sc->sk_name = "Yukon-2 Optima 2";
1353 break;
1354 default:
1355 sc->sk_name = "Yukon (Unknown)";
1356 }
1357
1358 if (sc->sk_type == SK_YUKON_XL0xB3) {
1359 switch (sc->sk_rev) {
1360 case SK_YUKON_XL_REV_A00x0:
1361 revstr = "A0";
1362 break;
1363 case SK_YUKON_XL_REV_A10x1:
1364 revstr = "A1";
1365 break;
1366 case SK_YUKON_XL_REV_A20x2:
1367 revstr = "A2";
1368 break;
1369 case SK_YUKON_XL_REV_A30x3:
1370 revstr = "A3";
1371 break;
1372 default:
1373 ;
1374 }
1375 }
1376
1377 if (sc->sk_type == SK_YUKON_EC0xB6) {
1378 switch (sc->sk_rev) {
1379 case SK_YUKON_EC_REV_A10x0:
1380 revstr = "A1";
1381 break;
1382 case SK_YUKON_EC_REV_A20x1:
1383 revstr = "A2";
1384 break;
1385 case SK_YUKON_EC_REV_A30x2:
1386 revstr = "A3";
1387 break;
1388 default:
1389 ;
1390 }
1391 }
1392
1393 if (sc->sk_type == SK_YUKON_EC_U0xB4) {
1394 switch (sc->sk_rev) {
1395 case SK_YUKON_EC_U_REV_A00x1:
1396 revstr = "A0";
1397 break;
1398 case SK_YUKON_EC_U_REV_A10x2:
1399 revstr = "A1";
1400 break;
1401 case SK_YUKON_EC_U_REV_B00x3:
1402 revstr = "B0";
1403 break;
1404 case SK_YUKON_EC_U_REV_B10x5:
1405 revstr = "B1";
1406 break;
1407 default:
1408 ;
1409 }
1410 }
1411
1412 if (sc->sk_type == SK_YUKON_FE0xB7) {
1413 switch (sc->sk_rev) {
1414 case SK_YUKON_FE_REV_A10x1:
1415 revstr = "A1";
1416 break;
1417 case SK_YUKON_FE_REV_A20x2:
1418 revstr = "A2";
1419 break;
1420 default:
1421 ;
1422 }
1423 }
1424
1425 if (sc->sk_type == SK_YUKON_FE_P0xB8 && sc->sk_rev == SK_YUKON_FE_P_REV_A00x0)
1426 revstr = "A0";
1427
1428 if (sc->sk_type == SK_YUKON_EX0xB5) {
1429 switch (sc->sk_rev) {
1430 case SK_YUKON_EX_REV_A00x1:
1431 revstr = "A0";
1432 break;
1433 case SK_YUKON_EX_REV_B00x2:
1434 revstr = "B0";
1435 break;
1436 default:
1437 ;
1438 }
1439 }
1440
1441 if (sc->sk_type == SK_YUKON_SUPR0xB9) {
1442 switch (sc->sk_rev) {
1443 case SK_YUKON_SUPR_REV_A00x0:
1444 revstr = "A0";
1445 break;
1446 case SK_YUKON_SUPR_REV_B00x1:
1447 revstr = "B0";
1448 break;
1449 case SK_YUKON_SUPR_REV_B10x3:
1450 revstr = "B1";
1451 break;
1452 default:
1453 ;
1454 }
1455 }
1456
1457 if (sc->sk_type == SK_YUKON_PRM0xBD) {
1458 switch (sc->sk_rev) {
1459 case SK_YUKON_PRM_REV_Z10x1:
1460 revstr = "Z1";
1461 break;
1462 case SK_YUKON_PRM_REV_A00x2:
1463 revstr = "A0";
1464 break;
1465 default:
1466 ;
1467 }
1468 }
1469
1470 /* Announce the product name. */
1471 printf(", %s", sc->sk_name);
1472 if (revstr != NULL((void *)0))
1473 printf(" rev. %s", revstr);
1474 printf(" (0x%x): %s\n", sc->sk_rev, intrstr);
1475
1476 sc->sk_macs = 1;
1477
1478 hw = sk_win_read_1(sc, SK_Y2_HWRES0x011E);
1479 if ((hw & SK_Y2_HWRES_LINK_MASK(0x01 | 0x02)) == SK_Y2_HWRES_LINK_DUAL(0x01 | 0x02)) {
1480 if ((sk_win_read_1(sc, SK_Y2_CLKGATE0x011D) &
1481 SK_Y2_CLKGATE_LINK2_INACTIVE0x80) == 0)
1482 sc->sk_macs++;
1483 }
1484
1485 skca.skc_port = SK_PORT_A0;
1486 skca.skc_type = sc->sk_type;
1487 skca.skc_rev = sc->sk_rev;
1488 (void)config_found(&sc->sk_dev, &skca, mskcprint)config_found_sm((&sc->sk_dev), (&skca), (mskcprint
), ((void *)0))
;
1489
1490 if (sc->sk_macs > 1) {
1491 skca.skc_port = SK_PORT_B1;
1492 skca.skc_type = sc->sk_type;
1493 skca.skc_rev = sc->sk_rev;
1494 (void)config_found(&sc->sk_dev, &skca, mskcprint)config_found_sm((&sc->sk_dev), (&skca), (mskcprint
), ((void *)0))
;
1495 }
1496
1497 /* Turn on the 'driver is loaded' LED. */
1498 CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_ON)(((sc)->sk_btag)->write_2(((sc)->sk_bhandle), ((0x0006
)), ((0x02))))
;
1499
1500 return;
1501
1502fail_4:
1503 bus_dmamem_unmap(sc->sc_dmatag, (caddr_t)sc->sk_status_ring,(*(sc->sc_dmatag)->_dmamem_unmap)((sc->sc_dmatag), (
(caddr_t)sc->sk_status_ring), (2048 * sizeof(uint64_t)))
1504 MSK_STATUS_RING_CNT * sizeof(uint64_t))(*(sc->sc_dmatag)->_dmamem_unmap)((sc->sc_dmatag), (
(caddr_t)sc->sk_status_ring), (2048 * sizeof(uint64_t)))
;
1505fail_3:
1506 bus_dmamem_free(sc->sc_dmatag,(*(sc->sc_dmatag)->_dmamem_free)((sc->sc_dmatag), (&
sc->sk_status_seg), (sc->sk_status_nseg))
1507 &sc->sk_status_seg, sc->sk_status_nseg)(*(sc->sc_dmatag)->_dmamem_free)((sc->sc_dmatag), (&
sc->sk_status_seg), (sc->sk_status_nseg))
;
1508 sc->sk_status_nseg = 0;
1509fail_5:
1510 bus_dmamap_destroy(sc->sc_dmatag, sc->sk_status_map)(*(sc->sc_dmatag)->_dmamap_destroy)((sc->sc_dmatag),
(sc->sk_status_map))
;
1511fail_2:
1512 pci_intr_disestablish(sc->sk_pc, sc->sk_intrhand);
1513 sc->sk_intrhand = NULL((void *)0);
1514fail_1:
1515 bus_space_unmap(sc->sk_btag, sc->sk_bhandle, sc->sk_bsize);
1516 sc->sk_bsize = 0;
1517}
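
The second MAC is only attached when the hardware-resources byte reports a dual-link part and the second link's clock is not gated; otherwise sc->sk_if[SK_PORT_B] is never filled in, and a failed msk_attach() likewise clears its sk_if[] slot (source line 1135). This is why msk_intr() further down must treat both per-port softc pointers as potentially NULL. The following standalone sketch models that decision using the mask values visible in the expanded macros above; msk_hwres_macs() is a hypothetical helper name, not driver API.

/* Standalone sketch (not driver code): how the dual-MAC decision falls
 * out of the two register bytes read in mskc_attach(). */
#include <stdio.h>

#define Y2_HWRES_LINK_MASK		(0x01 | 0x02)
#define Y2_HWRES_LINK_DUAL		(0x01 | 0x02)
#define Y2_CLKGATE_LINK2_INACTIVE	0x80

static int
msk_hwres_macs(unsigned char hwres, unsigned char clkgate)
{
	int macs = 1;

	if ((hwres & Y2_HWRES_LINK_MASK) == Y2_HWRES_LINK_DUAL &&
	    (clkgate & Y2_CLKGATE_LINK2_INACTIVE) == 0)
		macs = 2;

	return (macs);
}

int
main(void)
{
	/* dual-link part, second link clocked: both ports attach */
	printf("%d\n", msk_hwres_macs(0x03, 0x00));	/* 2 */
	/* dual-link part, second link gated off: port B stays NULL */
	printf("%d\n", msk_hwres_macs(0x03, 0x80));	/* 1 */
	/* single-link part */
	printf("%d\n", msk_hwres_macs(0x01, 0x00));	/* 1 */
	return (0);
}
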
1518
1519int
1520mskc_detach(struct device *self, int flags)
1521{
1522 struct sk_softc *sc = (struct sk_softc *)self;
1523 int rv;
1524
1525 if (sc->sk_intrhand)
1526 pci_intr_disestablish(sc->sk_pc, sc->sk_intrhand);
1527
1528 rv = config_detach_children(self, flags);
1529 if (rv != 0)
1530 return (rv);
1531
1532 if (sc->sk_status_nseg > 0) {
1533 bus_dmamap_destroy(sc->sc_dmatag, sc->sk_status_map)(*(sc->sc_dmatag)->_dmamap_destroy)((sc->sc_dmatag),
(sc->sk_status_map))
;
1534 bus_dmamem_unmap(sc->sc_dmatag, (caddr_t)sc->sk_status_ring,(*(sc->sc_dmatag)->_dmamem_unmap)((sc->sc_dmatag), (
(caddr_t)sc->sk_status_ring), (2048 * sizeof(uint64_t)))
1535 MSK_STATUS_RING_CNT * sizeof(uint64_t))(*(sc->sc_dmatag)->_dmamem_unmap)((sc->sc_dmatag), (
(caddr_t)sc->sk_status_ring), (2048 * sizeof(uint64_t)))
;
1536 bus_dmamem_free(sc->sc_dmatag,(*(sc->sc_dmatag)->_dmamem_free)((sc->sc_dmatag), (&
sc->sk_status_seg), (sc->sk_status_nseg))
1537 &sc->sk_status_seg, sc->sk_status_nseg)(*(sc->sc_dmatag)->_dmamem_free)((sc->sc_dmatag), (&
sc->sk_status_seg), (sc->sk_status_nseg))
;
1538 }
1539
1540 if (sc->sk_bsize > 0)
1541 bus_space_unmap(sc->sk_btag, sc->sk_bhandle, sc->sk_bsize);
1542
1543 return(0);
1544}
1545
1546int
1547mskc_activate(struct device *self, int act)
1548{
1549 struct sk_softc *sc = (void *)self;
1550 int rv = 0;
1551
1552 switch (act) {
1553 case DVACT_RESUME4:
1554 mskc_reset(sc);
1555 rv = config_activate_children(self, act);
1556 break;
1557 default:
1558 rv = config_activate_children(self, act);
1559 break;
1560 }
1561 return (rv);
1562}
1563
1564static unsigned int
1565msk_encap(struct sk_if_softc *sc_if, struct mbuf *m, uint32_t prod)
1566{
1567 struct sk_softc *sc = sc_if->sk_softc;
1568 struct msk_ring_data *rd = sc_if->sk_rdata;
1569 struct msk_tx_desc *t;
1570 bus_dmamap_t map;
1571 uint64_t addr;
1572 uint32_t hiaddr;
1573 uint32_t next, last;
1574 uint8_t opcode;
1575 unsigned int entries = 0;
1576 int i;
1577
1578 map = sc_if->sk_cdata.sk_tx_maps[prod];
1579
1580 switch (bus_dmamap_load_mbuf(sc->sc_dmatag, map, m,(*(sc->sc_dmatag)->_dmamap_load_mbuf)((sc->sc_dmatag
), (map), (m), (0x0100 | 0x0001))
1581 BUS_DMA_STREAMING | BUS_DMA_NOWAIT)(*(sc->sc_dmatag)->_dmamap_load_mbuf)((sc->sc_dmatag
), (map), (m), (0x0100 | 0x0001))
) {
1582 case 0:
1583 break;
1584 case EFBIG27: /* mbuf chain is too fragmented */
1585 if (m_defrag(m, M_DONTWAIT0x0002) == 0 &&
1586 bus_dmamap_load_mbuf(sc->sc_dmatag, map, m,(*(sc->sc_dmatag)->_dmamap_load_mbuf)((sc->sc_dmatag
), (map), (m), (0x0100 | 0x0001))
1587 BUS_DMA_STREAMING | BUS_DMA_NOWAIT)(*(sc->sc_dmatag)->_dmamap_load_mbuf)((sc->sc_dmatag
), (map), (m), (0x0100 | 0x0001))
== 0)
1588 break;
1589 /* FALLTHROUGH */
1590 default:
1591 return (0);
1592 }
1593
1594 bus_dmamap_sync(sc->sc_dmatag, map, 0, map->dm_mapsize,(*(sc->sc_dmatag)->_dmamap_sync)((sc->sc_dmatag), (map
), (0), (map->dm_mapsize), (0x04))
1595 BUS_DMASYNC_PREWRITE)(*(sc->sc_dmatag)->_dmamap_sync)((sc->sc_dmatag), (map
), (0), (map->dm_mapsize), (0x04))
;
1596
1597 opcode = SK_Y2_TXOPC_OWN0x80 | SK_Y2_TXOPC_PACKET0x41;
1598 next = prod;
1599 for (i = 0; i < map->dm_nsegs; i++) {
1600 /* high 32 bits of address */
1601 addr = map->dm_segs[i].ds_addr;
1602 hiaddr = addr >> 32;
1603 if (sc_if->sk_cdata.sk_tx_hiaddr != hiaddr) {
1604 t = &rd->sk_tx_ring[next];
1605 htolem32(&t->sk_addr, hiaddr)(*(__uint32_t *)(&t->sk_addr) = ((__uint32_t)(hiaddr))
)
;
1606 t->sk_opcode = SK_Y2_TXOPC_OWN0x80 | SK_Y2_TXOPC_ADDR640x21;
1607
1608 sc_if->sk_cdata.sk_tx_hiaddr = hiaddr;
1609
1610 SK_INC(next, MSK_TX_RING_CNT)(next) = (next + 1) % 512;
1611 entries++;
1612 }
1613
1614 /* low 32 bits of address + length */
1615 t = &rd->sk_tx_ring[next];
1616 htolem32(&t->sk_addr, addr)(*(__uint32_t *)(&t->sk_addr) = ((__uint32_t)(addr)));
1617 htolem16(&t->sk_len, map->dm_segs[i].ds_len)(*(__uint16_t *)(&t->sk_len) = ((__uint16_t)(map->dm_segs
[i].ds_len)))
;
1618 t->sk_ctl = 0;
1619 t->sk_opcode = opcode;
1620
1621 last = next;
1622 SK_INC(next, MSK_TX_RING_CNT)(next) = (next + 1) % 512;
1623 entries++;
1624
1625 opcode = SK_Y2_TXOPC_OWN0x80 | SK_Y2_TXOPC_BUFFER0x40;
1626 }
1627 t->sk_ctl = SK_Y2_TXCTL_LASTFRAG0x80;
1628
1629 sc_if->sk_cdata.sk_tx_maps[prod] = sc_if->sk_cdata.sk_tx_maps[last];
1630 sc_if->sk_cdata.sk_tx_maps[last] = map;
1631 sc_if->sk_cdata.sk_tx_mbuf[last] = m;
1632
1633 return (entries);
1634}
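
msk_encap() consumes one BUFFER/PACKET descriptor per DMA segment, plus an extra ADDR64 descriptor whenever the upper 32 bits of a segment address differ from the value the chip saw last, and it returns the total number of ring entries used so the caller can account for them. A standalone sketch of that counting, assuming hypothetical names (count_tx_entries() is not driver API):

/* Standalone sketch (not driver code): worst-case tx ring usage per
 * packet.  One descriptor per DMA segment, plus one ADDR64 descriptor
 * each time the high 32 bits of the segment address change. */
#include <stdint.h>
#include <stdio.h>

static unsigned int
count_tx_entries(const uint64_t *segs, int nsegs, uint32_t *hiaddr)
{
	unsigned int entries = 0;
	int i;

	for (i = 0; i < nsegs; i++) {
		uint32_t hi = segs[i] >> 32;

		if (*hiaddr != hi) {	/* needs an ADDR64 descriptor */
			*hiaddr = hi;
			entries++;
		}
		entries++;		/* PACKET/BUFFER descriptor */
	}

	return (entries);
}

int
main(void)
{
	uint64_t segs[] = {
		0x00000000fffff000ULL,	/* below 4GB */
		0x0000000100000000ULL,	/* crosses into the next 4GB window */
		0x0000000100001000ULL,
	};
	uint32_t hiaddr = 0;

	/* 3 segments, one 4GB-window change: 4 ring entries used.
	 * Worst case is 2 entries per segment, which is what
	 * msk_start() reserves. */
	printf("%u\n", count_tx_entries(segs, 3, &hiaddr));
	return (0);
}
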
1635
1636void
1637msk_start(struct ifnet *ifp)
1638{
1639 struct sk_if_softc *sc_if = ifp->if_softc;
1640 struct mbuf *m = NULL((void *)0);
1641 uint32_t prod, free, used;
1642 int post = 0;
1643
1644 prod = sc_if->sk_cdata.sk_tx_prod;
1645 free = sc_if->sk_cdata.sk_tx_cons;
1646 if (free <= prod)
1647 free += MSK_TX_RING_CNT512;
1648 free -= prod;
1649
1650 MSK_CDTXSYNC(sc_if, 0, MSK_TX_RING_CNT, BUS_DMASYNC_POSTWRITE)do { int __x, __n; __x = (0); __n = (512); if ((__x + __n) >
512) { (*((sc_if)->sk_softc->sc_dmatag)->_dmamap_sync
)(((sc_if)->sk_softc->sc_dmatag), ((sc_if)->sk_ring_map
), (__builtin_offsetof(struct msk_ring_data, sk_tx_ring[(__x)
])), (sizeof(struct msk_tx_desc) * (512 - __x)), ((0x08))); __n
-= (512 - __x); __x = 0; } (*((sc_if)->sk_softc->sc_dmatag
)->_dmamap_sync)(((sc_if)->sk_softc->sc_dmatag), ((sc_if
)->sk_ring_map), (__builtin_offsetof(struct msk_ring_data,
sk_tx_ring[((__x))])), (sizeof(struct msk_tx_desc) * __n), (
(0x08))); } while ( 0)
;
1651
1652 for (;;) {
1653 if (free <= SK_NTXSEG30 * 2) {
1654 ifq_set_oactive(&ifp->if_snd);
1655 break;
1656 }
1657
1658 m = ifq_dequeue(&ifp->if_snd);
1659 if (m == NULL((void *)0))
1660 break;
1661
1662 used = msk_encap(sc_if, m, prod);
1663 if (used == 0) {
1664 m_freem(m);
1665 continue;
1666 }
1667
1668 free -= used;
1669 prod += used;
1670 prod &= MSK_TX_RING_CNT512 - 1;
1671
1672#if NBPFILTER1 > 0
1673 if (ifp->if_bpf)
1674 bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT(1 << 1));
1675#endif
1676 post = 1;
1677 }
1678
1679 MSK_CDTXSYNC(sc_if, 0, MSK_TX_RING_CNT, BUS_DMASYNC_PREWRITE)do { int __x, __n; __x = (0); __n = (512); if ((__x + __n) >
512) { (*((sc_if)->sk_softc->sc_dmatag)->_dmamap_sync
)(((sc_if)->sk_softc->sc_dmatag), ((sc_if)->sk_ring_map
), (__builtin_offsetof(struct msk_ring_data, sk_tx_ring[(__x)
])), (sizeof(struct msk_tx_desc) * (512 - __x)), ((0x04))); __n
-= (512 - __x); __x = 0; } (*((sc_if)->sk_softc->sc_dmatag
)->_dmamap_sync)(((sc_if)->sk_softc->sc_dmatag), ((sc_if
)->sk_ring_map), (__builtin_offsetof(struct msk_ring_data,
sk_tx_ring[((__x))])), (sizeof(struct msk_tx_desc) * __n), (
(0x04))); } while ( 0)
;
1680
1681 if (post == 0)
1682 return;
1683
1684 /* Transmit */
1685 sc_if->sk_cdata.sk_tx_prod = prod;
1686 SK_IF_WRITE_2(sc_if, 1, SK_TXQA1_Y2_PREF_PUTIDX, prod)sk_win_write_2(sc_if->sk_softc, 0x06E4 + ((sc_if->sk_port
* (1 + 1)) * 0x80), prod)
;
1687
1688 /* Set a timeout in case the chip goes out to lunch. */
1689 ifp->if_timer = MSK_TX_TIMEOUT5;
1690}
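
The free-space computation at the top of msk_start() is plain producer/consumer arithmetic on the 512-entry tx ring, and the loop stops while at least SK_NTXSEG * 2 entries remain because a maximally fragmented packet can need two descriptors per segment (an ADDR64 plus a buffer descriptor, as sketched after msk_encap() above). A minimal standalone model of the wraparound arithmetic:

/* Standalone sketch (not driver code): the producer/consumer free-space
 * arithmetic used at the top of msk_start(), for a 512-entry ring. */
#include <assert.h>
#include <stdint.h>

#define TX_RING_CNT	512

static uint32_t
tx_ring_free(uint32_t prod, uint32_t cons)
{
	uint32_t free = cons;

	if (free <= prod)
		free += TX_RING_CNT;
	return (free - prod);
}

int
main(void)
{
	assert(tx_ring_free(0, 0) == TX_RING_CNT);	/* empty ring */
	assert(tx_ring_free(10, 4) == 506);		/* prod ahead of cons */
	assert(tx_ring_free(500, 10) == 22);		/* wrapped */
	return (0);
}
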
1691
1692void
1693msk_watchdog(struct ifnet *ifp)
1694{
1695 struct sk_if_softc *sc_if = ifp->if_softc;
1696
1697 if (sc_if->sk_cdata.sk_tx_prod != sc_if->sk_cdata.sk_tx_cons) {
1698 printf("%s: watchdog timeout\n", sc_if->sk_dev.dv_xname);
1699
1700 ifp->if_oerrorsif_data.ifi_oerrors++;
1701
1702 /* XXX Resets both ports; we shouldn't do that. */
1703 mskc_reset(sc_if->sk_softc);
1704 msk_reset(sc_if);
1705 msk_init(sc_if);
1706 }
1707}
1708
1709static inline int
1710msk_rxvalid(struct sk_softc *sc, u_int32_t stat, u_int32_t len)
1711{
1712 if ((stat & (YU_RXSTAT_CRCERR0x00000002 | YU_RXSTAT_LONGERR0x00000010 |
1713 YU_RXSTAT_MIIERR0x00000020 | YU_RXSTAT_BADFC0x00000040 | YU_RXSTAT_GOODFC0x00000080 |
1714 YU_RXSTAT_JABBER0x00001000)) != 0 ||
1715 (stat & YU_RXSTAT_RXOK0x00000100) != YU_RXSTAT_RXOK0x00000100 ||
1716 YU_RXSTAT_BYTES(stat)((stat) >> 16) != len)
1717 return (0);
1718
1719 return (1);
1720}
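
The Yukon RX status word carries the received byte count in its upper 16 bits and error/flow-control flags in the lower bits; msk_rxvalid() accepts a frame only if no error bit is set, RXOK is set, and the embedded byte count matches the length reported by the status descriptor. A standalone sketch of the same test, using the constant values visible in the expansion above:

/* Standalone sketch (not driver code): the acceptance test performed
 * by msk_rxvalid().  Upper 16 bits of the status word carry the byte
 * count; the low bits carry flags. */
#include <stdint.h>
#include <stdio.h>

#define RXSTAT_RXOK	0x00000100
#define RXSTAT_ERRORS	(0x00000002 | 0x00000010 | 0x00000020 | \
			 0x00000040 | 0x00000080 | 0x00001000)
#define RXSTAT_BYTES(s)	((s) >> 16)

static int
rx_valid(uint32_t stat, uint32_t len)
{
	if ((stat & RXSTAT_ERRORS) != 0 ||
	    (stat & RXSTAT_RXOK) != RXSTAT_RXOK ||
	    RXSTAT_BYTES(stat) != len)
		return (0);
	return (1);
}

int
main(void)
{
	uint32_t good = (64 << 16) | RXSTAT_RXOK;	/* 64-byte frame, ok */
	uint32_t bad = (64 << 16) | RXSTAT_RXOK | 0x00000002; /* CRC error */

	printf("%d %d %d\n", rx_valid(good, 64), rx_valid(bad, 64),
	    rx_valid(good, 60));	/* 1 0 0 */
	return (0);
}
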
1721
1722void
1723msk_rxeof(struct sk_if_softc *sc_if, struct mbuf_list *ml,
1724 uint16_t len, uint32_t rxstat)
1725{
1726 struct sk_softc *sc = sc_if->sk_softc;
1727 struct ifnet *ifp = &sc_if->arpcom.ac_if;
1728 struct mbuf *m = NULL((void *)0);
1729 int prod, cons, tail;
1730 bus_dmamap_t map;
1731
1732 prod = sc_if->sk_cdata.sk_rx_prod;
1733 cons = sc_if->sk_cdata.sk_rx_cons;
1734
1735 while (cons != prod) {
1736 tail = cons;
1737 SK_INC(cons, MSK_RX_RING_CNT)(cons) = (cons + 1) % 512;
1738
1739 m = sc_if->sk_cdata.sk_rx_mbuf[tail];
1740 if (m != NULL((void *)0)) {
1741 /* found it */
1742 break;
1743 }
1744 }
1745 sc_if->sk_cdata.sk_rx_cons = cons;
1746
1747 if (m == NULL((void *)0)) {
1748 /* maybe if ADDR64 is consumed? */
1749 return;
1750 }
1751
1752 sc_if->sk_cdata.sk_rx_mbuf[tail] = NULL((void *)0);
1753
1754 map = sc_if->sk_cdata.sk_rx_maps[tail];
1755 if_rxr_put(&sc_if->sk_cdata.sk_rx_ring, 1)do { (&sc_if->sk_cdata.sk_rx_ring)->rxr_alive -= (1
); } while (0)
;
1756
1757 bus_dmamap_sync(sc_if->sk_softc->sc_dmatag, map, 0, map->dm_mapsize,(*(sc_if->sk_softc->sc_dmatag)->_dmamap_sync)((sc_if
->sk_softc->sc_dmatag), (map), (0), (map->dm_mapsize
), (0x02))
1758 BUS_DMASYNC_POSTREAD)(*(sc_if->sk_softc->sc_dmatag)->_dmamap_sync)((sc_if
->sk_softc->sc_dmatag), (map), (0), (map->dm_mapsize
), (0x02))
;
1759 bus_dmamap_unload(sc_if->sk_softc->sc_dmatag, map)(*(sc_if->sk_softc->sc_dmatag)->_dmamap_unload)((sc_if
->sk_softc->sc_dmatag), (map))
;
1760
1761 if (len < SK_MIN_FRAMELEN(64 - 4) || len > SK_JUMBO_FRAMELEN9018 ||
1762 msk_rxvalid(sc, rxstat, len) == 0) {
1763 ifp->if_ierrorsif_data.ifi_ierrors++;
1764 m_freem(m);
1765 return;
1766 }
1767
1768 m->m_pkthdrM_dat.MH.MH_pkthdr.len = m->m_lenm_hdr.mh_len = len;
1769
1770 ml_enqueue(ml, m);
1771}
1772
1773void
1774msk_txeof(struct sk_if_softc *sc_if, unsigned int prod)
1775{
1776 struct ifnet *ifp = &sc_if->arpcom.ac_if;
1777 struct sk_softc *sc = sc_if->sk_softc;
1778 uint32_t cons;
1779 struct mbuf *m;
1780 bus_dmamap_t map;
1781
1782 /*
1783 * Go through our tx ring and free mbufs for those
1784 * frames that have been sent.
1785 */
1786 cons = sc_if->sk_cdata.sk_tx_cons;
1787
1788 if (cons == prod)
1789 return;
1790
1791 while (cons != prod) {
1792 m = sc_if->sk_cdata.sk_tx_mbuf[cons];
1793 if (m != NULL((void *)0)) {
1794 sc_if->sk_cdata.sk_tx_mbuf[cons] = NULL((void *)0);
1795
1796 map = sc_if->sk_cdata.sk_tx_maps[cons];
1797 bus_dmamap_sync(sc->sc_dmatag, map, 0,(*(sc->sc_dmatag)->_dmamap_sync)((sc->sc_dmatag), (map
), (0), (map->dm_mapsize), (0x08))
1798 map->dm_mapsize, BUS_DMASYNC_POSTWRITE)(*(sc->sc_dmatag)->_dmamap_sync)((sc->sc_dmatag), (map
), (0), (map->dm_mapsize), (0x08))
;
1799 bus_dmamap_unload(sc->sc_dmatag, map)(*(sc->sc_dmatag)->_dmamap_unload)((sc->sc_dmatag), (
map))
;
1800
1801 m_freem(m);
1802 }
1803
1804 SK_INC(cons, MSK_TX_RING_CNT)(cons) = (cons + 1) % 512;
1805 }
1806 if (cons == sc_if->sk_cdata.sk_tx_prod)
1807 ifp->if_timer = 0;
1808
1809 sc_if->sk_cdata.sk_tx_cons = cons;
1810
1811 if (ifq_is_oactive(&ifp->if_snd))
1812 ifq_restart(&ifp->if_snd);
1813}
1814
1815void
1816msk_fill_rx_ring(struct sk_if_softc *sc_if)
1817{
1818 u_int slots, used;
1819
1820 slots = if_rxr_get(&sc_if->sk_cdata.sk_rx_ring, MSK_RX_RING_CNT512/2);
1821
1822 MSK_CDRXSYNC(sc_if, 0, BUS_DMASYNC_POSTWRITE)do { (*((sc_if)->sk_softc->sc_dmatag)->_dmamap_sync)
(((sc_if)->sk_softc->sc_dmatag), ((sc_if)->sk_ring_map
), (__builtin_offsetof(struct msk_ring_data, sk_rx_ring[((0))
])), (sizeof(struct msk_rx_desc)), ((0x08))); } while ( 0)
; /* XXX */
1823 while (slots > 0) {
1824 used = msk_newbuf(sc_if);
1825 if (used == 0)
1826 break;
1827
1828 slots -= used;
1829 }
1830 MSK_CDRXSYNC(sc_if, 0, BUS_DMASYNC_PREWRITE)do { (*((sc_if)->sk_softc->sc_dmatag)->_dmamap_sync)
(((sc_if)->sk_softc->sc_dmatag), ((sc_if)->sk_ring_map
), (__builtin_offsetof(struct msk_ring_data, sk_rx_ring[((0))
])), (sizeof(struct msk_rx_desc)), ((0x04))); } while ( 0)
; /* XXX */
1831
1832 if_rxr_put(&sc_if->sk_cdata.sk_rx_ring, slots)do { (&sc_if->sk_cdata.sk_rx_ring)->rxr_alive -= (slots
); } while (0)
;
1833 if (if_rxr_inuse(&sc_if->sk_cdata.sk_rx_ring)((&sc_if->sk_cdata.sk_rx_ring)->rxr_alive) == 0)
1834 timeout_add(&sc_if->sk_tick_rx, 1);
1835}
1836
1837void
1838msk_fill_rx_tick(void *xsc_if)
1839{
1840 struct sk_if_softc *sc_if = xsc_if;
1841 int s;
1842
1843 s = splnet()splraise(0x4);
1844 if (if_rxr_inuse(&sc_if->sk_cdata.sk_rx_ring)((&sc_if->sk_cdata.sk_rx_ring)->rxr_alive) == 0) {
1845 msk_fill_rx_ring(sc_if);
1846 SK_IF_WRITE_2(sc_if, 0, SK_RXQ1_Y2_PREF_PUTIDX,sk_win_write_2(sc_if->sk_softc, 0x0464 + ((sc_if->sk_port
* (0 + 1)) * 0x80), sc_if->sk_cdata.sk_rx_prod)
1847 sc_if->sk_cdata.sk_rx_prod)sk_win_write_2(sc_if->sk_softc, 0x0464 + ((sc_if->sk_port
* (0 + 1)) * 0x80), sc_if->sk_cdata.sk_rx_prod)
;
1848 }
1849 splx(s)spllower(s);
1850}
1851
1852void
1853msk_tick(void *xsc_if)
1854{
1855 struct sk_if_softc *sc_if = xsc_if;
1856 struct mii_data *mii = &sc_if->sk_mii;
1857 int s;
1858
1859 s = splnet()splraise(0x4);
1860 mii_tick(mii);
1861 splx(s)spllower(s);
1862 timeout_add_sec(&sc_if->sk_tick_ch, 1);
1863}
1864
1865void
1866msk_intr_yukon(struct sk_if_softc *sc_if)
1867{
1868 u_int8_t status;
1869
1870 status = SK_IF_READ_1(sc_if, 0, SK_GMAC_ISR)sk_win_read_1(sc_if->sk_softc, 0x0f08 + ((sc_if->sk_port
* (0 + 1)) * 0x80))
;
1871 /* RX overrun */
1872 if ((status & SK_GMAC_INT_RX_OVER0x02) != 0) {
1873 SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST,sk_win_write_1(sc_if->sk_softc, 0x0C48 + ((sc_if->sk_port
* (0 + 1)) * 0x80), 0x00000020)
1874 SK_RFCTL_RX_FIFO_OVER)sk_win_write_1(sc_if->sk_softc, 0x0C48 + ((sc_if->sk_port
* (0 + 1)) * 0x80), 0x00000020)
;
1875 }
1876 /* TX underrun */
1877 if ((status & SK_GMAC_INT_TX_UNDER0x08) != 0) {
1878 SK_IF_WRITE_1(sc_if, 0, SK_TXMF1_CTRL_TEST,sk_win_write_1(sc_if->sk_softc, 0x0D48 + ((sc_if->sk_port
* (0 + 1)) * 0x80), 0x00000040)
1879 SK_TFCTL_TX_FIFO_UNDER)sk_win_write_1(sc_if->sk_softc, 0x0D48 + ((sc_if->sk_port
* (0 + 1)) * 0x80), 0x00000040)
;
1880 }
1881
1882 DPRINTFN(2, ("msk_intr_yukon status=%#x\n", status));
1883}
1884
1885int
1886msk_intr(void *xsc)
1887{
1888 struct sk_softc *sc = xsc;
1889 struct sk_if_softc *sc_if0 = sc->sk_if[SK_PORT_A0];
Step 1: 'sc_if0' initialized here
1890 struct sk_if_softc *sc_if1 = sc->sk_if[SK_PORT_B1];
1891 struct mbuf_list ml[2] = {
1892 MBUF_LIST_INITIALIZER(){ ((void *)0), ((void *)0), 0 },
1893 MBUF_LIST_INITIALIZER(){ ((void *)0), ((void *)0), 0 },
1894 };
1895 struct ifnet *ifp0 = NULL((void *)0), *ifp1 = NULL((void *)0);
1896 int claimed = 0;
1897 u_int32_t status;
1898 uint64_t *ring = sc->sk_status_ring;
1899 uint64_t desc;
1900
1901 status = CSR_READ_4(sc, SK_Y2_ISSR2)(((sc)->sk_btag)->read_4(((sc)->sk_bhandle), ((0x001C
))))
;
1902 if (status == 0xffffffff)
Step 2: Assuming 'status' is not equal to -1
Step 3: Taking false branch
1903 return (0);
1904 if (status == 0) {
Step 4: Assuming 'status' is not equal to 0
Step 5: Taking false branch
1905 CSR_WRITE_4(sc, SK_Y2_ICR, 2)(((sc)->sk_btag)->write_4(((sc)->sk_bhandle), ((0x002C
)), ((2))))
;
1906 return (0);
1907 }
1908
1909 status = CSR_READ_4(sc, SK_ISR)(((sc)->sk_btag)->read_4(((sc)->sk_bhandle), ((0x0008
))))
;
1910
1911 if (sc_if0 != NULL((void *)0))
Step 6: Assuming 'sc_if0' is equal to NULL
Step 7: Taking false branch
1912 ifp0 = &sc_if0->arpcom.ac_if;
1913 if (sc_if1 != NULL((void *)0))
Step 8: Assuming 'sc_if1' is not equal to NULL
Step 9: Taking true branch
1914 ifp1 = &sc_if1->arpcom.ac_if;
1915
1916 if (sc_if0
Step 9.1: 'sc_if0' is null
&& (status & SK_Y2_IMR_MAC10x00000008) &&
1917 (ifp0->if_flags & IFF_RUNNING0x40)) {
1918 msk_intr_yukon(sc_if0);
1919 }
1920
1921 if (sc_if1
Step 9.2: 'sc_if1' is non-null
&& (status & SK_Y2_IMR_MAC20x00000800) &&
Step 10: Assuming the condition is false
1922 (ifp1->if_flags & IFF_RUNNING0x40)) {
1923 msk_intr_yukon(sc_if1);
1924 }
1925
1926 MSK_CDSTSYNC(sc, sc->sk_status_idx,do { (*((sc)->sc_dmatag)->_dmamap_sync)(((sc)->sc_dmatag
), ((sc)->sk_status_map), ((((sc->sk_status_idx)) * sizeof
(uint64_t))), (sizeof(uint64_t)), ((0x02|0x08))); } while ( 0
)
Step 11: Loop condition is false. Exiting loop
1927 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)do { (*((sc)->sc_dmatag)->_dmamap_sync)(((sc)->sc_dmatag
), ((sc)->sk_status_map), ((((sc->sk_status_idx)) * sizeof
(uint64_t))), (sizeof(uint64_t)), ((0x02|0x08))); } while ( 0
)
;
1928
1929 while (MSK_STATUS_OWN(desc = lemtoh64(&ring[sc->sk_status_idx]))(((desc = ((__uint64_t)(*(__uint64_t *)(&ring[sc->sk_status_idx
])))) >> 63) & 0x1)
) {
Step 12: Loop condition is true. Entering loop body
Step 15: Loop condition is false. Execution continues on line 1960
1930 unsigned int opcode, port;
1931
1932 ring[sc->sk_status_idx] = htole64(0)((__uint64_t)(0)); /* clear ownership */
1933
1934 opcode = MSK_STATUS_OPCODE(desc)(((desc) >> 56) & 0x7f);
1935 switch (opcode) {
Step 13: Control jumps to 'case 96:' at line 1936
1936 case MSK_STATUS_OPCODE_RXSTAT0x60:
1937 port = MSK_STATUS_RXSTAT_PORT(desc)(((desc) >> 48) & 0x1);
1938 msk_rxeof(sc->sk_if[port], &ml[port],
1939 MSK_STATUS_RXSTAT_LEN(desc)(((desc) >> 32) & 0xffff),
1940 MSK_STATUS_RXSTAT_STATUS(desc)(((desc) >> 0) & 0xffffffff));
1941 break;
Step 14: Execution continues on line 1957
1942 case SK_Y2_STOPC_TXSTAT0x68:
1943 if (sc_if0) {
1944 msk_txeof(sc_if0,
1945 MSK_STATUS_TXIDX_PORTA(desc)(((desc) >> 0) & 0xfff));
1946 }
1947 if (sc_if1) {
1948 msk_txeof(sc_if1,
1949 MSK_STATUS_TXIDX_PORTB(desc)(((desc) >> 24) & 0xfff));
1950 }
1951 break;
1952 default:
1953 printf("opcode=0x%x\n", opcode);
1954 break;
1955 }
1956
1957 SK_INC(sc->sk_status_idx, MSK_STATUS_RING_CNT)(sc->sk_status_idx) = (sc->sk_status_idx + 1) % 2048;
1958 }
1959
1960 MSK_CDSTSYNC(sc, sc->sk_status_idx,do { (*((sc)->sc_dmatag)->_dmamap_sync)(((sc)->sc_dmatag
), ((sc)->sk_status_map), ((((sc->sk_status_idx)) * sizeof
(uint64_t))), (sizeof(uint64_t)), ((0x02|0x08))); } while ( 0
)
Step 16: Loop condition is false. Exiting loop
1961 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)do { (*((sc)->sc_dmatag)->_dmamap_sync)(((sc)->sc_dmatag
), ((sc)->sk_status_map), ((((sc->sk_status_idx)) * sizeof
(uint64_t))), (sizeof(uint64_t)), ((0x02|0x08))); } while ( 0
)
;
1962
1963 if (status & SK_Y2_IMR_BMU0x40000000) {
Step 17: Assuming the condition is false
Step 18: Taking false branch
1964 CSR_WRITE_4(sc, SK_STAT_BMU_CSR, SK_STAT_BMU_IRQ_CLEAR)(((sc)->sk_btag)->write_4(((sc)->sk_bhandle), ((0x0e80
)), ((0x00000010))))
;
1965 claimed = 1;
1966 }
1967
1968 CSR_WRITE_4(sc, SK_Y2_ICR, 2)(((sc)->sk_btag)->write_4(((sc)->sk_bhandle), ((0x002C
)), ((2))))
;
1969
1970 if (!ml_empty(&ml[0])((&ml[0])->ml_len == 0)) {
Step 19: Assuming field 'ml_len' is not equal to 0
Step 20: Taking true branch
1971 if (ifiq_input(&ifp0->if_rcv, &ml[0]))
Step 21: Assuming the condition is false
Step 22: Taking false branch
1972 if_rxr_livelocked(&sc_if0->sk_cdata.sk_rx_ring);
1973 msk_fill_rx_ring(sc_if0);
1974 SK_IF_WRITE_2(sc_if0, 0, SK_RXQ1_Y2_PREF_PUTIDX,sk_win_write_2(sc_if0->sk_softc, 0x0464 + ((sc_if0->sk_port
* (0 + 1)) * 0x80), sc_if0->sk_cdata.sk_rx_prod)
Step 23: Access to field 'sk_softc' results in a dereference of a null pointer (loaded from variable 'sc_if0')
1975 sc_if0->sk_cdata.sk_rx_prod)sk_win_write_2(sc_if0->sk_softc, 0x0464 + ((sc_if0->sk_port
* (0 + 1)) * 0x80), sc_if0->sk_cdata.sk_rx_prod)
;
1976 }
1977 if (!ml_empty(&ml[1])((&ml[1])->ml_len == 0)) {
1978 if (ifiq_input(&ifp1->if_rcv, &ml[1]))
1979 if_rxr_livelocked(&sc_if1->sk_cdata.sk_rx_ring);
1980 msk_fill_rx_ring(sc_if1);
1981 SK_IF_WRITE_2(sc_if1, 0, SK_RXQ1_Y2_PREF_PUTIDX,sk_win_write_2(sc_if1->sk_softc, 0x0464 + ((sc_if1->sk_port
* (0 + 1)) * 0x80), sc_if1->sk_cdata.sk_rx_prod)
1982 sc_if1->sk_cdata.sk_rx_prod)sk_win_write_2(sc_if1->sk_softc, 0x0464 + ((sc_if1->sk_port
* (0 + 1)) * 0x80), sc_if1->sk_cdata.sk_rx_prod)
;
1983 }
1984
1985 return (claimed);
1986}
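
The path the checker follows assumes sc->sk_if[SK_PORT_A] is NULL (step 6) and yet finds ml[0] non-empty (step 19), which leads to the reported dereference of sc_if0 (and of ifp0, which is only assigned when sc_if0 is non-NULL) in the receive-delivery block at source lines 1971-1974. In practice that combination should not occur, because msk_rxeof() fills ml[port] through sc->sk_if[port] and the hardware should not post receive status for a port that was never attached, but nothing in msk_intr() states that invariant, so the analyzer cannot rule the path out. A minimal sketch of one way to make the dependency explicit, reusing only names that already appear in the function above; this is illustrative, not necessarily the fix applied upstream:

	/* Sketch only: gate delivery on the per-port softc rather than
	 * just the mbuf list, so the NULL case is visibly unreachable. */
	if (sc_if0 != NULL && !ml_empty(&ml[0])) {
		if (ifiq_input(&ifp0->if_rcv, &ml[0]))
			if_rxr_livelocked(&sc_if0->sk_cdata.sk_rx_ring);
		msk_fill_rx_ring(sc_if0);
		SK_IF_WRITE_2(sc_if0, 0, SK_RXQ1_Y2_PREF_PUTIDX,
		    sc_if0->sk_cdata.sk_rx_prod);
	}
	if (sc_if1 != NULL && !ml_empty(&ml[1])) {
		if (ifiq_input(&ifp1->if_rcv, &ml[1]))
			if_rxr_livelocked(&sc_if1->sk_cdata.sk_rx_ring);
		msk_fill_rx_ring(sc_if1);
		SK_IF_WRITE_2(sc_if1, 0, SK_RXQ1_Y2_PREF_PUTIDX,
		    sc_if1->sk_cdata.sk_rx_prod);
	}
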
1987
1988void
1989msk_init_yukon(struct sk_if_softc *sc_if)
1990{
1991 u_int32_t v;
1992 u_int16_t reg;
1993 struct sk_softc *sc;
1994 int i;
1995
1996 sc = sc_if->sk_softc;
1997
1998 DPRINTFN(2, ("msk_init_yukon: start: sk_csr=%#x\n",
1999 CSR_READ_4(sc_if->sk_softc, SK_CSR)));
2000
2001 DPRINTFN(6, ("msk_init_yukon: 1\n"));
2002
2003 DPRINTFN(3, ("msk_init_yukon: gmac_ctrl=%#x\n",
2004 SK_IF_READ_4(sc_if, 0, SK_GMAC_CTRL)));
2005
2006 DPRINTFN(6, ("msk_init_yukon: 3\n"));
2007
2008 /* unused read of the interrupt source register */
2009 DPRINTFN(6, ("msk_init_yukon: 4\n"));
2010 SK_IF_READ_2(sc_if, 0, SK_GMAC_ISR)sk_win_read_2(sc_if->sk_softc, 0x0f08 + ((sc_if->sk_port
* (0 + 1)) * 0x80))
;
2011
2012 DPRINTFN(6, ("msk_init_yukon: 4a\n"));
2013 reg = SK_YU_READ_2(sc_if, YUKON_PAR)sk_win_read_2((sc_if)->sk_softc, (((0x0088)) + 0x2800 + ((
((sc_if))->sk_port) * (0x3800 - 0x2800))))
;
2014 DPRINTFN(6, ("msk_init_yukon: YUKON_PAR=%#x\n", reg));
2015
2016 /* MIB Counter Clear Mode set */
2017 reg |= YU_PAR_MIB_CLR0x0020;
2018 DPRINTFN(6, ("msk_init_yukon: YUKON_PAR=%#x\n", reg));
2019 DPRINTFN(6, ("msk_init_yukon: 4b\n"));
2020 SK_YU_WRITE_2(sc_if, YUKON_PAR, reg)sk_win_write_2((sc_if)->sk_softc, (((0x0088)) + 0x2800 + (
(((sc_if))->sk_port) * (0x3800 - 0x2800))), (reg))
;
2021
2022 /* MIB Counter Clear Mode clear */
2023 DPRINTFN(6, ("msk_init_yukon: 5\n"));
2024 reg &= ~YU_PAR_MIB_CLR0x0020;
2025 SK_YU_WRITE_2(sc_if, YUKON_PAR, reg)sk_win_write_2((sc_if)->sk_softc, (((0x0088)) + 0x2800 + (
(((sc_if))->sk_port) * (0x3800 - 0x2800))), (reg))
;
2026
2027 /* receive control reg */
2028 DPRINTFN(6, ("msk_init_yukon: 7\n"));
2029 SK_YU_WRITE_2(sc_if, YUKON_RCR, YU_RCR_CRCR)sk_win_write_2((sc_if)->sk_softc, (((0x000c)) + 0x2800 + (
(((sc_if))->sk_port) * (0x3800 - 0x2800))), (0x2000))
;
2030
2031 /* transmit parameter register */
2032 DPRINTFN(6, ("msk_init_yukon: 8\n"));
2033 SK_YU_WRITE_2(sc_if, YUKON_TPR, YU_TPR_JAM_LEN(0x3) |sk_win_write_2((sc_if)->sk_softc, (((0x0014)) + 0x2800 + (
(((sc_if))->sk_port) * (0x3800 - 0x2800))), ((((0x3) &
0x3) << 14) | (((0xb) & 0x1f) << 9) | (((0x1a
) & 0x1f) << 4)))
2034 YU_TPR_JAM_IPG(0xb) | YU_TPR_JAM2DATA_IPG(0x1a) )sk_win_write_2((sc_if)->sk_softc, (((0x0014)) + 0x2800 + (
(((sc_if))->sk_port) * (0x3800 - 0x2800))), ((((0x3) &
0x3) << 14) | (((0xb) & 0x1f) << 9) | (((0x1a
) & 0x1f) << 4)))
;
2035
2036 /* serial mode register */
2037 DPRINTFN(6, ("msk_init_yukon: 9\n"));
2038 reg = YU_SMR_DATA_BLIND(0x1c)(((0x1c) & 0x1f) << 11) |
2039 YU_SMR_MFL_VLAN0x0200 |
2040 YU_SMR_IPG_DATA(0x1e)((0x1e) & 0x1f);
2041
2042 if (sc->sk_type != SK_YUKON_FE0xB7 &&
2043 sc->sk_type != SK_YUKON_FE_P0xB8)
2044 reg |= YU_SMR_MFL_JUMBO0x0100;
2045
2046 SK_YU_WRITE_2(sc_if, YUKON_SMR, reg)sk_win_write_2((sc_if)->sk_softc, (((0x0018)) + 0x2800 + (
(((sc_if))->sk_port) * (0x3800 - 0x2800))), (reg))
;
2047
2048 DPRINTFN(6, ("msk_init_yukon: 10\n"));
2049 /* Setup Yukon's address */
2050 for (i = 0; i < 3; i++) {
2051 /* Write Source Address 1 (unicast filter) */
2052 SK_YU_WRITE_2(sc_if, YUKON_SAL1 + i * 4,sk_win_write_2((sc_if)->sk_softc, (((0x001c + i * 4)) + 0x2800
+ ((((sc_if))->sk_port) * (0x3800 - 0x2800))), (sc_if->
arpcom.ac_enaddr[i * 2] | sc_if->arpcom.ac_enaddr[i * 2 + 1
] << 8))
2053 sc_if->arpcom.ac_enaddr[i * 2] |sk_win_write_2((sc_if)->sk_softc, (((0x001c + i * 4)) + 0x2800
+ ((((sc_if))->sk_port) * (0x3800 - 0x2800))), (sc_if->
arpcom.ac_enaddr[i * 2] | sc_if->arpcom.ac_enaddr[i * 2 + 1
] << 8))
2054 sc_if->arpcom.ac_enaddr[i * 2 + 1] << 8)sk_win_write_2((sc_if)->sk_softc, (((0x001c + i * 4)) + 0x2800
+ ((((sc_if))->sk_port) * (0x3800 - 0x2800))), (sc_if->
arpcom.ac_enaddr[i * 2] | sc_if->arpcom.ac_enaddr[i * 2 + 1
] << 8))
;
2055 }
2056
2057 for (i = 0; i < 3; i++) {
2058 reg = sk_win_read_2(sc_if->sk_softc,
2059 SK_MAC1_00x0108 + i * 2 + sc_if->sk_port * 8);
2060 SK_YU_WRITE_2(sc_if, YUKON_SAL2 + i * 4, reg)sk_win_write_2((sc_if)->sk_softc, (((0x0028 + i * 4)) + 0x2800
+ ((((sc_if))->sk_port) * (0x3800 - 0x2800))), (reg))
;
2061 }
2062
2063 /* Program promiscuous mode and multicast filters */
2064 DPRINTFN(6, ("msk_init_yukon: 11\n"));
2065 msk_iff(sc_if);
2066
2067 /* enable interrupt mask for counter overflows */
2068 DPRINTFN(6, ("msk_init_yukon: 12\n"));
2069 SK_YU_WRITE_2(sc_if, YUKON_TIMR, 0)sk_win_write_2((sc_if)->sk_softc, (((0x0050)) + 0x2800 + (
(((sc_if))->sk_port) * (0x3800 - 0x2800))), (0))
;
2070 SK_YU_WRITE_2(sc_if, YUKON_RIMR, 0)sk_win_write_2((sc_if)->sk_softc, (((0x0054)) + 0x2800 + (
(((sc_if))->sk_port) * (0x3800 - 0x2800))), (0))
;
2071 SK_YU_WRITE_2(sc_if, YUKON_TRIMR, 0)sk_win_write_2((sc_if)->sk_softc, (((0x0058)) + 0x2800 + (
(((sc_if))->sk_port) * (0x3800 - 0x2800))), (0))
;
2072
2073 /* Configure RX MAC FIFO Flush Mask */
2074 v = YU_RXSTAT_FOFL0x00000001 | YU_RXSTAT_CRCERR0x00000002 | YU_RXSTAT_MIIERR0x00000020 |
2075 YU_RXSTAT_BADFC0x00000040 | YU_RXSTAT_GOODFC0x00000080 | YU_RXSTAT_RUNT0x00000800 |
2076 YU_RXSTAT_JABBER0x00001000;
2077 SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_FLUSH_MASK, v)sk_win_write_2(sc_if->sk_softc, 0x0C4C + ((sc_if->sk_port
* (0 + 1)) * 0x80), v)
;
2078
2079 /* Configure RX MAC FIFO */
2080 SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_CLEAR)sk_win_write_1(sc_if->sk_softc, 0x0C48 + ((sc_if->sk_port
* (0 + 1)) * 0x80), 0x00000002)
;
2081 SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_OPERATION_ON |sk_win_write_2(sc_if->sk_softc, 0x0C48 + ((sc_if->sk_port
* (0 + 1)) * 0x80), 0x00000008 | 0x00000080)
2082 SK_RFCTL_FIFO_FLUSH_ON)sk_win_write_2(sc_if->sk_softc, 0x0C48 + ((sc_if->sk_port
* (0 + 1)) * 0x80), 0x00000008 | 0x00000080)
;
2083
2084 /* Increase flush threshold to 64 bytes */
2085 SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_FLUSH_THRESHOLD,sk_win_write_2(sc_if->sk_softc, 0x0C50 + ((sc_if->sk_port
* (0 + 1)) * 0x80), 0x0a + 1)
2086 SK_RFCTL_FIFO_THRESHOLD + 1)sk_win_write_2(sc_if->sk_softc, 0x0C50 + ((sc_if->sk_port
* (0 + 1)) * 0x80), 0x0a + 1)
;
2087
2088 /* Configure TX MAC FIFO */
2089 SK_IF_WRITE_1(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_CLEAR)sk_win_write_1(sc_if->sk_softc, 0x0D48 + ((sc_if->sk_port
* (0 + 1)) * 0x80), 0x00000002)
;
2090 SK_IF_WRITE_2(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_OPERATION_ON)sk_win_write_2(sc_if->sk_softc, 0x0D48 + ((sc_if->sk_port
* (0 + 1)) * 0x80), 0x00000008)
;
2091
2092#if 1
2093 SK_YU_WRITE_2(sc_if, YUKON_GPCR, YU_GPCR_TXEN | YU_GPCR_RXEN)sk_win_write_2((sc_if)->sk_softc, (((0x0004)) + 0x2800 + (
(((sc_if))->sk_port) * (0x3800 - 0x2800))), (0x1000 | 0x0800
))
;
2094#endif
2095 DPRINTFN(6, ("msk_init_yukon: end\n"));
2096}
2097
2098/*
2099 * Note that to properly initialize any part of the GEnesis chip,
2100 * you first have to take it out of reset mode.
2101 */
2102void
2103msk_init(void *xsc_if)
2104{
2105 struct sk_if_softc *sc_if = xsc_if;
2106 struct sk_softc *sc = sc_if->sk_softc;
2107 struct ifnet *ifp = &sc_if->arpcom.ac_if;
2108 struct mii_data *mii = &sc_if->sk_mii;
2109 int s;
2110
2111 DPRINTFN(2, ("msk_init\n"));
2112
2113 s = splnet()splraise(0x4);
2114
2115 /* Cancel pending I/O and free all RX/TX buffers. */
2116 msk_stop(sc_if, 0);
2117
2118 /* Configure I2C registers */
2119
2120 /* Configure XMAC(s) */
2121 msk_init_yukon(sc_if);
2122 mii_mediachg(mii);
2123
2124 /* Configure transmit arbiter(s) */
2125 SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL, SK_TXARCTL_ON)sk_win_write_1(sc_if->sk_softc, 0x0210 + ((sc_if->sk_port
* (0 + 1)) * 0x80), 0x02)
;
2126#if 0
2127 SK_TXARCTL_ON0x02|SK_TXARCTL_FSYNC_ON0x80);
2128#endif
2129
2130 /* Configure RAMbuffers */
2131 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_UNRESET)sk_win_write_4(sc_if->sk_softc, 0x0828 + ((sc_if->sk_port
* (0 + 1)) * 0x80), 0x00000002)
;
2132 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_START, sc_if->sk_rx_ramstart)sk_win_write_4(sc_if->sk_softc, 0x0800 + ((sc_if->sk_port
* (0 + 1)) * 0x80), sc_if->sk_rx_ramstart)
;
2133 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_WR_PTR, sc_if->sk_rx_ramstart)sk_win_write_4(sc_if->sk_softc, 0x0808 + ((sc_if->sk_port
* (0 + 1)) * 0x80), sc_if->sk_rx_ramstart)
;
2134 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_RD_PTR, sc_if->sk_rx_ramstart)sk_win_write_4(sc_if->sk_softc, 0x080C + ((sc_if->sk_port
* (0 + 1)) * 0x80), sc_if->sk_rx_ramstart)
;
2135 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_END, sc_if->sk_rx_ramend)sk_win_write_4(sc_if->sk_softc, 0x0804 + ((sc_if->sk_port
* (0 + 1)) * 0x80), sc_if->sk_rx_ramend)
;
2136 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_ON)sk_win_write_4(sc_if->sk_softc, 0x0828 + ((sc_if->sk_port
* (0 + 1)) * 0x80), 0x00000008)
;
2137
2138 SK_IF_WRITE_4(sc_if, 1, SK_TXRBA1_CTLTST, SK_RBCTL_UNRESET)sk_win_write_4(sc_if->sk_softc, 0x0AA8 + ((sc_if->sk_port
* (1 + 1)) * 0x80), 0x00000002)
;
2139 SK_IF_WRITE_4(sc_if, 1, SK_TXRBA1_CTLTST, SK_RBCTL_STORENFWD_ON)sk_win_write_4(sc_if->sk_softc, 0x0AA8 + ((sc_if->sk_port
* (1 + 1)) * 0x80), 0x00000020)
;
2140 SK_IF_WRITE_4(sc_if, 1, SK_TXRBA1_START, sc_if->sk_tx_ramstart)sk_win_write_4(sc_if->sk_softc, 0x0A80 + ((sc_if->sk_port
* (1 + 1)) * 0x80), sc_if->sk_tx_ramstart)
;
2141 SK_IF_WRITE_4(sc_if, 1, SK_TXRBA1_WR_PTR, sc_if->sk_tx_ramstart)sk_win_write_4(sc_if->sk_softc, 0x0A88 + ((sc_if->sk_port
* (1 + 1)) * 0x80), sc_if->sk_tx_ramstart)
;
2142 SK_IF_WRITE_4(sc_if, 1, SK_TXRBA1_RD_PTR, sc_if->sk_tx_ramstart)sk_win_write_4(sc_if->sk_softc, 0x0A8C + ((sc_if->sk_port
* (1 + 1)) * 0x80), sc_if->sk_tx_ramstart)
;
2143 SK_IF_WRITE_4(sc_if, 1, SK_TXRBA1_END, sc_if->sk_tx_ramend)sk_win_write_4(sc_if->sk_softc, 0x0A84 + ((sc_if->sk_port
* (1 + 1)) * 0x80), sc_if->sk_tx_ramend)
;
2144 SK_IF_WRITE_4(sc_if, 1, SK_TXRBA1_CTLTST, SK_RBCTL_ON)sk_win_write_4(sc_if->sk_softc, 0x0AA8 + ((sc_if->sk_port
* (1 + 1)) * 0x80), 0x00000008)
;
2145
2146 /* Configure BMUs */
2147 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, 0x00000016)sk_win_write_4(sc_if->sk_softc, 0x0434 + ((sc_if->sk_port
* (0 + 1)) * 0x80), 0x00000016)
;
2148 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, 0x00000d28)sk_win_write_4(sc_if->sk_softc, 0x0434 + ((sc_if->sk_port
* (0 + 1)) * 0x80), 0x00000d28)
;
2149 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, 0x00000080)sk_win_write_4(sc_if->sk_softc, 0x0434 + ((sc_if->sk_port
* (0 + 1)) * 0x80), 0x00000080)
;
2150 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_WATERMARK, 0x00000600)sk_win_write_4(sc_if->sk_softc, 0x0438 + ((sc_if->sk_port
* (0 + 1)) * 0x80), 0x00000600)
;
2151
2152 SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_BMU_CSR, 0x00000016)sk_win_write_4(sc_if->sk_softc, 0x06B4 + ((sc_if->sk_port
* (1 + 1)) * 0x80), 0x00000016)
;
2153 SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_BMU_CSR, 0x00000d28)sk_win_write_4(sc_if->sk_softc, 0x06B4 + ((sc_if->sk_port
* (1 + 1)) * 0x80), 0x00000d28)
;
2154 SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_BMU_CSR, 0x00000080)sk_win_write_4(sc_if->sk_softc, 0x06B4 + ((sc_if->sk_port
* (1 + 1)) * 0x80), 0x00000080)
;
2155 SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_WATERMARK, 0x00000600)sk_win_write_4(sc_if->sk_softc, 0x06B8 + ((sc_if->sk_port
* (1 + 1)) * 0x80), 0x00000600)
;
2156
2157 /* Make sure the sync transmit queue is disabled. */
2158 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_RESET)sk_win_write_4(sc_if->sk_softc, 0x0A28 + ((sc_if->sk_port
* (1 + 1)) * 0x80), 0x00000001)
;
2159
2160 /* Init descriptors */
2161 if (msk_init_rx_ring(sc_if) == ENOBUFS55) {
2162 printf("%s: initialization failed: no "
2163 "memory for rx buffers\n", sc_if->sk_dev.dv_xname);
2164 msk_stop(sc_if, 0);
2165 splx(s)spllower(s);
2166 return;
2167 }
2168
2169 if (msk_init_tx_ring(sc_if) == ENOBUFS55) {
2170 printf("%s: initialization failed: no "
2171 "memory for tx buffers\n", sc_if->sk_dev.dv_xname);
2172 msk_stop(sc_if, 0);
2173 splx(s)spllower(s);
2174 return;
2175 }
2176
2177 /* Initialize prefetch engine. */
2178 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_Y2_PREF_CSR, 0x00000001)sk_win_write_4(sc_if->sk_softc, 0x0450 + ((sc_if->sk_port
* (0 + 1)) * 0x80), 0x00000001)
;
2179 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_Y2_PREF_CSR, 0x00000002)sk_win_write_4(sc_if->sk_softc, 0x0450 + ((sc_if->sk_port
* (0 + 1)) * 0x80), 0x00000002)
;
2180 SK_IF_WRITE_2(sc_if, 0, SK_RXQ1_Y2_PREF_LIDX, MSK_RX_RING_CNT - 1)sk_win_write_2(sc_if->sk_softc, 0x0454 + ((sc_if->sk_port
* (0 + 1)) * 0x80), 512 - 1)
;
2181 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_Y2_PREF_ADDRLO,sk_win_write_4(sc_if->sk_softc, 0x0458 + ((sc_if->sk_port
* (0 + 1)) * 0x80), ((sc_if)->sk_ring_map->dm_segs[0].
ds_addr + __builtin_offsetof(struct msk_ring_data, sk_rx_ring
[(0)])))
2182 MSK_RX_RING_ADDR(sc_if, 0))sk_win_write_4(sc_if->sk_softc, 0x0458 + ((sc_if->sk_port
* (0 + 1)) * 0x80), ((sc_if)->sk_ring_map->dm_segs[0].
ds_addr + __builtin_offsetof(struct msk_ring_data, sk_rx_ring
[(0)])))
;
2183 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_Y2_PREF_ADDRHI,sk_win_write_4(sc_if->sk_softc, 0x045C + ((sc_if->sk_port
* (0 + 1)) * 0x80), (u_int64_t)((sc_if)->sk_ring_map->
dm_segs[0].ds_addr + __builtin_offsetof(struct msk_ring_data,
sk_rx_ring[(0)])) >> 32)
2184 (u_int64_t)MSK_RX_RING_ADDR(sc_if, 0) >> 32)sk_win_write_4(sc_if->sk_softc, 0x045C + ((sc_if->sk_port
* (0 + 1)) * 0x80), (u_int64_t)((sc_if)->sk_ring_map->
dm_segs[0].ds_addr + __builtin_offsetof(struct msk_ring_data,
sk_rx_ring[(0)])) >> 32)
;
2185 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_Y2_PREF_CSR, 0x00000008)sk_win_write_4(sc_if->sk_softc, 0x0450 + ((sc_if->sk_port
* (0 + 1)) * 0x80), 0x00000008)
;
2186 SK_IF_READ_4(sc_if, 0, SK_RXQ1_Y2_PREF_CSR)sk_win_read_4(sc_if->sk_softc, 0x0450 + ((sc_if->sk_port
* (0 + 1)) * 0x80))
;
2187
2188 SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_Y2_PREF_CSR, 0x00000001)sk_win_write_4(sc_if->sk_softc, 0x06D0 + ((sc_if->sk_port
* (1 + 1)) * 0x80), 0x00000001)
;
2189 SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_Y2_PREF_CSR, 0x00000002)sk_win_write_4(sc_if->sk_softc, 0x06D0 + ((sc_if->sk_port
* (1 + 1)) * 0x80), 0x00000002)
;
2190 SK_IF_WRITE_2(sc_if, 1, SK_TXQA1_Y2_PREF_LIDX, MSK_TX_RING_CNT - 1)sk_win_write_2(sc_if->sk_softc, 0x06D4 + ((sc_if->sk_port
* (1 + 1)) * 0x80), 512 - 1)
;
2191 SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_Y2_PREF_ADDRLO,sk_win_write_4(sc_if->sk_softc, 0x06D8 + ((sc_if->sk_port
* (1 + 1)) * 0x80), ((sc_if)->sk_ring_map->dm_segs[0].
ds_addr + __builtin_offsetof(struct msk_ring_data, sk_tx_ring
[(0)])))
2192 MSK_TX_RING_ADDR(sc_if, 0))sk_win_write_4(sc_if->sk_softc, 0x06D8 + ((sc_if->sk_port
* (1 + 1)) * 0x80), ((sc_if)->sk_ring_map->dm_segs[0].
ds_addr + __builtin_offsetof(struct msk_ring_data, sk_tx_ring
[(0)])))
;
2193 SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_Y2_PREF_ADDRHI,sk_win_write_4(sc_if->sk_softc, 0x06DC + ((sc_if->sk_port
* (1 + 1)) * 0x80), (u_int64_t)((sc_if)->sk_ring_map->
dm_segs[0].ds_addr + __builtin_offsetof(struct msk_ring_data,
sk_tx_ring[(0)])) >> 32)
2194 (u_int64_t)MSK_TX_RING_ADDR(sc_if, 0) >> 32)sk_win_write_4(sc_if->sk_softc, 0x06DC + ((sc_if->sk_port
* (1 + 1)) * 0x80), (u_int64_t)((sc_if)->sk_ring_map->
dm_segs[0].ds_addr + __builtin_offsetof(struct msk_ring_data,
sk_tx_ring[(0)])) >> 32)
;
2195 SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_Y2_PREF_CSR, 0x00000008)sk_win_write_4(sc_if->sk_softc, 0x06D0 + ((sc_if->sk_port
* (1 + 1)) * 0x80), 0x00000008)
;
2196 SK_IF_READ_4(sc_if, 1, SK_TXQA1_Y2_PREF_CSR)sk_win_read_4(sc_if->sk_softc, 0x06D0 + ((sc_if->sk_port
* (1 + 1)) * 0x80))
;
2197
2198 SK_IF_WRITE_2(sc_if, 0, SK_RXQ1_Y2_PREF_PUTIDX,sk_win_write_2(sc_if->sk_softc, 0x0464 + ((sc_if->sk_port
* (0 + 1)) * 0x80), sc_if->sk_cdata.sk_rx_prod)
2199 sc_if->sk_cdata.sk_rx_prod)sk_win_write_2(sc_if->sk_softc, 0x0464 + ((sc_if->sk_port
* (0 + 1)) * 0x80), sc_if->sk_cdata.sk_rx_prod)
;
2200
2201 /*
2202 * tell the chip the tx ring is empty for now. the first
2203 * msk_start will end up posting the ADDR64 tx descriptor
2204 * that resets the high address.
2205 */
2206 SK_IF_WRITE_2(sc_if, 1, SK_TXQA1_Y2_PREF_PUTIDX, 0)sk_win_write_2(sc_if->sk_softc, 0x06E4 + ((sc_if->sk_port
* (1 + 1)) * 0x80), 0)
;
2207
2208 /* Configure interrupt handling */
2209 if (sc_if->sk_port == SK_PORT_A0)
2210 sc->sk_intrmask |= SK_Y2_INTRS1(0x00000004|0x00000001 |0x00000008|0x00000010);
2211 else
2212 sc->sk_intrmask |= SK_Y2_INTRS2(0x00000400|0x00000100 |0x00000800|0x00001000);
2213 sc->sk_intrmask |= SK_Y2_IMR_BMU0x40000000;
2214 CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask)(((sc)->sk_btag)->write_4(((sc)->sk_bhandle), ((0x000C
)), ((sc->sk_intrmask))))
;
2215
2216 ifp->if_flags |= IFF_RUNNING0x40;
2217 ifq_clr_oactive(&ifp->if_snd);
2218
2219 timeout_add_sec(&sc_if->sk_tick_ch, 1);
2220
2221 splx(s)spllower(s);
2222}
2223
2224void
2225msk_stop(struct sk_if_softc *sc_if, int softonly)
2226{
2227 struct sk_softc *sc = sc_if->sk_softc;
2228 struct ifnet *ifp = &sc_if->arpcom.ac_if;
2229 struct mbuf *m;
2230 bus_dmamap_t map;
2231 int i;
2232
2233 DPRINTFN(2, ("msk_stop\n"));
2234
2235 timeout_del(&sc_if->sk_tick_ch);
2236 timeout_del(&sc_if->sk_tick_rx);
2237
2238 ifp->if_flags &= ~IFF_RUNNING0x40;
2239 ifq_clr_oactive(&ifp->if_snd);
2240
2241 /* Stop transfer of Tx descriptors */
2242
2243 /* Stop transfer of Rx descriptors */
2244
2245 if (!softonly) {
2246 /* Turn off various components of this interface. */
2247 SK_IF_WRITE_1(sc_if,0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_SET)sk_win_write_1(sc_if->sk_softc, 0x0C48 + ((sc_if->sk_port
* (0 + 1)) * 0x80), 0x00000001)
;
2248 SK_IF_WRITE_1(sc_if,0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_SET)sk_win_write_1(sc_if->sk_softc, 0x0D48 + ((sc_if->sk_port
* (0 + 1)) * 0x80), 0x00000001)
;
2249 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_OFFLINE)sk_win_write_4(sc_if->sk_softc, 0x0434 + ((sc_if->sk_port
* (0 + 1)) * 0x80), (0x00000100|0x00000400| 0x00001000|0x00004000
| 0x00010000|0x00040000| 0x00100000))
;
2250 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF)sk_win_write_4(sc_if->sk_softc, 0x0828 + ((sc_if->sk_port
* (0 + 1)) * 0x80), 0x00000001|0x00000004)
;
2251 SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_BMU_CSR, SK_TXBMU_OFFLINE)sk_win_write_4(sc_if->sk_softc, 0x06B4 + ((sc_if->sk_port
* (1 + 1)) * 0x80), (0x00000100|0x00000400| 0x00001000|0x00004000
| 0x00010000|0x00040000| 0x00100000|0x00000040))
;
2252 SK_IF_WRITE_4(sc_if, 1, SK_TXRBA1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF)sk_win_write_4(sc_if->sk_softc, 0x0AA8 + ((sc_if->sk_port
* (1 + 1)) * 0x80), 0x00000001|0x00000004)
;
2253 SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL, SK_TXARCTL_OFF)sk_win_write_1(sc_if->sk_softc, 0x0210 + ((sc_if->sk_port
* (0 + 1)) * 0x80), 0x01)
;
2254 SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP)sk_win_write_1(sc_if->sk_softc, 0x0C28 + ((sc_if->sk_port
* (0 + 1)) * 0x80), 0x0002)
;
2255 SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL, SK_TXLEDCTL_COUNTER_STOP)sk_win_write_1(sc_if->sk_softc, 0x0D28 + ((sc_if->sk_port
* (0 + 1)) * 0x80), 0x0002)
;
2256 SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_OFF)sk_win_write_1(sc_if->sk_softc, 0x0C3C + ((sc_if->sk_port
* (0 + 1)) * 0x80), 0x0001)
;
2257 SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_LINKSYNC_OFF)sk_win_write_1(sc_if->sk_softc, 0x0C3C + ((sc_if->sk_port
* (0 + 1)) * 0x80), 0x0004)
;
2258
2259 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_Y2_PREF_CSR, 0x00000001)sk_win_write_4(sc_if->sk_softc, 0x0450 + ((sc_if->sk_port
* (0 + 1)) * 0x80), 0x00000001)
;
2260 SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_Y2_PREF_CSR, 0x00000001)sk_win_write_4(sc_if->sk_softc, 0x06D0 + ((sc_if->sk_port
* (1 + 1)) * 0x80), 0x00000001)
;
2261
2262 /* Disable interrupts */
2263 if (sc_if->sk_port == SK_PORT_A0)
2264 sc->sk_intrmask &= ~SK_Y2_INTRS1(0x00000004|0x00000001 |0x00000008|0x00000010);
2265 else
2266 sc->sk_intrmask &= ~SK_Y2_INTRS2(0x00000400|0x00000100 |0x00000800|0x00001000);
2267 CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask)(((sc)->sk_btag)->write_4(((sc)->sk_bhandle), ((0x000C
)), ((sc->sk_intrmask))))
;
2268 }
2269
2270 /* Free RX and TX mbufs still in the queues. */
2271 for (i = 0; i < MSK_RX_RING_CNT512; i++) {
2272 m = sc_if->sk_cdata.sk_rx_mbuf[i];
2273 if (m == NULL((void *)0))
2274 continue;
2275
2276 map = sc_if->sk_cdata.sk_rx_maps[i];
2277 bus_dmamap_sync(sc->sc_dmatag, map, 0, map->dm_mapsize,(*(sc->sc_dmatag)->_dmamap_sync)((sc->sc_dmatag), (map
), (0), (map->dm_mapsize), (0x02))
2278 BUS_DMASYNC_POSTREAD)(*(sc->sc_dmatag)->_dmamap_sync)((sc->sc_dmatag), (map
), (0), (map->dm_mapsize), (0x02))
;
2279 bus_dmamap_unload(sc->sc_dmatag, map)(*(sc->sc_dmatag)->_dmamap_unload)((sc->sc_dmatag), (
map))
;
2280
2281 m_freem(m);
2282
2283 sc_if->sk_cdata.sk_rx_mbuf[i] = NULL((void *)0);
2284 }
2285
2286 sc_if->sk_cdata.sk_rx_prod = 0;
2287 sc_if->sk_cdata.sk_rx_cons = 0;
2288
2289 for (i = 0; i < MSK_TX_RING_CNT512; i++) {
2290 m = sc_if->sk_cdata.sk_tx_mbuf[i];
2291 if (m == NULL((void *)0))
2292 continue;
2293
2294 map = sc_if->sk_cdata.sk_tx_maps[i];
2295 bus_dmamap_sync(sc->sc_dmatag, map, 0, map->dm_mapsize,(*(sc->sc_dmatag)->_dmamap_sync)((sc->sc_dmatag), (map
), (0), (map->dm_mapsize), (0x02))
2296 BUS_DMASYNC_POSTREAD)(*(sc->sc_dmatag)->_dmamap_sync)((sc->sc_dmatag), (map
), (0), (map->dm_mapsize), (0x02))
;
2297 bus_dmamap_unload(sc->sc_dmatag, map)(*(sc->sc_dmatag)->_dmamap_unload)((sc->sc_dmatag), (
map))
;
2298
2299 m_freem(m);
2300
2301 sc_if->sk_cdata.sk_tx_mbuf[i] = NULL((void *)0);
2302 }
2303}
2304
2305const struct cfattach mskc_ca = {
2306 sizeof(struct sk_softc), mskc_probe, mskc_attach, mskc_detach,
2307 mskc_activate
2308};
2309
2310struct cfdriver mskc_cd = {
2311 NULL((void *)0), "mskc", DV_DULL
2312};
2313
2314const struct cfattach msk_ca = {
2315 sizeof(struct sk_if_softc), msk_probe, msk_attach, msk_detach,
2316 msk_activate
2317};
2318
2319struct cfdriver msk_cd = {
2320 NULL((void *)0), "msk", DV_IFNET
2321};
2322
2323#if NKSTAT1 > 0
2324static uint32_t
2325msk_mib_read32(struct sk_if_softc *sc_if, uint32_t r)
2326{
2327 uint16_t hi, lo, xx;
2328
2329 hi = SK_YU_READ_2(sc_if, r + 4)sk_win_read_2((sc_if)->sk_softc, (((r + 4)) + 0x2800 + (((
(sc_if))->sk_port) * (0x3800 - 0x2800))))
;
2330 for (;;) {
2331 /* XXX barriers? */
2332 lo = SK_YU_READ_2(sc_if, r)sk_win_read_2((sc_if)->sk_softc, (((r)) + 0x2800 + ((((sc_if
))->sk_port) * (0x3800 - 0x2800))))
;
2333 xx = SK_YU_READ_2(sc_if, r + 4)sk_win_read_2((sc_if)->sk_softc, (((r + 4)) + 0x2800 + (((
(sc_if))->sk_port) * (0x3800 - 0x2800))))
;
2334
2335 if (hi == xx)
2336 break;
2337
2338 hi = xx;
2339 }
2340
2341 return (((uint32_t)hi << 16) | (uint32_t) lo);
2342}
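
Each 32-bit MIB counter is exposed as two 16-bit registers that cannot be read atomically, so the low half can carry into the high half between reads. Reading high, then low, then high again and retrying until the two high reads agree yields a snapshot whose halves belong to the same window; msk_mib_read64() below applies the same trick one level up, over two 32-bit halves. A standalone model of the retry loop, with reg_read16() as a hypothetical stand-in for the hardware access that deliberately advances the counter on every read:

/* Standalone sketch (not driver code): why the hi/lo/hi retry loop
 * gives a consistent snapshot of a counter exposed as two halves. */
#include <assert.h>
#include <stdint.h>

static uint32_t counter = 0x0000fffe;	/* about to carry into the high half */

static uint16_t
reg_read16(int high)
{
	uint16_t v = high ? (counter >> 16) : (counter & 0xffff);

	counter++;			/* counter keeps running under us */
	return (v);
}

static uint32_t
read_split_counter(void)
{
	uint16_t hi, lo, xx;

	hi = reg_read16(1);
	for (;;) {
		lo = reg_read16(0);
		xx = reg_read16(1);
		if (hi == xx)
			break;
		hi = xx;		/* carried while we read; retry */
	}
	return (((uint32_t)hi << 16) | lo);
}

int
main(void)
{
	uint32_t snap = read_split_counter();

	/* the snapshot is a value the counter actually passed through */
	assert(snap >= 0x0000fffe && snap <= counter);
	return (0);
}
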
2343
2344static uint64_t
2345msk_mib_read64(struct sk_if_softc *sc_if, uint32_t r)
2346{
2347 uint32_t hi, lo, xx;
2348
2349 hi = msk_mib_read32(sc_if, r + 8);
2350 for (;;) {
2351 lo = msk_mib_read32(sc_if, r);
2352 xx = msk_mib_read32(sc_if, r + 8);
2353
2354 if (hi == xx)
2355 break;
2356
2357 hi = xx;
2358 }
2359
2360 return (((uint64_t)hi << 32) | (uint64_t)lo);
2361}
2362
2363void
2364msk_kstat_attach(struct sk_if_softc *sc_if)
2365{
2366 struct kstat *ks;
2367 struct kstat_kv *kvs;
2368 struct msk_kstat *mks;
2369 size_t i;
2370
2371 ks = kstat_create(sc_if->sk_dev.dv_xname, 0, "msk-mib", 0,
2372 KSTAT_T_KV1, 0);
2373 if (ks == NULL((void *)0)) {
2374 /* oh well */
2375 return;
2376 }
2377
2378 mks = malloc(sizeof(*mks), M_DEVBUF2, M_WAITOK0x0001);
2379 rw_init(&mks->lock, "mskstat")_rw_init_flags(&mks->lock, "mskstat", 0, ((void *)0));
2380 mks->ks = ks;
2381
2382 kvs = mallocarray(nitems(msk_mib)(sizeof((msk_mib)) / sizeof((msk_mib)[0])), sizeof(*kvs),
2383 M_DEVBUF2, M_WAITOK0x0001|M_ZERO0x0008);
2384 for (i = 0; i < nitems(msk_mib)(sizeof((msk_mib)) / sizeof((msk_mib)[0])); i++) {
2385 const struct msk_mib *m = &msk_mib[i];
2386 kstat_kv_unit_init(&kvs[i], m->name, m->type, m->unit);
2387 }
2388
2389 ks->ks_softc = sc_if;
2390 ks->ks_data = kvs;
2391 ks->ks_datalen = nitems(msk_mib)(sizeof((msk_mib)) / sizeof((msk_mib)[0])) * sizeof(*kvs);
2392 ks->ks_read = msk_kstat_read;
2393 kstat_set_wlock(ks, &mks->lock);
2394
2395 kstat_install(ks);
2396
2397 sc_if->sk_kstat = mks;
2398}
2399
2400void
2401msk_kstat_detach(struct sk_if_softc *sc_if)
2402{
2403 struct msk_kstat *mks = sc_if->sk_kstat;
2404 struct kstat_kv *kvs;
2405 size_t kvslen;
2406
2407 if (mks == NULL((void *)0))
2408 return;
2409
2410 sc_if->sk_kstat = NULL((void *)0);
2411
2412 kvs = mks->ks->ks_data;
2413 kvslen = mks->ks->ks_datalen;
2414
2415 kstat_destroy(mks->ks);
2416 free(kvs, M_DEVBUF2, kvslen);
2417 free(mks, M_DEVBUF2, sizeof(*mks));
2418}
2419
2420int
2421msk_kstat_read(struct kstat *ks)
2422{
2423 struct sk_if_softc *sc_if = ks->ks_softc;
2424 struct kstat_kv *kvs = ks->ks_data;
2425 size_t i;
2426
2427 nanouptime(&ks->ks_updated);
2428
2429 for (i = 0; i < nitems(msk_mib)(sizeof((msk_mib)) / sizeof((msk_mib)[0])); i++) {
2430 const struct msk_mib *m = &msk_mib[i];
2431
2432 switch (m->type) {
2433 case KSTAT_KV_T_COUNTER32:
2434 kstat_kv_u32(&kvs[i])(&kvs[i])->kv_v.v_u32 = msk_mib_read32(sc_if, m->reg);
2435 break;
2436 case KSTAT_KV_T_COUNTER64:
2437 kstat_kv_u64(&kvs[i])(&kvs[i])->kv_v.v_u64 = msk_mib_read64(sc_if, m->reg);
2438 break;
2439 default:
2440 panic("unexpected msk_mib type");
2441 /* NOTREACHED */
2442 }
2443 }
2444
2445 return (0);
2446}
2447#endif /* NKSTAT */
2448
2449#ifdef MSK_DEBUG
2450void
2451msk_dump_txdesc(struct msk_tx_desc *le, int idx)
2452{
2453#define DESC_PRINT(X) \
2454 if (X) \
2455 printf("txdesc[%d]." #X "=%#x\n", \
2456 idx, X);
2457
2458 DESC_PRINT(letoh32(le->sk_addr)((__uint32_t)(le->sk_addr)));
2459 DESC_PRINT(letoh16(le->sk_len)((__uint16_t)(le->sk_len)));
2460 DESC_PRINT(le->sk_ctl);
2461 DESC_PRINT(le->sk_opcode);
2462#undef DESC_PRINT
2463}
2464
2465void
2466msk_dump_bytes(const char *data, int len)
2467{
2468 int c, i, j;
2469
2470 for (i = 0; i < len; i += 16) {
2471 printf("%08x ", i);
2472 c = len - i;
2473 if (c > 16) c = 16;
2474
2475 for (j = 0; j < c; j++) {
2476 printf("%02x ", data[i + j] & 0xff);
2477 if ((j & 0xf) == 7 && j > 0)
2478 printf(" ");
2479 }
2480
2481 for (; j < 16; j++)
2482 printf(" ");
2483 printf(" ");
2484
2485 for (j = 0; j < c; j++) {
2486 int ch = data[i + j] & 0xff;
2487 printf("%c", ' ' <= ch && ch <= '~' ? ch : ' ');
2488 }
2489
2490 printf("\n");
2491
2492 if (c < 16)
2493 break;
2494 }
2495}
2496
2497void
2498msk_dump_mbuf(struct mbuf *m)
2499{
2500 int count = m->m_pkthdrM_dat.MH.MH_pkthdr.len;
2501
2502 printf("m=%#lx, m->m_pkthdr.len=%#d\n", m, m->m_pkthdrM_dat.MH.MH_pkthdr.len);
2503
2504 while (count > 0 && m) {
2505 printf("m=%#lx, m->m_data=%#lx, m->m_len=%d\n",
2506 m, m->m_datam_hdr.mh_data, m->m_lenm_hdr.mh_len);
2507 msk_dump_bytes(mtod(m, char *)((char *)((m)->m_hdr.mh_data)), m->m_lenm_hdr.mh_len);
2508
2509 count -= m->m_lenm_hdr.mh_len;
2510 m = m->m_nextm_hdr.mh_next;
2511 }
2512}
2513#endif