File: dev/pci/if_vic.c
Warning: line 755, column 4: Value stored to 'rxd' is never read
   1 | /* $OpenBSD: if_vic.c,v 1.105 2023/11/10 15:51:24 bluhm Exp $ */
   2 |
   3 | /*
   4 |  * Copyright (c) 2006 Reyk Floeter <reyk@openbsd.org>
   5 |  * Copyright (c) 2006 David Gwynne <dlg@openbsd.org>
   6 |  *
   7 |  * Permission to use, copy, modify, and distribute this software for any
   8 |  * purpose with or without fee is hereby granted, provided that the above
   9 |  * copyright notice and this permission notice appear in all copies.
  10 |  *
  11 |  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  12 |  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  13 |  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
  14 |  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  15 |  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  16 |  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  17 |  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  18 |  */
  19 |
  20 | /*
  21 |  * Driver for the VMware Virtual NIC ("vmxnet")
  22 |  */
  23 |
  24 | #include "bpfilter.h"
  25 |
  26 | #include <sys/param.h>
  27 | #include <sys/systm.h>
  28 | #include <sys/sockio.h>
  29 | #include <sys/mbuf.h>
  30 | #include <sys/kernel.h>
  31 | #include <sys/socket.h>
  32 | #include <sys/malloc.h>
  33 | #include <sys/timeout.h>
  34 | #include <sys/device.h>
  35 |
  36 | #include <machine/bus.h>
  37 | #include <machine/intr.h>
  38 |
  39 | #include <net/if.h>
  40 | #include <net/if_media.h>
  41 |
  42 | #if NBPFILTER > 0
  43 | #include <net/bpf.h>
  44 | #endif
  45 |
  46 | #include <netinet/in.h>
  47 | #include <netinet/if_ether.h>
  48 |
  49 | #include <dev/pci/pcireg.h>
  50 | #include <dev/pci/pcivar.h>
  51 | #include <dev/pci/pcidevs.h>
  52 |
  53 | #define VIC_PCI_BAR             PCI_MAPREG_START /* Base Address Register */
  54 |
  55 | #define VIC_LANCE_SIZE          0x20
  56 | #define VIC_MORPH_SIZE          0x04
  57 | #define VIC_MORPH_MASK          0xffff
  58 | #define VIC_MORPH_LANCE         0x2934
  59 | #define VIC_MORPH_VMXNET        0x4392
  60 | #define VIC_VMXNET_SIZE         0x40
  61 | #define VIC_LANCE_MINLEN        (VIC_LANCE_SIZE + VIC_MORPH_SIZE + \
  62 |                                     VIC_VMXNET_SIZE)
  63 |
  64 | #define VIC_MAGIC               0xbabe864f
  65 |
  66 | /* Register address offsets */
  67 | #define VIC_DATA_ADDR           0x0000          /* Shared data address */
  68 | #define VIC_DATA_LENGTH         0x0004          /* Shared data length */
  69 | #define VIC_Tx_ADDR             0x0008          /* Tx pointer address */
  70 |
  71 | /* Command register */
  72 | #define VIC_CMD                 0x000c          /* Command register */
  73 | #define  VIC_CMD_INTR_ACK       0x0001  /* Acknowledge interrupt */
  74 | #define  VIC_CMD_MCASTFIL       0x0002  /* Multicast address filter */
  75 | #define   VIC_CMD_MCASTFIL_LENGTH       2
  76 | #define  VIC_CMD_IFF            0x0004  /* Interface flags */
  77 | #define   VIC_CMD_IFF_PROMISC   0x0001  /* Promiscuous enabled */
  78 | #define   VIC_CMD_IFF_BROADCAST 0x0002  /* Broadcast enabled */
  79 | #define   VIC_CMD_IFF_MULTICAST 0x0004  /* Multicast enabled */
  80 | #define  VIC_CMD_INTR_DISABLE   0x0020  /* Disable interrupts */
  81 | #define  VIC_CMD_INTR_ENABLE    0x0040  /* Enable interrupts */
  82 | #define  VIC_CMD_Tx_DONE        0x0100  /* Tx done register */
  83 | #define  VIC_CMD_NUM_Rx_BUF     0x0200  /* Number of Rx buffers */
  84 | #define  VIC_CMD_NUM_Tx_BUF     0x0400  /* Number of Tx buffers */
  85 | #define  VIC_CMD_NUM_PINNED_BUF 0x0800  /* Number of pinned buffers */
  86 | #define  VIC_CMD_HWCAP          0x1000  /* Capability register */
  87 | #define   VIC_CMD_HWCAP_SG              (1<<0)  /* Scatter-gather transmits */
  88 | #define   VIC_CMD_HWCAP_CSUM_IPv4       (1<<1)  /* TCP/UDP cksum */
  89 | #define   VIC_CMD_HWCAP_CSUM_ALL        (1<<3)  /* Hardware cksum */
  90 | #define   VIC_CMD_HWCAP_CSUM \
  91 |             (VIC_CMD_HWCAP_CSUM_IPv4 | VIC_CMD_HWCAP_CSUM_ALL)
  92 | #define   VIC_CMD_HWCAP_DMA_HIGH        (1<<4)  /* High DMA mapping */
  93 | #define   VIC_CMD_HWCAP_TOE             (1<<5)  /* TCP offload engine */
  94 | #define   VIC_CMD_HWCAP_TSO             (1<<6)  /* TCP segmentation offload */
  95 | #define   VIC_CMD_HWCAP_TSO_SW          (1<<7)  /* Software TCP segmentation */
  96 | #define   VIC_CMD_HWCAP_VPROM           (1<<8)  /* Virtual PROM available */
  97 | #define   VIC_CMD_HWCAP_VLAN_Tx         (1<<9)  /* Hardware VLAN MTU Tx */
  98 | #define   VIC_CMD_HWCAP_VLAN_Rx         (1<<10) /* Hardware VLAN MTU Rx */
  99 | #define   VIC_CMD_HWCAP_VLAN_SW         (1<<11) /* Software VLAN MTU */
 100 | #define   VIC_CMD_HWCAP_VLAN \
 101 |             (VIC_CMD_HWCAP_VLAN_Tx | VIC_CMD_HWCAP_VLAN_Rx | \
 102 |             VIC_CMD_HWCAP_VLAN_SW)
 103 | #define  VIC_CMD_HWCAP_BITS \
 104 |             "\20\01SG\02CSUM4\03CSUM\04HDMA\05TOE\06TSO" \
 105 |             "\07TSOSW\10VPROM\13VLANTx\14VLANRx\15VLANSW"
 106 | #define  VIC_CMD_FEATURE        0x2000  /* Additional feature register */
 107 | #define   VIC_CMD_FEATURE_0_Tx  (1<<0)
 108 | #define   VIC_CMD_FEATURE_TSO   (1<<1)
 109 |
 110 | #define VIC_LLADDR              0x0010          /* MAC address register */
 111 | #define VIC_VERSION_MINOR       0x0018          /* Minor version register */
 112 | #define VIC_VERSION_MAJOR       0x001c          /* Major version register */
 113 | #define VIC_VERSION_MAJOR_M     0xffff0000
 114 |
 115 | /* Status register */
 116 | #define VIC_STATUS              0x0020
 117 | #define  VIC_STATUS_CONNECTED   (1<<0)
 118 | #define  VIC_STATUS_ENABLED     (1<<1)
 119 |
 120 | #define VIC_TOE_ADDR            0x0024          /* TCP offload address */
 121 |
 122 | /* Virtual PROM address */
 123 | #define VIC_VPROM               0x0028
 124 | #define VIC_VPROM_LENGTH        6
 125 |
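
Note: the device is driven through the single VIC_CMD register: a command code
is written to VIC_CMD and, for queries, the result is read back from the same
register. vic_read_cmd() near the bottom of the file wraps this write-then-read
sequence; vic_query() uses it like so:

        sc->sc_nrxbuf = vic_read_cmd(sc, VIC_CMD_NUM_Rx_BUF);
        sc->sc_ntxbuf = vic_read_cmd(sc, VIC_CMD_NUM_Tx_BUF);
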
 126 | /* Shared DMA data structures */
 127 |
 128 | struct vic_sg {
 129 |         u_int32_t       sg_addr_low;
 130 |         u_int16_t       sg_addr_high;
 131 |         u_int16_t       sg_length;
 132 | } __packed;
 133 |
 134 | #define VIC_SG_MAX              6
 135 | #define VIC_SG_ADDR_MACH        0
 136 | #define VIC_SG_ADDR_PHYS        1
 137 | #define VIC_SG_ADDR_VIRT        3
 138 |
 139 | struct vic_sgarray {
 140 |         u_int16_t       sa_addr_type;
 141 |         u_int16_t       sa_length;
 142 |         struct vic_sg   sa_sg[VIC_SG_MAX];
 143 | } __packed;
 144 |
 145 | struct vic_rxdesc {
 146 |         u_int64_t       rx_physaddr;
 147 |         u_int32_t       rx_buflength;
 148 |         u_int32_t       rx_length;
 149 |         u_int16_t       rx_owner;
 150 |         u_int16_t       rx_flags;
 151 |         u_int32_t       rx_priv;
 152 | } __packed;
 153 |
 154 | #define VIC_RX_FLAGS_CSUMHW_OK  0x0001
 155 |
 156 | struct vic_txdesc {
 157 |         u_int16_t               tx_flags;
 158 |         u_int16_t               tx_owner;
 159 |         u_int32_t               tx_priv;
 160 |         u_int32_t               tx_tsomss;
 161 |         struct vic_sgarray      tx_sa;
 162 | } __packed;
 163 |
 164 | #define VIC_TX_FLAGS_KEEP       0x0001
 165 | #define VIC_TX_FLAGS_TXURN      0x0002
 166 | #define VIC_TX_FLAGS_CSUMHW     0x0004
 167 | #define VIC_TX_FLAGS_TSO        0x0008
 168 | #define VIC_TX_FLAGS_PINNED     0x0010
 169 | #define VIC_TX_FLAGS_QRETRY     0x1000
 170 |
 171 | struct vic_stats {
 172 |         u_int32_t       vs_tx_count;
 173 |         u_int32_t       vs_tx_packets;
 174 |         u_int32_t       vs_tx_0copy;
 175 |         u_int32_t       vs_tx_copy;
 176 |         u_int32_t       vs_tx_maxpending;
 177 |         u_int32_t       vs_tx_stopped;
 178 |         u_int32_t       vs_tx_overrun;
 179 |         u_int32_t       vs_intr;
 180 |         u_int32_t       vs_rx_packets;
 181 |         u_int32_t       vs_rx_underrun;
 182 | } __packed;
 183 |
 184 | #define VIC_NRXRINGS            2
 185 |
 186 | struct vic_data {
 187 |         u_int32_t       vd_magic;
 188 |
 189 |         struct {
 190 |                 u_int32_t       length;
 191 |                 u_int32_t       nextidx;
 192 |         } vd_rx[VIC_NRXRINGS];
 193 |
 194 |         u_int32_t       vd_irq;
 195 |         u_int32_t       vd_iff;
 196 |
 197 |         u_int32_t       vd_mcastfil[VIC_CMD_MCASTFIL_LENGTH];
 198 |
 199 |         u_int32_t       vd_reserved1[1];
 200 |
 201 |         u_int32_t       vd_tx_length;
 202 |         u_int32_t       vd_tx_curidx;
 203 |         u_int32_t       vd_tx_nextidx;
 204 |         u_int32_t       vd_tx_stopped;
 205 |         u_int32_t       vd_tx_triggerlvl;
 206 |         u_int32_t       vd_tx_queued;
 207 |         u_int32_t       vd_tx_minlength;
 208 |
 209 |         u_int32_t       vd_reserved2[6];
 210 |
 211 |         u_int32_t       vd_rx_saved_nextidx[VIC_NRXRINGS];
 212 |         u_int32_t       vd_tx_saved_nextidx;
 213 |
 214 |         u_int32_t       vd_length;
 215 |         u_int32_t       vd_rx_offset[VIC_NRXRINGS];
 216 |         u_int32_t       vd_tx_offset;
 217 |         u_int32_t       vd_debug;
 218 |         u_int32_t       vd_tx_physaddr;
 219 |         u_int32_t       vd_tx_physaddr_length;
 220 |         u_int32_t       vd_tx_maxlength;
 221 |
 222 |         struct vic_stats        vd_stats;
 223 | } __packed;
 224 |
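
Note: struct vic_data is the header of the one contiguous DMA region shared
with the device. The header is followed in the same region by VIC_NRXRINGS
rings of struct vic_rxdesc and a single ring of struct vic_txdesc; the byte
offset of each ring is published to the device through vd_rx_offset[] and
vd_tx_offset. vic_alloc_data() sizes the region accordingly:

        sc->sc_dma_size = sizeof(struct vic_data) +
            (sc->sc_nrxbuf * VIC_NRXRINGS) * sizeof(struct vic_rxdesc) +
            sc->sc_ntxbuf * sizeof(struct vic_txdesc);
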
 225 | #define VIC_OWNER_DRIVER        0
 226 | #define VIC_OWNER_DRIVER_PEND   1
 227 | #define VIC_OWNER_NIC           2
 228 | #define VIC_OWNER_NIC_PEND      3
 229 |
 230 | #define VIC_JUMBO_FRAMELEN      9018
 231 | #define VIC_JUMBO_MTU           (VIC_JUMBO_FRAMELEN - ETHER_HDR_LEN - ETHER_CRC_LEN)
 232 |
 233 | #define VIC_NBUF                100
 234 | #define VIC_NBUF_MAX            128
 235 | #define VIC_MAX_SCATTER         1       /* 8? */
 236 | #define VIC_QUEUE_SIZE          VIC_NBUF_MAX
 237 | #define VIC_INC(_x, _y)         (_x) = ((_x) + 1) % (_y)
 238 | #define VIC_TX_TIMEOUT          5
 239 |
 240 | #define VIC_MIN_FRAMELEN        (ETHER_MIN_LEN - ETHER_CRC_LEN)
 241 |
 242 | #define VIC_TXURN_WARN(_sc)     ((_sc)->sc_txpending >= ((_sc)->sc_ntxbuf - 5))
 243 | #define VIC_TXURN(_sc)          ((_sc)->sc_txpending >= (_sc)->sc_ntxbuf)
 244 |
 245 | struct vic_rxbuf {
 246 |         bus_dmamap_t    rxb_dmamap;
 247 |         struct mbuf     *rxb_m;
 248 | };
 249 |
 250 | struct vic_txbuf {
 251 |         bus_dmamap_t    txb_dmamap;
 252 |         struct mbuf     *txb_m;
 253 | };
 254 |
 255 | struct vic_softc {
 256 |         struct device           sc_dev;
 257 |
 258 |         pci_chipset_tag_t       sc_pc;
 259 |         pcitag_t                sc_tag;
 260 |
 261 |         bus_space_tag_t         sc_iot;
 262 |         bus_space_handle_t      sc_ioh;
 263 |         bus_size_t              sc_ios;
 264 |         bus_dma_tag_t           sc_dmat;
 265 |
 266 |         void                    *sc_ih;
 267 |
 268 |         struct timeout          sc_tick;
 269 |
 270 |         struct arpcom           sc_ac;
 271 |         struct ifmedia          sc_media;
 272 |
 273 |         u_int32_t               sc_nrxbuf;
 274 |         u_int32_t               sc_ntxbuf;
 275 |         u_int32_t               sc_cap;
 276 |         u_int32_t               sc_feature;
 277 |         u_int8_t                sc_lladdr[ETHER_ADDR_LEN];
 278 |
 279 |         bus_dmamap_t            sc_dma_map;
 280 |         bus_dma_segment_t       sc_dma_seg;
 281 |         size_t                  sc_dma_size;
 282 |         caddr_t                 sc_dma_kva;
 283 | #define VIC_DMA_DVA(_sc)        ((_sc)->sc_dma_map->dm_segs[0].ds_addr)
 284 | #define VIC_DMA_KVA(_sc)        ((void *)(_sc)->sc_dma_kva)
 285 |
 286 |         struct vic_data         *sc_data;
 287 |
 288 |         struct {
 289 |                 struct if_rxring        ring;
 290 |                 struct vic_rxbuf        *bufs;
 291 |                 struct vic_rxdesc       *slots;
 292 |                 int                     end;
 293 |                 u_int                   pktlen;
 294 |         } sc_rxq[VIC_NRXRINGS];
 295 |
 296 |         struct vic_txbuf        *sc_txbuf;
 297 |         struct vic_txdesc       *sc_txq;
 298 |         volatile u_int          sc_txpending;
 299 | };
 300 |
 301 | struct cfdriver vic_cd = {
 302 |         NULL, "vic", DV_IFNET
 303 | };
 304 |
 305 | int             vic_match(struct device *, void *, void *);
 306 | void            vic_attach(struct device *, struct device *, void *);
 307 |
 308 | const struct cfattach vic_ca = {
 309 |         sizeof(struct vic_softc), vic_match, vic_attach
 310 | };
 311 |
 312 | int             vic_intr(void *);
 313 |
 314 | int             vic_query(struct vic_softc *);
 315 | int             vic_alloc_data(struct vic_softc *);
 316 | int             vic_init_data(struct vic_softc *sc);
 317 | int             vic_uninit_data(struct vic_softc *sc);
 318 |
 319 | u_int32_t       vic_read(struct vic_softc *, bus_size_t);
 320 | void            vic_write(struct vic_softc *, bus_size_t, u_int32_t);
 321 |
 322 | u_int32_t       vic_read_cmd(struct vic_softc *, u_int32_t);
 323 |
 324 | int             vic_alloc_dmamem(struct vic_softc *);
 325 | void            vic_free_dmamem(struct vic_softc *);
 326 |
 327 | void            vic_link_state(struct vic_softc *);
 328 | void            vic_rx_fill(struct vic_softc *, int);
 329 | void            vic_rx_proc(struct vic_softc *, int);
 330 | void            vic_tx_proc(struct vic_softc *);
 331 | void            vic_iff(struct vic_softc *);
 332 | void            vic_getlladdr(struct vic_softc *);
 333 | void            vic_setlladdr(struct vic_softc *);
 334 | int             vic_media_change(struct ifnet *);
 335 | void            vic_media_status(struct ifnet *, struct ifmediareq *);
 336 | void            vic_start(struct ifnet *);
 337 | int             vic_load_txb(struct vic_softc *, struct vic_txbuf *,
 338 |                     struct mbuf *);
 339 | void            vic_watchdog(struct ifnet *);
 340 | int             vic_ioctl(struct ifnet *, u_long, caddr_t);
 341 | int             vic_rxrinfo(struct vic_softc *, struct if_rxrinfo *);
 342 | void            vic_init(struct ifnet *);
 343 | void            vic_stop(struct ifnet *);
 344 | void            vic_tick(void *);
 345 |
 346 | #define DEVNAME(_s)     ((_s)->sc_dev.dv_xname)
 347 |
 348 | struct mbuf     *vic_alloc_mbuf(struct vic_softc *, bus_dmamap_t, u_int);
 349 |
 350 | const struct pci_matchid vic_devices[] = {
 351 |         { PCI_VENDOR_VMWARE, PCI_PRODUCT_VMWARE_NET }
 352 | };
 353 |
 354 | int
 355 | vic_match(struct device *parent, void *match, void *aux)
 356 | {
 357 |         struct pci_attach_args  *pa = aux;
 358 |         pcireg_t                memtype;
 359 |         bus_size_t              pcisize;
 360 |         bus_addr_t              pciaddr;
 361 |
 362 |         switch (pa->pa_id) {
 363 |         case PCI_ID_CODE(PCI_VENDOR_VMWARE, PCI_PRODUCT_VMWARE_NET):
 364 |                 return (1);
 365 |
 366 |         case PCI_ID_CODE(PCI_VENDOR_AMD, PCI_PRODUCT_AMD_PCNET_PCI):
 367 |                 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, VIC_PCI_BAR);
 368 |                 if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, VIC_PCI_BAR,
 369 |                     memtype, &pciaddr, &pcisize, NULL) != 0)
 370 |                         break;
 371 |
 372 |                 if (pcisize > VIC_LANCE_MINLEN)
 373 |                         return (2);
 374 |
 375 |                 break;
 376 |         }
 377 |
 378 |         return (0);
 379 | }
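
Note: a native vmxnet device is claimed with match priority 1. For the AMD
PCnet ID the BAR size is probed instead: a morphable vlance device exposes a
BAR large enough to hold all three register blocks, so returning 2 lets vic(4)
outbid the plain PCnet Ethernet driver, which presumably claims the same ID at
priority 1. The layout implied by the defines above:

        0x00-0x1f  lance registers   (VIC_LANCE_SIZE)
        0x20-0x23  morph register    (VIC_MORPH_SIZE)
        0x24-0x63  vmxnet registers  (VIC_VMXNET_SIZE)
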
 380 |
 381 | void
 382 | vic_attach(struct device *parent, struct device *self, void *aux)
 383 | {
 384 |         struct vic_softc        *sc = (struct vic_softc *)self;
 385 |         struct pci_attach_args  *pa = aux;
 386 |         bus_space_handle_t      ioh;
 387 |         pcireg_t                r;
 388 |         pci_intr_handle_t       ih;
 389 |         struct ifnet            *ifp;
 390 |
 391 |         sc->sc_pc = pa->pa_pc;
 392 |         sc->sc_tag = pa->pa_tag;
 393 |         sc->sc_dmat = pa->pa_dmat;
 394 |
 395 |         r = pci_mapreg_type(sc->sc_pc, sc->sc_tag, VIC_PCI_BAR);
 396 |         if (pci_mapreg_map(pa, VIC_PCI_BAR, r, 0, &sc->sc_iot,
 397 |             &ioh, NULL, &sc->sc_ios, 0) != 0) {
 398 |                 printf(": unable to map system interface register\n");
 399 |                 return;
 400 |         }
 401 |
 402 |         switch (pa->pa_id) {
 403 |         case PCI_ID_CODE(PCI_VENDOR_VMWARE, PCI_PRODUCT_VMWARE_NET):
 404 |                 if (bus_space_subregion(sc->sc_iot, ioh, 0, sc->sc_ios,
 405 |                     &sc->sc_ioh) != 0) {
 406 |                         printf(": unable to map register window\n");
 407 |                         goto unmap;
 408 |                 }
 409 |                 break;
 410 |
 411 |         case PCI_ID_CODE(PCI_VENDOR_AMD, PCI_PRODUCT_AMD_PCNET_PCI):
 412 |                 if (bus_space_subregion(sc->sc_iot, ioh,
 413 |                     VIC_LANCE_SIZE + VIC_MORPH_SIZE, VIC_VMXNET_SIZE,
 414 |                     &sc->sc_ioh) != 0) {
 415 |                         printf(": unable to map register window\n");
 416 |                         goto unmap;
 417 |                 }
 418 |
 419 |                 bus_space_barrier(sc->sc_iot, ioh, VIC_LANCE_SIZE, 4,
 420 |                     BUS_SPACE_BARRIER_READ);
 421 |                 r = bus_space_read_4(sc->sc_iot, ioh, VIC_LANCE_SIZE);
 422 |
 423 |                 if ((r & VIC_MORPH_MASK) == VIC_MORPH_VMXNET)
 424 |                         break;
 425 |                 if ((r & VIC_MORPH_MASK) != VIC_MORPH_LANCE) {
 426 |                         printf(": unexpected morph value (0x%08x)\n", r);
 427 |                         goto unmap;
 428 |                 }
 429 |
 430 |                 r &= ~VIC_MORPH_MASK;
 431 |                 r |= VIC_MORPH_VMXNET;
 432 |
 433 |                 bus_space_write_4(sc->sc_iot, ioh, VIC_LANCE_SIZE, r);
 434 |                 bus_space_barrier(sc->sc_iot, ioh, VIC_LANCE_SIZE, 4,
 435 |                     BUS_SPACE_BARRIER_WRITE);
 436 |
 437 |                 bus_space_barrier(sc->sc_iot, ioh, VIC_LANCE_SIZE, 4,
 438 |                     BUS_SPACE_BARRIER_READ);
 439 |                 r = bus_space_read_4(sc->sc_iot, ioh, VIC_LANCE_SIZE);
 440 |
 441 |                 if ((r & VIC_MORPH_MASK) != VIC_MORPH_VMXNET) {
 442 |                         printf(": unable to morph vlance chip\n");
 443 |                         goto unmap;
 444 |                 }
 445 |
 446 |                 break;
 447 |         }
 448 |
 449 |         if (pci_intr_map(pa, &ih) != 0) {
 450 |                 printf(": unable to map interrupt\n");
 451 |                 goto unmap;
 452 |         }
 453 |
 454 |         sc->sc_ih = pci_intr_establish(pa->pa_pc, ih, IPL_NET,
 455 |             vic_intr, sc, DEVNAME(sc));
 456 |         if (sc->sc_ih == NULL) {
 457 |                 printf(": unable to establish interrupt\n");
 458 |                 goto unmap;
 459 |         }
 460 |
 461 |         if (vic_query(sc) != 0) {
 462 |                 /* error printed by vic_query */
 463 |                 goto unmap;
 464 |         }
 465 |
 466 |         if (vic_alloc_data(sc) != 0) {
 467 |                 /* error printed by vic_alloc_data */
 468 |                 goto unmap;
 469 |         }
 470 |
 471 |         timeout_set(&sc->sc_tick, vic_tick, sc);
 472 |
 473 |         bcopy(sc->sc_lladdr, sc->sc_ac.ac_enaddr, ETHER_ADDR_LEN);
 474 |
 475 |         ifp = &sc->sc_ac.ac_if;
 476 |         ifp->if_softc = sc;
 477 |         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
 478 |         ifp->if_ioctl = vic_ioctl;
 479 |         ifp->if_start = vic_start;
 480 |         ifp->if_watchdog = vic_watchdog;
 481 |         ifp->if_hardmtu = VIC_JUMBO_MTU;
 482 |         strlcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
 483 |         ifq_init_maxlen(&ifp->if_snd, sc->sc_ntxbuf - 1);
 484 |
 485 |         ifp->if_capabilities = IFCAP_VLAN_MTU;
 486 |
 487 | #if 0
 488 |         /* XXX interface capabilities */
 489 |         if (sc->sc_cap & VIC_CMD_HWCAP_VLAN)
 490 |                 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
 491 |         if (sc->sc_cap & VIC_CMD_HWCAP_CSUM)
 492 |                 ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
 493 |                     IFCAP_CSUM_UDPv4;
 494 | #endif
 495 |
 496 |         ifmedia_init(&sc->sc_media, 0, vic_media_change, vic_media_status);
 497 |         ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
 498 |         ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
 499 |
 500 |         if_attach(ifp);
 501 |         ether_ifattach(ifp);
 502 |
 503 |         printf(": %s, address %s\n", pci_intr_string(pa->pa_pc, ih),
 504 |             ether_sprintf(sc->sc_lladdr));
 505 |
 506 | #ifdef VIC_DEBUG
 507 |         printf("%s: feature 0x%8x, cap 0x%8x, rx/txbuf %d/%d\n", DEVNAME(sc),
 508 |             sc->sc_feature, sc->sc_cap, sc->sc_nrxbuf, sc->sc_ntxbuf);
 509 | #endif
 510 |
 511 |         return;
 512 |
 513 | unmap:
 514 |         bus_space_unmap(sc->sc_iot, ioh, sc->sc_ios);
 515 |         sc->sc_ios = 0;
 516 | }
 517 |
 518 | int
 519 | vic_query(struct vic_softc *sc)
 520 | {
 521 |         u_int32_t       major, minor;
 522 |
 523 |         major = vic_read(sc, VIC_VERSION_MAJOR);
 524 |         minor = vic_read(sc, VIC_VERSION_MINOR);
 525 |
 526 |         /* Check for a supported version */
 527 |         if ((major & VIC_VERSION_MAJOR_M) !=
 528 |             (VIC_MAGIC & VIC_VERSION_MAJOR_M)) {
 529 |                 printf(": magic mismatch\n");
 530 |                 return (1);
 531 |         }
 532 |
 533 |         if (VIC_MAGIC > major || VIC_MAGIC < minor) {
 534 |                 printf(": unsupported version (%X)\n",
 535 |                     major & ~VIC_VERSION_MAJOR_M);
 536 |                 return (1);
 537 |         }
 538 |
 539 |         sc->sc_nrxbuf = vic_read_cmd(sc, VIC_CMD_NUM_Rx_BUF);
 540 |         sc->sc_ntxbuf = vic_read_cmd(sc, VIC_CMD_NUM_Tx_BUF);
 541 |         sc->sc_feature = vic_read_cmd(sc, VIC_CMD_FEATURE);
 542 |         sc->sc_cap = vic_read_cmd(sc, VIC_CMD_HWCAP);
 543 |
 544 |         vic_getlladdr(sc);
 545 |
 546 |         if (sc->sc_nrxbuf > VIC_NBUF_MAX || sc->sc_nrxbuf == 0)
 547 |                 sc->sc_nrxbuf = VIC_NBUF;
 548 |         if (sc->sc_ntxbuf > VIC_NBUF_MAX || sc->sc_ntxbuf == 0)
 549 |                 sc->sc_ntxbuf = VIC_NBUF;
 550 |
 551 |         return (0);
 552 | }
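
Note: the version handshake accepts the device only when the major version
shares its top 16 bits with VIC_MAGIC and the magic falls inside the
advertised range, i.e. minor <= VIC_MAGIC <= major; the second check above is
exactly that condition written as its negation.
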
 553 |
 554 | int
 555 | vic_alloc_data(struct vic_softc *sc)
 556 | {
 557 |         u_int8_t                *kva;
 558 |         u_int                   offset;
 559 |         struct vic_rxdesc       *rxd;
 560 |         int                     i, q;
 561 |
 562 |         sc->sc_rxq[0].pktlen = MCLBYTES;
 563 |         sc->sc_rxq[1].pktlen = 4096;
 564 |
 565 |         for (q = 0; q < VIC_NRXRINGS; q++) {
 566 |                 sc->sc_rxq[q].bufs = mallocarray(sc->sc_nrxbuf,
 567 |                     sizeof(struct vic_rxbuf), M_DEVBUF, M_NOWAIT | M_ZERO);
 568 |                 if (sc->sc_rxq[q].bufs == NULL) {
 569 |                         printf(": unable to allocate rxbuf for ring %d\n", q);
 570 |                         goto freerx;
 571 |                 }
 572 |         }
 573 |
 574 |         sc->sc_txbuf = mallocarray(sc->sc_ntxbuf, sizeof(struct vic_txbuf),
 575 |             M_DEVBUF, M_NOWAIT);
 576 |         if (sc->sc_txbuf == NULL) {
 577 |                 printf(": unable to allocate txbuf\n");
 578 |                 goto freerx;
 579 |         }
 580 |
 581 |         sc->sc_dma_size = sizeof(struct vic_data) +
 582 |             (sc->sc_nrxbuf * VIC_NRXRINGS) * sizeof(struct vic_rxdesc) +
 583 |             sc->sc_ntxbuf * sizeof(struct vic_txdesc);
 584 |
 585 |         if (vic_alloc_dmamem(sc) != 0) {
 586 |                 printf(": unable to allocate dma region\n");
 587 |                 goto freetx;
 588 |         }
 589 |         kva = VIC_DMA_KVA(sc);
 590 |
 591 |         /* set up basic vic data */
 592 |         sc->sc_data = VIC_DMA_KVA(sc);
 593 |
 594 |         sc->sc_data->vd_magic = VIC_MAGIC;
 595 |         sc->sc_data->vd_length = sc->sc_dma_size;
 596 |
 597 |         offset = sizeof(struct vic_data);
 598 |
 599 |         /* set up the rx rings */
 600 |
 601 |         for (q = 0; q < VIC_NRXRINGS; q++) {
 602 |                 sc->sc_rxq[q].slots = (struct vic_rxdesc *)&kva[offset];
 603 |                 sc->sc_data->vd_rx_offset[q] = offset;
 604 |                 sc->sc_data->vd_rx[q].length = sc->sc_nrxbuf;
 605 |
 606 |                 for (i = 0; i < sc->sc_nrxbuf; i++) {
 607 |                         rxd = &sc->sc_rxq[q].slots[i];
 608 |
 609 |                         rxd->rx_physaddr = 0;
 610 |                         rxd->rx_buflength = 0;
 611 |                         rxd->rx_length = 0;
 612 |                         rxd->rx_owner = VIC_OWNER_DRIVER;
 613 |
 614 |                         offset += sizeof(struct vic_rxdesc);
 615 |                 }
 616 |         }
 617 |
 618 |         /* set up the tx ring */
 619 |         sc->sc_txq = (struct vic_txdesc *)&kva[offset];
 620 |
 621 |         sc->sc_data->vd_tx_offset = offset;
 622 |         sc->sc_data->vd_tx_length = sc->sc_ntxbuf;
 623 |
 624 |         return (0);
 625 | freetx:
 626 |         free(sc->sc_txbuf, M_DEVBUF, 0);
 627 |         q = VIC_NRXRINGS;
 628 | freerx:
 629 |         while (q--)
 630 |                 free(sc->sc_rxq[q].bufs, M_DEVBUF, 0);
 631 |
 632 |         return (1);
 633 | }
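
Note: only the descriptor rings live in the shared region; the packet buffers
themselves are separate mbufs mapped later by vic_init_data()/vic_rx_fill().
The rings are carved out by walking a byte offset through the kva, so the
per-slot loop above is equivalent to this sketch for one rx ring:

        sc->sc_rxq[q].slots = (struct vic_rxdesc *)&kva[offset];
        sc->sc_data->vd_rx_offset[q] = offset;
        offset += sc->sc_nrxbuf * sizeof(struct vic_rxdesc);
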
 634 |
 635 | void
 636 | vic_rx_fill(struct vic_softc *sc, int q)
 637 | {
 638 |         struct vic_rxbuf        *rxb;
 639 |         struct vic_rxdesc       *rxd;
 640 |         u_int                   slots;
 641 |
 642 |         for (slots = if_rxr_get(&sc->sc_rxq[q].ring, sc->sc_nrxbuf);
 643 |             slots > 0; slots--) {
 644 |                 rxb = &sc->sc_rxq[q].bufs[sc->sc_rxq[q].end];
 645 |                 rxd = &sc->sc_rxq[q].slots[sc->sc_rxq[q].end];
 646 |
 647 |                 rxb->rxb_m = vic_alloc_mbuf(sc, rxb->rxb_dmamap,
 648 |                     sc->sc_rxq[q].pktlen);
 649 |                 if (rxb->rxb_m == NULL)
 650 |                         break;
 651 |
 652 |                 bus_dmamap_sync(sc->sc_dmat, rxb->rxb_dmamap, 0,
 653 |                     rxb->rxb_m->m_pkthdr.len, BUS_DMASYNC_PREREAD);
 654 |
 655 |                 rxd->rx_physaddr = rxb->rxb_dmamap->dm_segs[0].ds_addr;
 656 |                 rxd->rx_buflength = rxb->rxb_m->m_pkthdr.len;
 657 |                 rxd->rx_length = 0;
 658 |                 rxd->rx_owner = VIC_OWNER_NIC;
 659 |
 660 |                 VIC_INC(sc->sc_rxq[q].end, sc->sc_data->vd_rx[q].length);
 661 |         }
 662 |         if_rxr_put(&sc->sc_rxq[q].ring, slots);
 663 | }
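
Note: if_rxr_get() reserves up to sc_nrxbuf slots from the ring accounting and
each filled slot consumes one reservation; whatever is left over after an mbuf
allocation failure is handed back via if_rxr_put(slots). A slot only becomes
visible to the device once its buffer is described and ownership flips:

        rxd->rx_physaddr = rxb->rxb_dmamap->dm_segs[0].ds_addr;
        rxd->rx_owner = VIC_OWNER_NIC;  /* hand the slot to the device */
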
 664 |
 665 | int
 666 | vic_init_data(struct vic_softc *sc)
 667 | {
 668 |         struct vic_rxbuf        *rxb;
 669 |         struct vic_rxdesc       *rxd;
 670 |         struct vic_txbuf        *txb;
 671 |
 672 |         int                     q, i;
 673 |
 674 |         for (q = 0; q < VIC_NRXRINGS; q++) {
 675 |                 for (i = 0; i < sc->sc_nrxbuf; i++) {
 676 |                         rxb = &sc->sc_rxq[q].bufs[i];
 677 |                         rxd = &sc->sc_rxq[q].slots[i];
 678 |
 679 |                         if (bus_dmamap_create(sc->sc_dmat,
 680 |                             sc->sc_rxq[q].pktlen, 1, sc->sc_rxq[q].pktlen, 0,
 681 |                             BUS_DMA_NOWAIT, &rxb->rxb_dmamap) != 0) {
 682 |                                 printf("%s: unable to create dmamap for "
 683 |                                     "ring %d slot %d\n", DEVNAME(sc), q, i);
 684 |                                 goto freerxbs;
 685 |                         }
 686 |
 687 |                         /* scrub the ring */
 688 |                         rxd->rx_physaddr = 0;
 689 |                         rxd->rx_buflength = 0;
 690 |                         rxd->rx_length = 0;
 691 |                         rxd->rx_owner = VIC_OWNER_DRIVER;
 692 |                 }
 693 |                 sc->sc_rxq[q].end = 0;
 694 |
 695 |                 if_rxr_init(&sc->sc_rxq[q].ring, 2, sc->sc_nrxbuf - 1);
 696 |                 vic_rx_fill(sc, q);
 697 |         }
 698 |
 699 |         for (i = 0; i < sc->sc_ntxbuf; i++) {
 700 |                 txb = &sc->sc_txbuf[i];
 701 |                 if (bus_dmamap_create(sc->sc_dmat, VIC_JUMBO_FRAMELEN,
 702 |                     (sc->sc_cap & VIC_CMD_HWCAP_SG) ? VIC_SG_MAX : 1,
 703 |                     VIC_JUMBO_FRAMELEN, 0, BUS_DMA_NOWAIT,
 704 |                     &txb->txb_dmamap) != 0) {
 705 |                         printf("%s: unable to create dmamap for tx %d\n",
 706 |                             DEVNAME(sc), i);
 707 |                         goto freetxbs;
 708 |                 }
 709 |                 txb->txb_m = NULL;
 710 |         }
 711 |
 712 |         return (0);
 713 |
 714 | freetxbs:
 715 |         while (i--) {
 716 |                 txb = &sc->sc_txbuf[i];
 717 |                 bus_dmamap_destroy(sc->sc_dmat, txb->txb_dmamap);
 718 |         }
 719 |
 720 |         i = sc->sc_nrxbuf;
 721 |         q = VIC_NRXRINGS - 1;
 722 | freerxbs:
 723 |         while (q >= 0) {
 724 |                 while (i--) {
 725 |                         rxb = &sc->sc_rxq[q].bufs[i];
 726 |
 727 |                         if (rxb->rxb_m != NULL) {
 728 |                                 bus_dmamap_sync(sc->sc_dmat, rxb->rxb_dmamap,
 729 |                                     0, rxb->rxb_m->m_pkthdr.len,
 730 |                                     BUS_DMASYNC_POSTREAD);
 731 |                                 bus_dmamap_unload(sc->sc_dmat, rxb->rxb_dmamap);
 732 |                                 m_freem(rxb->rxb_m);
 733 |                                 rxb->rxb_m = NULL;
 734 |                         }
 735 |                         bus_dmamap_destroy(sc->sc_dmat, rxb->rxb_dmamap);
 736 |                 }
 737 |                 q--;
 738 |         }
 739 |
 740 |         return (1);
 741 | }
 742 |
 743 | int
 744 | vic_uninit_data(struct vic_softc *sc)
 745 | {
 746 |         struct vic_rxbuf        *rxb;
 747 |         struct vic_rxdesc       *rxd;
 748 |         struct vic_txbuf        *txb;
 749 |
 750 |         int                     i, q;
 751 |
 752 |         for (q = 0; q < VIC_NRXRINGS; q++) {
 753 |                 for (i = 0; i < sc->sc_nrxbuf; i++) {
 754 |                         rxb = &sc->sc_rxq[q].bufs[i];
 755 |                         rxd = &sc->sc_rxq[q].slots[i];
     |                         ^ Value stored to 'rxd' is never read
 756 |
 757 |                         if (rxb->rxb_m != NULL) {
 758 |                                 bus_dmamap_sync(sc->sc_dmat, rxb->rxb_dmamap,
 759 |                                     0, rxb->rxb_m->m_pkthdr.len,
 760 |                                     BUS_DMASYNC_POSTREAD);
 761 |                                 bus_dmamap_unload(sc->sc_dmat, rxb->rxb_dmamap);
 762 |                                 m_freem(rxb->rxb_m);
 763 |                                 rxb->rxb_m = NULL;
 764 |                         }
 765 |                         bus_dmamap_destroy(sc->sc_dmat, rxb->rxb_dmamap);
 766 |                 }
 767 |         }
 768 |
 769 |         for (i = 0; i < sc->sc_ntxbuf; i++) {
 770 |                 txb = &sc->sc_txbuf[i];
 771 |                 bus_dmamap_destroy(sc->sc_dmat, txb->txb_dmamap);
 772 |         }
 773 |
 774 |         return (0);
 775 | }
 776 |
 777 | void
 778 | vic_link_state(struct vic_softc *sc)
 779 | {
 780 |         struct ifnet    *ifp = &sc->sc_ac.ac_if;
 781 |         u_int32_t       status;
 782 |         int             link_state = LINK_STATE_DOWN;
 783 |
 784 |         status = vic_read(sc, VIC_STATUS);
 785 |         if (status & VIC_STATUS_CONNECTED)
 786 |                 link_state = LINK_STATE_FULL_DUPLEX;
 787 |         if (ifp->if_link_state != link_state) {
 788 |                 ifp->if_link_state = link_state;
 789 |                 if_link_state_change(ifp);
 790 |         }
 791 | }
 792 |
 793 | int
 794 | vic_intr(void *arg)
 795 | {
 796 |         struct vic_softc *sc = (struct vic_softc *)arg;
 797 |         int q;
 798 |
 799 |         vic_write(sc, VIC_CMD, VIC_CMD_INTR_ACK);
 800 |
 801 |         for (q = 0; q < VIC_NRXRINGS; q++)
 802 |                 vic_rx_proc(sc, q);
 803 |         vic_tx_proc(sc);
 804 |
 805 |         return (-1);
 806 | }
 807 |
 808 | void
 809 | vic_rx_proc(struct vic_softc *sc, int q)
 810 | {
 811 |         struct ifnet            *ifp = &sc->sc_ac.ac_if;
 812 |         struct vic_rxdesc       *rxd;
 813 |         struct vic_rxbuf        *rxb;
 814 |         struct mbuf_list        ml = MBUF_LIST_INITIALIZER();
 815 |         struct mbuf             *m;
 816 |         int                     len, idx;
 817 |
 818 |         if ((ifp->if_flags & IFF_RUNNING) == 0)
 819 |                 return;
 820 |
 821 |         bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_map, 0, sc->sc_dma_size,
 822 |             BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
 823 |
 824 |         while (if_rxr_inuse(&sc->sc_rxq[q].ring) > 0) {
 825 |                 idx = sc->sc_data->vd_rx[q].nextidx;
 826 |                 if (idx >= sc->sc_data->vd_rx[q].length) {
 827 |                         ifp->if_ierrors++;
 828 |                         if (ifp->if_flags & IFF_DEBUG)
 829 |                                 printf("%s: receive index error\n",
 830 |                                     sc->sc_dev.dv_xname);
 831 |                         break;
 832 |                 }
 833 |
 834 |                 rxd = &sc->sc_rxq[q].slots[idx];
 835 |                 if (rxd->rx_owner != VIC_OWNER_DRIVER)
 836 |                         break;
 837 |
 838 |                 rxb = &sc->sc_rxq[q].bufs[idx];
 839 |
 840 |                 if (rxb->rxb_m == NULL) {
 841 |                         ifp->if_ierrors++;
 842 |                         printf("%s: rxb %d has no mbuf\n", DEVNAME(sc), idx);
 843 |                         break;
 844 |                 }
 845 |
 846 |                 bus_dmamap_sync(sc->sc_dmat, rxb->rxb_dmamap, 0,
 847 |                     rxb->rxb_m->m_pkthdr.len, BUS_DMASYNC_POSTREAD);
 848 |                 bus_dmamap_unload(sc->sc_dmat, rxb->rxb_dmamap);
 849 |
 850 |                 m = rxb->rxb_m;
 851 |                 rxb->rxb_m = NULL;
 852 |                 len = rxd->rx_length;
 853 |
 854 |                 if (len < VIC_MIN_FRAMELEN) {
 855 |                         m_freem(m);
 856 |
 857 |                         ifp->if_iqdrops++;
 858 |                         goto nextp;
 859 |                 }
 860 |
 861 |                 m->m_pkthdr.len = m->m_len = len;
 862 |
 863 |                 ml_enqueue(&ml, m);
 864 |
 865 | nextp:
 866 |                 if_rxr_put(&sc->sc_rxq[q].ring, 1);
 867 |                 VIC_INC(sc->sc_data->vd_rx[q].nextidx, sc->sc_nrxbuf);
 868 |         }
 869 |
 870 |         if (ifiq_input(&ifp->if_rcv, &ml))
 871 |                 if_rxr_livelocked(&sc->sc_rxq[q].ring);
 872 |
 873 |         vic_rx_fill(sc, q);
 874 |
 875 |         bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_map, 0, sc->sc_dma_size,
 876 |             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 877 | }
 878 |
 879 | void
 880 | vic_tx_proc(struct vic_softc *sc)
 881 | {
 882 |         struct ifnet            *ifp = &sc->sc_ac.ac_if;
 883 |         struct vic_txdesc       *txd;
 884 |         struct vic_txbuf        *txb;
 885 |         int                     idx;
 886 |
 887 |         bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_map, 0, sc->sc_dma_size,
 888 |             BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
 889 |
 890 |         while (sc->sc_txpending > 0) {
 891 |                 idx = sc->sc_data->vd_tx_curidx;
 892 |                 if (idx >= sc->sc_data->vd_tx_length) {
 893 |                         ifp->if_oerrors++;
 894 |                         break;
 895 |                 }
 896 |
 897 |                 txd = &sc->sc_txq[idx];
 898 |                 if (txd->tx_owner != VIC_OWNER_DRIVER)
 899 |                         break;
 900 |
 901 |                 txb = &sc->sc_txbuf[idx];
 902 |                 if (txb->txb_m == NULL) {
 903 |                         printf("%s: tx ring is corrupt\n", DEVNAME(sc));
 904 |                         ifp->if_oerrors++;
 905 |                         break;
 906 |                 }
 907 |
 908 |                 bus_dmamap_sync(sc->sc_dmat, txb->txb_dmamap, 0,
 909 |                     txb->txb_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
 910 |                 bus_dmamap_unload(sc->sc_dmat, txb->txb_dmamap);
 911 |
 912 |                 m_freem(txb->txb_m);
 913 |                 txb->txb_m = NULL;
 914 |                 ifq_clr_oactive(&ifp->if_snd);
 915 |
 916 |                 sc->sc_txpending--;
 917 |                 sc->sc_data->vd_tx_stopped = 0;
 918 |
 919 |                 VIC_INC(sc->sc_data->vd_tx_curidx, sc->sc_data->vd_tx_length);
 920 |         }
 921 |
 922 |         bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_map, 0, sc->sc_dma_size,
 923 |             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 924 |
 925 |         vic_start(ifp);
 926 | }
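
Note: completion processing walks vd_tx_curidx for as long as the device has
handed descriptors back (tx_owner == VIC_OWNER_DRIVER), syncs and unloads each
map, frees the mbuf, and clears the oactive mark; the trailing vic_start()
immediately reuses whatever slots were just reclaimed.
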
 927 |
 928 | void
 929 | vic_iff(struct vic_softc *sc)
 930 | {
 931 |         struct arpcom           *ac = &sc->sc_ac;
 932 |         struct ifnet            *ifp = &sc->sc_ac.ac_if;
 933 |         struct ether_multi      *enm;
 934 |         struct ether_multistep  step;
 935 |         u_int32_t               crc;
 936 |         u_int16_t               *mcastfil = (u_int16_t *)sc->sc_data->vd_mcastfil;
 937 |         u_int                   flags;
 938 |
 939 |         ifp->if_flags &= ~IFF_ALLMULTI;
 940 |
 941 |         /* Always accept broadcast frames. */
 942 |         flags = VIC_CMD_IFF_BROADCAST;
 943 |
 944 |         if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
 945 |                 ifp->if_flags |= IFF_ALLMULTI;
 946 |                 if (ifp->if_flags & IFF_PROMISC)
 947 |                         flags |= VIC_CMD_IFF_PROMISC;
 948 |                 else
 949 |                         flags |= VIC_CMD_IFF_MULTICAST;
 950 |                 memset(&sc->sc_data->vd_mcastfil, 0xff,
 951 |                     sizeof(sc->sc_data->vd_mcastfil));
 952 |         } else {
 953 |                 flags |= VIC_CMD_IFF_MULTICAST;
 954 |
 955 |                 bzero(&sc->sc_data->vd_mcastfil,
 956 |                     sizeof(sc->sc_data->vd_mcastfil));
 957 |
 958 |                 ETHER_FIRST_MULTI(step, ac, enm);
 959 |                 while (enm != NULL) {
 960 |                         crc = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);
 961 |
 962 |                         crc >>= 26;
 963 |
 964 |                         mcastfil[crc >> 4] |= htole16(1 << (crc & 0xf));
 965 |
 966 |                         ETHER_NEXT_MULTI(step, enm);
 967 |                 }
 968 |         }
 969 |
 970 |         vic_write(sc, VIC_CMD, VIC_CMD_MCASTFIL);
 971 |         sc->sc_data->vd_iff = flags;
 972 |         vic_write(sc, VIC_CMD, VIC_CMD_IFF);
 973 | }
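
Note: the 64-bit multicast filter is indexed by the top six bits of the
little-endian CRC-32 of the address (crc >>= 26 leaves a value in 0..63);
treating vd_mcastfil as four 16-bit little-endian words, bits 5-4 of that
index select the word (crc >> 4) and bits 3-0 the bit within it (crc & 0xf).
As a worked example, a hash value of 42 (0x2a) sets bit 10 of mcastfil[2]:

        crc = 42;                               /* 0b101010 */
        mcastfil[crc >> 4] |= htole16(1 << (crc & 0xf));
                                                /* word 2, bit 10 */
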
 974 |
 975 | void
 976 | vic_getlladdr(struct vic_softc *sc)
 977 | {
 978 |         u_int32_t reg;
 979 |
 980 |         /* Get MAC address */
 981 |         reg = (sc->sc_cap & VIC_CMD_HWCAP_VPROM) ? VIC_VPROM : VIC_LLADDR;
 982 |
 983 |         bus_space_barrier(sc->sc_iot, sc->sc_ioh, reg, ETHER_ADDR_LEN,
 984 |             BUS_SPACE_BARRIER_READ);
 985 |         bus_space_read_region_1(sc->sc_iot, sc->sc_ioh, reg, sc->sc_lladdr,
 986 |             ETHER_ADDR_LEN);
 987 |
 988 |         /* Update the MAC address register */
 989 |         if (reg == VIC_VPROM)
 990 |                 vic_setlladdr(sc);
 991 | }
 992 |
 993 | void
 994 | vic_setlladdr(struct vic_softc *sc)
 995 | {
 996 |         bus_space_write_region_1(sc->sc_iot, sc->sc_ioh, VIC_LLADDR,
 997 |             sc->sc_lladdr, ETHER_ADDR_LEN);
 998 |         bus_space_barrier(sc->sc_iot, sc->sc_ioh, VIC_LLADDR, ETHER_ADDR_LEN,
 999 |             BUS_SPACE_BARRIER_WRITE);
1000 | }
1001 |
1002 | int
1003 | vic_media_change(struct ifnet *ifp)
1004 | {
1005 |         /* Ignore */
1006 |         return (0);
1007 | }
1008 |
1009 | void
1010 | vic_media_status(struct ifnet *ifp, struct ifmediareq *imr)
1011 | {
1012 |         struct vic_softc *sc = (struct vic_softc *)ifp->if_softc;
1013 |
1014 |         imr->ifm_active = IFM_ETHER | IFM_AUTO;
1015 |         imr->ifm_status = IFM_AVALID;
1016 |
1017 |         vic_link_state(sc);
1018 |
1019 |         if (LINK_STATE_IS_UP(ifp->if_link_state) &&
1020 |             ifp->if_flags & IFF_UP)
1021 |                 imr->ifm_status |= IFM_ACTIVE;
1022 | }
1023 |
1024 | void
1025 | vic_start(struct ifnet *ifp)
1026 | {
1027 |         struct vic_softc        *sc;
1028 |         struct mbuf             *m;
1029 |         struct vic_txbuf        *txb;
1030 |         struct vic_txdesc       *txd;
1031 |         struct vic_sg           *sge;
1032 |         bus_dmamap_t            dmap;
1033 |         int                     i, idx;
1034 |         int                     tx = 0;
1035 |
1036 |         if (!(ifp->if_flags & IFF_RUNNING))
1037 |                 return;
1038 |
1039 |         if (ifq_is_oactive(&ifp->if_snd))
1040 |                 return;
1041 |
1042 |         if (ifq_empty(&ifp->if_snd))
1043 |                 return;
1044 |
1045 |         sc = (struct vic_softc *)ifp->if_softc;
1046 |
1047 |         bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_map, 0, sc->sc_dma_size,
1048 |             BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1049 |
1050 |         for (;;) {
1051 |                 if (VIC_TXURN(sc)) {
1052 |                         ifq_set_oactive(&ifp->if_snd);
1053 |                         break;
1054 |                 }
1055 |
1056 |                 idx = sc->sc_data->vd_tx_nextidx;
1057 |                 if (idx >= sc->sc_data->vd_tx_length) {
1058 |                         printf("%s: tx idx is corrupt\n", DEVNAME(sc));
1059 |                         ifp->if_oerrors++;
1060 |                         break;
1061 |                 }
1062 |
1063 |                 txd = &sc->sc_txq[idx];
1064 |                 txb = &sc->sc_txbuf[idx];
1065 |
1066 |                 if (txb->txb_m != NULL) {
1067 |                         printf("%s: tx ring is corrupt\n", DEVNAME(sc));
1068 |                         sc->sc_data->vd_tx_stopped = 1;
1069 |                         ifp->if_oerrors++;
1070 |                         break;
1071 |                 }
1072 |
1073 |                 m = ifq_dequeue(&ifp->if_snd);
1074 |                 if (m == NULL)
1075 |                         break;
1076 |
1077 |                 if (vic_load_txb(sc, txb, m) != 0) {
1078 |                         m_freem(m);
1079 |                         ifp->if_oerrors++;
1080 |                         continue;
1081 |                 }
1082 |
1083 | #if NBPFILTER > 0
1084 |                 if (ifp->if_bpf)
1085 |                         bpf_mtap(ifp->if_bpf, txb->txb_m, BPF_DIRECTION_OUT);
1086 | #endif
1087 |
1088 |                 dmap = txb->txb_dmamap;
1089 |                 txd->tx_flags = VIC_TX_FLAGS_KEEP;
1090 |                 txd->tx_owner = VIC_OWNER_NIC;
1091 |                 txd->tx_sa.sa_addr_type = VIC_SG_ADDR_PHYS;
1092 |                 txd->tx_sa.sa_length = dmap->dm_nsegs;
1093 |                 for (i = 0; i < dmap->dm_nsegs; i++) {
1094 |                         sge = &txd->tx_sa.sa_sg[i];
1095 |                         sge->sg_length = dmap->dm_segs[i].ds_len;
1096 |                         sge->sg_addr_low = dmap->dm_segs[i].ds_addr;
1097 |                 }
1098 |
1099 |                 if (VIC_TXURN_WARN(sc)) {
1100 |                         txd->tx_flags |= VIC_TX_FLAGS_TXURN;
1101 |                 }
1102 |
1103 |                 bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
1104 |                     BUS_DMASYNC_PREWRITE);
1105 |
1106 |                 sc->sc_txpending++;
1107 |
1108 |                 VIC_INC(sc->sc_data->vd_tx_nextidx, sc->sc_data->vd_tx_length);
1109 |
1110 |                 tx = 1;
1111 |         }
1112 |
1113 |         bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_map, 0, sc->sc_dma_size,
1114 |             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1115 |
1116 |         if (tx)
1117 |                 vic_read(sc, VIC_Tx_ADDR);
1118 | }
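
Note: the read of VIC_Tx_ADDR at the end appears to serve as the transmit
doorbell; the register access itself is what tells the device to scan the
ring, which is why it is issued once, and only when at least one descriptor
was queued in this call.
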
1119 |
1120 | int
1121 | vic_load_txb(struct vic_softc *sc, struct vic_txbuf *txb, struct mbuf *m)
1122 | {
1123 |         bus_dmamap_t            dmap = txb->txb_dmamap;
1124 |         int                     error;
1125 |
1126 |         error = bus_dmamap_load_mbuf(sc->sc_dmat, dmap, m, BUS_DMA_NOWAIT);
1127 |         switch (error) {
1128 |         case 0:
1129 |                 txb->txb_m = m;
1130 |                 break;
1131 |
1132 |         case EFBIG:
1133 |                 if (m_defrag(m, M_DONTWAIT) == 0 &&
1134 |                     bus_dmamap_load_mbuf(sc->sc_dmat, dmap, m,
1135 |                     BUS_DMA_NOWAIT) == 0) {
1136 |                         txb->txb_m = m;
1137 |                         break;
1138 |                 }
1139 |
1140 |                 /* FALLTHROUGH */
1141 |         default:
1142 |                 return (ENOBUFS);
1143 |         }
1144 |
1145 |         return (0);
1146 | }
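
Note: EFBIG from bus_dmamap_load_mbuf() means the chain has more segments than
the map allows (VIC_SG_MAX with scatter-gather capable hardware, otherwise
one), so the chain is compacted with m_defrag() and the load retried once; any
other failure, or a failed retry, maps to ENOBUFS and the caller frees the
mbuf. The same pattern, written flat as a minimal sketch:

        error = bus_dmamap_load_mbuf(sc->sc_dmat, dmap, m, BUS_DMA_NOWAIT);
        if (error == EFBIG && m_defrag(m, M_DONTWAIT) == 0)
                error = bus_dmamap_load_mbuf(sc->sc_dmat, dmap, m,
                    BUS_DMA_NOWAIT);
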
1147 |
1148 | void
1149 | vic_watchdog(struct ifnet *ifp)
1150 | {
1151 | #if 0
1152 |         struct vic_softc *sc = (struct vic_softc *)ifp->if_softc;
1153 |
1154 |         if (sc->sc_txpending && sc->sc_txtimeout > 0) {
1155 |                 if (--sc->sc_txtimeout == 0) {
1156 |                         printf("%s: device timeout\n", sc->sc_dev.dv_xname);
1157 |                         ifp->if_flags &= ~IFF_RUNNING;
1158 |                         vic_init(ifp);
1159 |                         ifp->if_oerrors++;
1160 |                         return;
1161 |                 }
1162 |         }
1163 |
1164 |         if (!ifq_empty(&ifp->if_snd))
1165 |                 vic_start(ifp);
1166 | #endif
1167 | }
1168 |
1169 | int
1170 | vic_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1171 | {
1172 |         struct vic_softc *sc = (struct vic_softc *)ifp->if_softc;
1173 |         struct ifreq *ifr = (struct ifreq *)data;
1174 |         int s, error = 0;
1175 |
1176 |         s = splnet();
1177 |
1178 |         switch (cmd) {
1179 |         case SIOCSIFADDR:
1180 |                 ifp->if_flags |= IFF_UP;
1181 |                 /* FALLTHROUGH */
1182 |         case SIOCSIFFLAGS:
1183 |                 if (ifp->if_flags & IFF_UP) {
1184 |                         if (ifp->if_flags & IFF_RUNNING)
1185 |                                 error = ENETRESET;
1186 |                         else
1187 |                                 vic_init(ifp);
1188 |                 } else {
1189 |                         if (ifp->if_flags & IFF_RUNNING)
1190 |                                 vic_stop(ifp);
1191 |                 }
1192 |                 break;
1193 |
1194 |         case SIOCGIFMEDIA:
1195 |         case SIOCSIFMEDIA:
1196 |                 error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
1197 |                 break;
1198 |
1199 |         case SIOCGIFRXR:
1200 |                 error = vic_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_data);
1201 |                 break;
1202 |
1203 |         default:
1204 |                 error = ether_ioctl(ifp, &sc->sc_ac, cmd, data);
1205 |         }
1206 |
1207 |         if (error == ENETRESET) {
1208 |                 if (ifp->if_flags & IFF_RUNNING)
1209 |                         vic_iff(sc);
1210 |                 error = 0;
1211 |         }
1212 |
1213 |         splx(s);
1214 |         return (error);
1215 | }
1216 |
1217 | int
1218 | vic_rxrinfo(struct vic_softc *sc, struct if_rxrinfo *ifri)
1219 | {
1220 |         struct if_rxring_info ifr[2];
1221 |
1222 |         memset(ifr, 0, sizeof(ifr));
1223 |
1224 |         ifr[0].ifr_size = MCLBYTES;
1225 |         ifr[0].ifr_info = sc->sc_rxq[0].ring;
1226 |
1227 |         ifr[1].ifr_size = 4096;
1228 |         ifr[1].ifr_info = sc->sc_rxq[1].ring;
1229 |
1230 |         return (if_rxr_info_ioctl(ifri, nitems(ifr), ifr));
1231 | }
1232 |
1233 | void
1234 | vic_init(struct ifnet *ifp)
1235 | {
1236 |         struct vic_softc        *sc = (struct vic_softc *)ifp->if_softc;
1237 |         int                     q;
1238 |         int                     s;
1239 |
1240 |         sc->sc_data->vd_tx_curidx = 0;
1241 |         sc->sc_data->vd_tx_nextidx = 0;
1242 |         sc->sc_data->vd_tx_stopped = sc->sc_data->vd_tx_queued = 0;
1243 |         sc->sc_data->vd_tx_saved_nextidx = 0;
1244 |
1245 |         for (q = 0; q < VIC_NRXRINGS; q++) {
1246 |                 sc->sc_data->vd_rx[q].nextidx = 0;
1247 |                 sc->sc_data->vd_rx_saved_nextidx[q] = 0;
1248 |         }
1249 |
1250 |         if (vic_init_data(sc) != 0)
1251 |                 return;
1252 |
1253 |         bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_map, 0, sc->sc_dma_size,
1254 |             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1255 |
1256 |         s = splnet();
1257 |
1258 |         vic_write(sc, VIC_DATA_ADDR, VIC_DMA_DVA(sc));
1259 |         vic_write(sc, VIC_DATA_LENGTH, sc->sc_dma_size);
1260 |
1261 |         ifp->if_flags |= IFF_RUNNING;
1262 |         ifq_clr_oactive(&ifp->if_snd);
1263 |
1264 |         vic_iff(sc);
1265 |         vic_write(sc, VIC_CMD, VIC_CMD_INTR_ENABLE);
1266 |
1267 |         splx(s);
1268 |
1269 |         timeout_add_sec(&sc->sc_tick, 1);
1270 | }
1271 |
1272 | void
1273 | vic_stop(struct ifnet *ifp)
1274 | {
1275 |         struct vic_softc *sc = (struct vic_softc *)ifp->if_softc;
1276 |         int s;
1277 |
1278 |         s = splnet();
1279 |
1280 |         timeout_del(&sc->sc_tick);
1281 |
1282 |         ifp->if_flags &= ~IFF_RUNNING;
1283 |         ifq_clr_oactive(&ifp->if_snd);
1284 |
1285 |         bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_map, 0, sc->sc_dma_size,
1286 |             BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1287 |
1288 |         /* XXX wait for tx to complete */
1289 |         while (sc->sc_txpending > 0) {
1290 |                 splx(s);
1291 |                 delay(1000);
1292 |                 s = splnet();
1293 |         }
1294 |
1295 |         sc->sc_data->vd_tx_stopped = 1;
1296 |
1297 |         vic_write(sc, VIC_CMD, VIC_CMD_INTR_DISABLE);
1298 |
1299 |         sc->sc_data->vd_iff = 0;
1300 |         vic_write(sc, VIC_CMD, VIC_CMD_IFF);
1301 |
1302 |         vic_write(sc, VIC_DATA_ADDR, 0);
1303 |
1304 |         vic_uninit_data(sc);
1305 |
1306 |         splx(s);
1307 | }
1308 |
1309 | struct mbuf *
1310 | vic_alloc_mbuf(struct vic_softc *sc, bus_dmamap_t map, u_int pktlen)
1311 | {
1312 |         struct mbuf *m = NULL;
1313 |
1314 |         m = MCLGETL(NULL, M_DONTWAIT, pktlen);
1315 |         if (!m)
1316 |                 return (NULL);
1317 |         m->m_data += ETHER_ALIGN;
1318 |         m->m_len = m->m_pkthdr.len = pktlen - ETHER_ALIGN;
1319 |
1320 |         if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT) != 0) {
1321 |                 printf("%s: could not load mbuf DMA map\n", DEVNAME(sc));
1322 |                 m_freem(m);
1323 |                 return (NULL);
1324 |         }
1325 |
1326 |         return (m);
1327 | }
1328 |
1329 | void
1330 | vic_tick(void *arg)
1331 | {
1332 |         struct vic_softc *sc = (struct vic_softc *)arg;
1333 |
1334 |         vic_link_state(sc);
1335 |
1336 |         timeout_add_sec(&sc->sc_tick, 1);
1337 | }
1338 |
1339 | u_int32_t
1340 | vic_read(struct vic_softc *sc, bus_size_t r)
1341 | {
1342 |         bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
1343 |             BUS_SPACE_BARRIER_READ);
1344 |         return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, r));
1345 | }
1346 |
1347 | void
1348 | vic_write(struct vic_softc *sc, bus_size_t r, u_int32_t v)
1349 | {
1350 |         bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
1351 |         bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
1352 |             BUS_SPACE_BARRIER_WRITE);
1353 | }
1354 |
1355 | u_int32_t
1356 | vic_read_cmd(struct vic_softc *sc, u_int32_t cmd)
1357 | {
1358 |         vic_write(sc, VIC_CMD, cmd);
1359 |         return (vic_read(sc, VIC_CMD));
1360 | }
1361 |
1362 | int
1363 | vic_alloc_dmamem(struct vic_softc *sc)
1364 | {
1365 |         int nsegs;
1366 |
1367 |         if (bus_dmamap_create(sc->sc_dmat, sc->sc_dma_size, 1,
1368 |             sc->sc_dma_size, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1369 |             &sc->sc_dma_map) != 0)
1370 |                 goto err;
1371 |
1372 |         if (bus_dmamem_alloc(sc->sc_dmat, sc->sc_dma_size, 16, 0,
1373 |             &sc->sc_dma_seg, 1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0)
1374 |                 goto destroy;
1375 |
1376 |         if (bus_dmamem_map(sc->sc_dmat, &sc->sc_dma_seg, nsegs,
1377 |             sc->sc_dma_size, &sc->sc_dma_kva, BUS_DMA_NOWAIT) != 0)
1378 |                 goto free;
1379 |
1380 |         if (bus_dmamap_load(sc->sc_dmat, sc->sc_dma_map, sc->sc_dma_kva,
1381 |             sc->sc_dma_size, NULL, BUS_DMA_NOWAIT) != 0)
1382 |                 goto unmap;
1383 |
1384 |         return (0);
1385 |
1386 | unmap:
1387 |         bus_dmamem_unmap(sc->sc_dmat, sc->sc_dma_kva, sc->sc_dma_size);
1388 | free:
1389 |         bus_dmamem_free(sc->sc_dmat, &sc->sc_dma_seg, 1);
1390 | destroy:
1391 |         bus_dmamap_destroy(sc->sc_dmat, sc->sc_dma_map);
1392 | err:
1393 |         return (1);
1394 | }
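
Note: this is the usual four-step bus_dma staircase: create the map, allocate
the segment, map it into kernel virtual memory, then load the map, with each
failure label unwinding exactly the steps already taken. vic_free_dmamem()
below is the same sequence in reverse (unload, unmap, free, destroy).
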
1395 |
1396 | void
1397 | vic_free_dmamem(struct vic_softc *sc)
1398 | {
1399 |         bus_dmamap_unload(sc->sc_dmat, sc->sc_dma_map);
1400 |         bus_dmamem_unmap(sc->sc_dmat, sc->sc_dma_kva, sc->sc_dma_size);
1401 |         bus_dmamem_free(sc->sc_dmat, &sc->sc_dma_seg, 1);
1402 |         bus_dmamap_destroy(sc->sc_dmat, sc->sc_dma_map);
1403 | }