Bug Summary

File: dev/pci/if_ixgb.c
Warning: line 1224, column 2
Value stored to 'reg_tctl' is never read
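
The warning is a dead store in ixgb_initialize_transmit_unit(): line 1224
reads the Transmit Control Register (TCTL) back into 'reg_tctl', and line
1225 immediately overwrites that value with a fixed set of flags, so the
value produced by the read is never used. A minimal sketch of the two
plausible fixes follows; this is an editorial illustration, not a change
committed upstream:

    /* Either drop the dead read and program TCTL from scratch ... */
    reg_tctl = IXGB_TCTL_TCE | IXGB_TCTL_TXEN | IXGB_TCTL_TPDE;
    IXGB_WRITE_REG(&sc->hw, TCTL, reg_tctl);

    /* ... or, if other TCTL bits were meant to be preserved, OR the new
     * flags into the value read back from the register: */
    reg_tctl = IXGB_READ_REG(&sc->hw, TCTL);
    reg_tctl |= IXGB_TCTL_TCE | IXGB_TCTL_TXEN | IXGB_TCTL_TPDE;
    IXGB_WRITE_REG(&sc->hw, TCTL, reg_tctl);

Which variant matches the hardware intent cannot be determined from the
warning alone; either one silences the dead-store diagnostic, and the
register write on line 1226 is unaffected.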

Annotated Source Code

clang -cc1 -cc1 -triple amd64-unknown-openbsd7.4 -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name if_ixgb.c -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -ffp-contract=on -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -target-feature +retpoline-external-thunk -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/llvm16/lib/clang/16 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/legacy-dpm -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu13 -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/inc -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc/pmfw_if -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D SUSPEND -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant 
-fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fcf-protection=branch -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /home/ben/Projects/scan/2024-01-11-110808-61670-1 -x c /usr/src/sys/dev/pci/if_ixgb.c
   1  /**************************************************************************
   2  
   3  Copyright (c) 2001-2005, Intel Corporation
   4  All rights reserved.
   5  
   6  Redistribution and use in source and binary forms, with or without
   7  modification, are permitted provided that the following conditions are met:
   8  
   9   1. Redistributions of source code must retain the above copyright notice,
  10      this list of conditions and the following disclaimer.
  11  
  12   2. Redistributions in binary form must reproduce the above copyright
  13      notice, this list of conditions and the following disclaimer in the
  14      documentation and/or other materials provided with the distribution.
  15  
  16   3. Neither the name of the Intel Corporation nor the names of its
  17      contributors may be used to endorse or promote products derived from
  18      this software without specific prior written permission.
  19  
  20  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  21  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  22  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  23  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  24  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  25  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  26  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  27  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  28  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  29  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  30  POSSIBILITY OF SUCH DAMAGE.
  31  
  32  ***************************************************************************/
  33  
  34  /* $OpenBSD: if_ixgb.c,v 1.76 2023/11/10 15:51:20 bluhm Exp $ */
  35  
  36  #include <dev/pci/if_ixgb.h>
  37  
  38  #ifdef IXGB_DEBUG
  39  /*********************************************************************
  40   * Set this to one to display debug statistics
  41   *********************************************************************/
  42  int ixgb_display_debug_stats = 0;
  43  #endif
  44  
  45  /*********************************************************************
  46   * Driver version
  47   *********************************************************************/
  48  
  49  #define IXGB_DRIVER_VERSION     "6.1.0"
  50  
  51  /*********************************************************************
  52   * PCI Device ID Table
  53   *********************************************************************/
  54  
  55  const struct pci_matchid ixgb_devices[] = {
  56          { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82597EX },
  57          { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82597EX_SR },
  58          { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82597EX_LR },
  59          { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82597EX_CX4 },
  60  };
  61  
  62  /*********************************************************************
  63   * Function prototypes
  64   *********************************************************************/
  65  int ixgb_probe(struct device *, void *, void *);
  66  void ixgb_attach(struct device *, struct device *, void *);
  67  int ixgb_intr(void *);
  68  void ixgb_start(struct ifnet *);
  69  int ixgb_ioctl(struct ifnet *, u_long, caddr_t);
  70  void ixgb_watchdog(struct ifnet *);
  71  void ixgb_init(void *);
  72  void ixgb_stop(void *);
  73  void ixgb_media_status(struct ifnet *, struct ifmediareq *);
  74  int ixgb_media_change(struct ifnet *);
  75  void ixgb_identify_hardware(struct ixgb_softc *);
  76  int ixgb_allocate_pci_resources(struct ixgb_softc *);
  77  void ixgb_free_pci_resources(struct ixgb_softc *);
  78  void ixgb_local_timer(void *);
  79  int ixgb_hardware_init(struct ixgb_softc *);
  80  void ixgb_setup_interface(struct ixgb_softc *);
  81  int ixgb_setup_transmit_structures(struct ixgb_softc *);
  82  void ixgb_initialize_transmit_unit(struct ixgb_softc *);
  83  int ixgb_setup_receive_structures(struct ixgb_softc *);
  84  void ixgb_initialize_receive_unit(struct ixgb_softc *);
  85  void ixgb_enable_intr(struct ixgb_softc *);
  86  void ixgb_disable_intr(struct ixgb_softc *);
  87  void ixgb_free_transmit_structures(struct ixgb_softc *);
  88  void ixgb_free_receive_structures(struct ixgb_softc *);
  89  void ixgb_update_stats_counters(struct ixgb_softc *);
  90  void ixgb_txeof(struct ixgb_softc *);
  91  int ixgb_allocate_receive_structures(struct ixgb_softc *);
  92  int ixgb_allocate_transmit_structures(struct ixgb_softc *);
  93  void ixgb_rxeof(struct ixgb_softc *, int);
  94  void
  95  ixgb_receive_checksum(struct ixgb_softc *,
  96                        struct ixgb_rx_desc * rx_desc,
  97                        struct mbuf *);
  98  void
  99  ixgb_transmit_checksum_setup(struct ixgb_softc *,
 100                               struct mbuf *,
 101                               u_int8_t *);
 102  void ixgb_set_promisc(struct ixgb_softc *);
 103  void ixgb_set_multi(struct ixgb_softc *);
 104  #ifdef IXGB_DEBUG
 105  void ixgb_print_hw_stats(struct ixgb_softc *);
 106  #endif
 107  void ixgb_update_link_status(struct ixgb_softc *);
 108  int
 109  ixgb_get_buf(struct ixgb_softc *, int i,
 110               struct mbuf *);
 111  void ixgb_enable_hw_vlans(struct ixgb_softc *);
 112  int ixgb_encap(struct ixgb_softc *, struct mbuf *);
 113  int
 114  ixgb_dma_malloc(struct ixgb_softc *, bus_size_t,
 115                  struct ixgb_dma_alloc *, int);
 116  void ixgb_dma_free(struct ixgb_softc *, struct ixgb_dma_alloc *);
 117  
 118  /*********************************************************************
 119   * OpenBSD Device Interface Entry Points
 120   *********************************************************************/
 121  
 122  const struct cfattach ixgb_ca = {
 123          sizeof(struct ixgb_softc), ixgb_probe, ixgb_attach
 124  };
 125  
 126  struct cfdriver ixgb_cd = {
 127          NULL, "ixgb", DV_IFNET
 128  };
 129  
 130  /* some defines for controlling descriptor fetches in h/w */
 131  #define RXDCTL_PTHRESH_DEFAULT 0        /* chip considers prefech below this */
 132  #define RXDCTL_HTHRESH_DEFAULT 0        /* chip will only prefetch if tail is
 133                                           * pushed this many descriptors from
 134                                           * head */
 135  #define RXDCTL_WTHRESH_DEFAULT 0        /* chip writes back at this many or RXT0 */
 136  
 137  
 138  /*********************************************************************
 139   * Device identification routine
 140   *
 141   * ixgb_probe determines if the driver should be loaded on
 142   * adapter based on PCI vendor/device id of the adapter.
 143   *
 144   * return 0 on no match, positive on match
 145   *********************************************************************/
 146  
 147  int
 148  ixgb_probe(struct device *parent, void *match, void *aux)
 149  {
 150          INIT_DEBUGOUT("ixgb_probe: begin");
 151  
 152          return (pci_matchbyid((struct pci_attach_args *)aux, ixgb_devices,
 153              nitems(ixgb_devices)));
 154  }
 155  
 156  /*********************************************************************
 157   * Device initialization routine
 158   *
 159   * The attach entry point is called when the driver is being loaded.
 160   * This routine identifies the type of hardware, allocates all resources
 161   * and initializes the hardware.
 162   *
 163   *********************************************************************/
 164  
 165  void
 166  ixgb_attach(struct device *parent, struct device *self, void *aux)
 167  {
 168          struct pci_attach_args *pa = aux;
 169          struct ixgb_softc *sc;
 170          int tsize, rsize;
 171  
 172          INIT_DEBUGOUT("ixgb_attach: begin");
 173  
 174          sc = (struct ixgb_softc *)self;
 175          sc->osdep.ixgb_pa = *pa;
 176  
 177          timeout_set(&sc->timer_handle, ixgb_local_timer, sc);
 178  
 179          /* Determine hardware revision */
 180          ixgb_identify_hardware(sc);
 181  
 182          /* Parameters (to be read from user) */
 183          sc->num_tx_desc = IXGB_MAX_TXD;
 184          sc->num_rx_desc = IXGB_MAX_RXD;
 185          sc->tx_int_delay = TIDV;
 186          sc->rx_int_delay = RDTR;
 187          sc->rx_buffer_len = IXGB_RXBUFFER_2048;
 188  
 189          /*
 190           * These parameters control the automatic generation(Tx) and
 191           * response(Rx) to Ethernet PAUSE frames.
 192           */
 193          sc->hw.fc.high_water = FCRTH;
 194          sc->hw.fc.low_water = FCRTL;
 195          sc->hw.fc.pause_time = FCPAUSE;
 196          sc->hw.fc.send_xon = TRUE;
 197          sc->hw.fc.type = FLOW_CONTROL;
 198  
 199          /* Set the max frame size assuming standard ethernet sized frames */
 200          sc->hw.max_frame_size = IXGB_MAX_JUMBO_FRAME_SIZE;
 201  
 202          if (ixgb_allocate_pci_resources(sc))
 203                  goto err_pci;
 204  
 205          tsize = IXGB_ROUNDUP(sc->num_tx_desc * sizeof(struct ixgb_tx_desc),
 206              IXGB_MAX_TXD * sizeof(struct ixgb_tx_desc));
 207          tsize = IXGB_ROUNDUP(tsize, PAGE_SIZE);
 208  
 209          /* Allocate Transmit Descriptor ring */
 210          if (ixgb_dma_malloc(sc, tsize, &sc->txdma, BUS_DMA_NOWAIT)) {
 211                  printf("%s: Unable to allocate TxDescriptor memory\n",
 212                      sc->sc_dv.dv_xname);
 213                  goto err_tx_desc;
 214          }
 215          sc->tx_desc_base = (struct ixgb_tx_desc *) sc->txdma.dma_vaddr;
 216  
 217          rsize = IXGB_ROUNDUP(sc->num_rx_desc * sizeof(struct ixgb_rx_desc),
 218              IXGB_MAX_RXD * sizeof(struct ixgb_rx_desc));
 219          rsize = IXGB_ROUNDUP(rsize, PAGE_SIZE);
 220  
 221          /* Allocate Receive Descriptor ring */
 222          if (ixgb_dma_malloc(sc, rsize, &sc->rxdma, BUS_DMA_NOWAIT)) {
 223                  printf("%s: Unable to allocate rx_desc memory\n",
 224                      sc->sc_dv.dv_xname);
 225                  goto err_rx_desc;
 226          }
 227          sc->rx_desc_base = (struct ixgb_rx_desc *) sc->rxdma.dma_vaddr;
 228  
 229          /* Initialize the hardware */
 230          if (ixgb_hardware_init(sc)) {
 231                  printf("%s: Unable to initialize the hardware\n",
 232                      sc->sc_dv.dv_xname);
 233                  goto err_hw_init;
 234          }
 235  
 236          /* Setup OS specific network interface */
 237          ixgb_setup_interface(sc);
 238  
 239          /* Initialize statistics */
 240          ixgb_clear_hw_cntrs(&sc->hw);
 241          ixgb_update_stats_counters(sc);
 242          ixgb_update_link_status(sc);
 243  
 244          printf(", address %s\n", ether_sprintf(sc->interface_data.ac_enaddr));
 245  
 246          INIT_DEBUGOUT("ixgb_attach: end");
 247          return;
 248  
 249  err_hw_init:
 250          ixgb_dma_free(sc, &sc->rxdma);
 251  err_rx_desc:
 252          ixgb_dma_free(sc, &sc->txdma);
 253  err_tx_desc:
 254  err_pci:
 255          ixgb_free_pci_resources(sc);
 256  }
 257  
 258  /*********************************************************************
 259   * Transmit entry point
 260   *
 261   * ixgb_start is called by the stack to initiate a transmit.
 262   * The driver will remain in this routine as long as there are
 263   * packets to transmit and transmit resources are available.
 264   * In case resources are not available stack is notified and
 265   * the packet is requeued.
 266   **********************************************************************/
 267  
 268  void
 269  ixgb_start(struct ifnet *ifp)
 270  {
 271          struct mbuf *m_head;
 272          struct ixgb_softc *sc = ifp->if_softc;
 273          int post = 0;
 274  
 275          if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
 276                  return;
 277  
 278          if (!sc->link_active)
 279                  return;
 280  
 281          bus_dmamap_sync(sc->txdma.dma_tag, sc->txdma.dma_map, 0,
 282              sc->txdma.dma_map->dm_mapsize,
 283              BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
 284  
 285          for (;;) {
 286                  m_head = ifq_deq_begin(&ifp->if_snd);
 287                  if (m_head == NULL)
 288                          break;
 289  
 290                  if (ixgb_encap(sc, m_head)) {
 291                          ifq_deq_rollback(&ifp->if_snd, m_head);
 292                          ifq_set_oactive(&ifp->if_snd);
 293                          break;
 294                  }
 295  
 296                  ifq_deq_commit(&ifp->if_snd, m_head);
 297  
 298  #if NBPFILTER > 0
 299                  /* Send a copy of the frame to the BPF listener */
 300                  if (ifp->if_bpf)
 301                          bpf_mtap_ether(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
 302  #endif
 303  
 304                  /* Set timeout in case hardware has problems transmitting */
 305                  ifp->if_timer = IXGB_TX_TIMEOUT;
 306  
 307                  post = 1;
 308          }
 309  
 310          bus_dmamap_sync(sc->txdma.dma_tag, sc->txdma.dma_map, 0,
 311              sc->txdma.dma_map->dm_mapsize,
 312              BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 313          /*
 314           * Advance the Transmit Descriptor Tail (Tdt),
 315           * this tells the E1000 that this frame
 316           * is available to transmit.
 317           */
 318          if (post)
 319                  IXGB_WRITE_REG(&sc->hw, TDT, sc->next_avail_tx_desc);
 320  }
 321  
 322  /*********************************************************************
 323   * Ioctl entry point
 324   *
 325   * ixgb_ioctl is called when the user wants to configure the
 326   * interface.
 327   *
 328   * return 0 on success, positive on failure
 329   **********************************************************************/
 330  
 331  int
 332  ixgb_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
 333  {
 334          struct ixgb_softc *sc = ifp->if_softc;
 335          struct ifreq *ifr = (struct ifreq *) data;
 336          int s, error = 0;
 337  
 338          s = splnet();
 339  
 340          switch (command) {
 341          case SIOCSIFADDR:
 342                  IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFADDR (Set Interface "
 343                      "Addr)");
 344                  ifp->if_flags |= IFF_UP;
 345                  if (!(ifp->if_flags & IFF_RUNNING))
 346                          ixgb_init(sc);
 347                  break;
 348  
 349          case SIOCSIFFLAGS:
 350                  IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFFLAGS (Set Interface Flags)");
 351                  if (ifp->if_flags & IFF_UP) {
 352                          /*
 353                           * If only the PROMISC or ALLMULTI flag changes, then
 354                           * don't do a full re-init of the chip, just update
 355                           * the Rx filter.
 356                           */
 357                          if ((ifp->if_flags & IFF_RUNNING) &&
 358                              ((ifp->if_flags ^ sc->if_flags) &
 359                              (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
 360                                  ixgb_set_promisc(sc);
 361                          } else {
 362                                  if (!(ifp->if_flags & IFF_RUNNING))
 363                                          ixgb_init(sc);
 364                          }
 365                  } else {
 366                          if (ifp->if_flags & IFF_RUNNING)
 367                                  ixgb_stop(sc);
 368                  }
 369                  sc->if_flags = ifp->if_flags;
 370                  break;
 371  
 372          case SIOCSIFMEDIA:
 373          case SIOCGIFMEDIA:
 374                  IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFMEDIA (Get/Set Interface Media)");
 375                  error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
 376                  break;
 377  
 378          default:
 379                  error = ether_ioctl(ifp, &sc->interface_data, command, data);
 380          }
 381  
 382          if (error == ENETRESET) {
 383                  if (ifp->if_flags & IFF_RUNNING) {
 384                          ixgb_disable_intr(sc);
 385                          ixgb_set_multi(sc);
 386                          ixgb_enable_intr(sc);
 387                  }
 388                  error = 0;
 389          }
 390  
 391          splx(s);
 392          return (error);
 393  }
 394  
 395  /*********************************************************************
 396   * Watchdog entry point
 397   *
 398   * This routine is called whenever hardware quits transmitting.
 399   *
 400   **********************************************************************/
 401  
 402  void
 403  ixgb_watchdog(struct ifnet * ifp)
 404  {
 405          struct ixgb_softc *sc = ifp->if_softc;
 406  
 407          /*
 408           * If we are in this routine because of pause frames, then don't
 409           * reset the hardware.
 410           */
 411          if (IXGB_READ_REG(&sc->hw, STATUS) & IXGB_STATUS_TXOFF) {
 412                  ifp->if_timer = IXGB_TX_TIMEOUT;
 413                  return;
 414          }
 415  
 416          printf("%s: watchdog timeout -- resetting\n", sc->sc_dv.dv_xname);
 417  
 418          ixgb_init(sc);
 419  
 420          sc->watchdog_events++;
 421  }
 422  
 423  /*********************************************************************
 424   * Init entry point
 425   *
 426   * This routine is used in two ways. It is used by the stack as
 427   * init entry point in network interface structure. It is also used
 428   * by the driver as a hw/sw initialization routine to get to a
 429   * consistent state.
 430   *
 431   **********************************************************************/
 432  
 433  void
 434  ixgb_init(void *arg)
 435  {
 436          struct ixgb_softc *sc = arg;
 437          struct ifnet *ifp = &sc->interface_data.ac_if;
 438          uint32_t temp_reg;
 439          int s;
 440  
 441          INIT_DEBUGOUT("ixgb_init: begin");
 442  
 443          s = splnet();
 444  
 445          ixgb_stop(sc);
 446  
 447          /* Get the latest mac address, User can use a LAA */
 448          bcopy(sc->interface_data.ac_enaddr, sc->hw.curr_mac_addr,
 449              IXGB_ETH_LENGTH_OF_ADDRESS);
 450  
 451          /* Initialize the hardware */
 452          if (ixgb_hardware_init(sc)) {
 453                  printf("%s: Unable to initialize the hardware\n",
 454                      sc->sc_dv.dv_xname);
 455                  splx(s);
 456                  return;
 457          }
 458  
 459          if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
 460                  ixgb_enable_hw_vlans(sc);
 461  
 462          /* Prepare transmit descriptors and buffers */
 463          if (ixgb_setup_transmit_structures(sc)) {
 464                  printf("%s: Could not setup transmit structures\n",
 465                      sc->sc_dv.dv_xname);
 466                  ixgb_stop(sc);
 467                  splx(s);
 468                  return;
 469          }
 470          ixgb_initialize_transmit_unit(sc);
 471  
 472          /* Setup Multicast table */
 473          ixgb_set_multi(sc);
 474  
 475          /* Prepare receive descriptors and buffers */
 476          if (ixgb_setup_receive_structures(sc)) {
 477                  printf("%s: Could not setup receive structures\n",
 478                      sc->sc_dv.dv_xname);
 479                  ixgb_stop(sc);
 480                  splx(s);
 481                  return;
 482          }
 483          ixgb_initialize_receive_unit(sc);
 484  
 485          /* Don't lose promiscuous settings */
 486          ixgb_set_promisc(sc);
 487  
 488          ifp->if_flags |= IFF_RUNNING;
 489          ifq_clr_oactive(&ifp->if_snd);
 490  
 491          /* Enable jumbo frames */
 492          IXGB_WRITE_REG(&sc->hw, MFRMS,
 493              sc->hw.max_frame_size << IXGB_MFRMS_SHIFT);
 494          temp_reg = IXGB_READ_REG(&sc->hw, CTRL0);
 495          temp_reg |= IXGB_CTRL0_JFE;
 496          IXGB_WRITE_REG(&sc->hw, CTRL0, temp_reg);
 497  
 498          timeout_add_sec(&sc->timer_handle, 1);
 499          ixgb_clear_hw_cntrs(&sc->hw);
 500          ixgb_enable_intr(sc);
 501  
 502          splx(s);
 503  }
 504  
 505  /*********************************************************************
 506   *
 507   * Interrupt Service routine
 508   *
 509   **********************************************************************/
 510  
 511  int
 512  ixgb_intr(void *arg)
 513  {
 514          struct ixgb_softc *sc = arg;
 515          struct ifnet *ifp;
 516          u_int32_t reg_icr;
 517          boolean_t rxdmt0 = FALSE;
 518          int claimed = 0;
 519  
 520          ifp = &sc->interface_data.ac_if;
 521  
 522          for (;;) {
 523                  reg_icr = IXGB_READ_REG(&sc->hw, ICR);
 524                  if (reg_icr == 0)
 525                          break;
 526  
 527                  claimed = 1;
 528  
 529                  if (reg_icr & IXGB_INT_RXDMT0)
 530                          rxdmt0 = TRUE;
 531  
 532                  if (ifp->if_flags & IFF_RUNNING) {
 533                          ixgb_rxeof(sc, -1);
 534                          ixgb_txeof(sc);
 535                  }
 536  
 537                  /* Link status change */
 538                  if (reg_icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC)) {
 539                          timeout_del(&sc->timer_handle);
 540                          ixgb_check_for_link(&sc->hw);
 541                          ixgb_update_link_status(sc);
 542                          timeout_add_sec(&sc->timer_handle, 1);
 543                  }
 544  
 545                  if (rxdmt0 && sc->raidc) {
 546                          IXGB_WRITE_REG(&sc->hw, IMC, IXGB_INT_RXDMT0);
 547                          IXGB_WRITE_REG(&sc->hw, IMS, IXGB_INT_RXDMT0);
 548                  }
 549          }
 550  
 551          if (ifp->if_flags & IFF_RUNNING && !ifq_empty(&ifp->if_snd))
 552                  ixgb_start(ifp);
 553  
 554          return (claimed);
 555  }
 556  
 557  
 558  /*********************************************************************
 559   *
 560   * Media Ioctl callback
 561   *
 562   * This routine is called whenever the user queries the status of
 563   * the interface using ifconfig.
 564   *
 565   **********************************************************************/
 566  void
 567  ixgb_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
 568  {
 569          struct ixgb_softc *sc = ifp->if_softc;
 570  
 571          INIT_DEBUGOUT("ixgb_media_status: begin");
 572  
 573          ixgb_check_for_link(&sc->hw);
 574          ixgb_update_link_status(sc);
 575  
 576          ifmr->ifm_status = IFM_AVALID;
 577          ifmr->ifm_active = IFM_ETHER;
 578  
 579          if (!sc->hw.link_up) {
 580                  ifmr->ifm_active |= IFM_NONE;
 581                  return;
 582          }
 583  
 584          ifmr->ifm_status |= IFM_ACTIVE;
 585          if ((sc->hw.phy_type == ixgb_phy_type_g6104) ||
 586              (sc->hw.phy_type == ixgb_phy_type_txn17401))
 587                  ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
 588          else
 589                  ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
 590  
 591          return;
 592  }
 593  
 594  /*********************************************************************
 595   *
 596   * Media Ioctl callback
 597   *
 598   * This routine is called when the user changes speed/duplex using
 599   * media/mediopt option with ifconfig.
 600   *
 601   **********************************************************************/
 602  int
 603  ixgb_media_change(struct ifnet * ifp)
 604  {
 605          struct ixgb_softc *sc = ifp->if_softc;
 606          struct ifmedia *ifm = &sc->media;
 607  
 608          INIT_DEBUGOUT("ixgb_media_change: begin");
 609  
 610          if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
 611                  return (EINVAL);
 612  
 613          return (0);
 614  }
 615  
 616  /*********************************************************************
 617   *
 618   * This routine maps the mbufs to tx descriptors.
 619   *
 620   * return 0 on success, positive on failure
 621   **********************************************************************/
 622  
 623  int
 624  ixgb_encap(struct ixgb_softc *sc, struct mbuf *m_head)
 625  {
 626          u_int8_t txd_popts;
 627          int i, j, error = 0;
 628          bus_dmamap_t map;
 629  
 630          struct ixgb_buffer *tx_buffer;
 631          struct ixgb_tx_desc *current_tx_desc = NULL;
 632  
 633          /*
 634           * Force a cleanup if number of TX descriptors available hits the
 635           * threshold
 636           */
 637          if (sc->num_tx_desc_avail <= IXGB_TX_CLEANUP_THRESHOLD) {
 638                  ixgb_txeof(sc);
 639                  /* Now do we at least have a minimal? */
 640                  if (sc->num_tx_desc_avail <= IXGB_TX_CLEANUP_THRESHOLD) {
 641                          sc->no_tx_desc_avail1++;
 642                          return (ENOBUFS);
 643                  }
 644          }
 645  
 646          /*
 647           * Map the packet for DMA.
 648           */
 649          tx_buffer = &sc->tx_buffer_area[sc->next_avail_tx_desc];
 650          map = tx_buffer->map;
 651  
 652          error = bus_dmamap_load_mbuf(sc->txtag, map,
 653              m_head, BUS_DMA_NOWAIT);
 654          if (error != 0) {
 655                  sc->no_tx_dma_setup++;
 656                  return (error);
 657          }
 658          IXGB_KASSERT(map->dm_nsegs != 0, ("ixgb_encap: empty packet"));
 659  
 660          if (map->dm_nsegs > sc->num_tx_desc_avail)
 661                  goto fail;
 662  
 663  #ifdef IXGB_CSUM_OFFLOAD
 664          ixgb_transmit_checksum_setup(sc, m_head, &txd_popts);
 665  #else
 666          txd_popts = 0;
 667  #endif
 668  
 669          i = sc->next_avail_tx_desc;
 670          for (j = 0; j < map->dm_nsegs; j++) {
 671                  tx_buffer = &sc->tx_buffer_area[i];
 672                  current_tx_desc = &sc->tx_desc_base[i];
 673  
 674                  current_tx_desc->buff_addr = htole64(map->dm_segs[j].ds_addr);
 675                  current_tx_desc->cmd_type_len = htole32((sc->txd_cmd | map->dm_segs[j].ds_len));
 676                  current_tx_desc->popts = txd_popts;
 677                  if (++i == sc->num_tx_desc)
 678                          i = 0;
 679  
 680                  tx_buffer->m_head = NULL;
 681          }
 682  
 683          sc->num_tx_desc_avail -= map->dm_nsegs;
 684          sc->next_avail_tx_desc = i;
 685  
 686          /* Find out if we are in VLAN mode */
 687          if (m_head->m_flags & M_VLANTAG) {
 688                  /* Set the VLAN id */
 689                  current_tx_desc->vlan = htole16(m_head->m_pkthdr.ether_vtag);
 690  
 691                  /* Tell hardware to add tag */
 692                  current_tx_desc->cmd_type_len |= htole32(IXGB_TX_DESC_CMD_VLE);
 693          }
 694  
 695          tx_buffer->m_head = m_head;
 696          bus_dmamap_sync(sc->txtag, map, 0, map->dm_mapsize,
 697              BUS_DMASYNC_PREWRITE);
 698  
 699          /*
 700           * Last Descriptor of Packet needs End Of Packet (EOP)
 701           */
 702          current_tx_desc->cmd_type_len |= htole32(IXGB_TX_DESC_CMD_EOP);
 703  
 704          return (0);
 705  
 706  fail:
 707          sc->no_tx_desc_avail2++;
 708          bus_dmamap_unload(sc->txtag, map);
 709          return (ENOBUFS);
 710  }
 711  
 712  void
 713  ixgb_set_promisc(struct ixgb_softc *sc)
 714  {
 715  
 716          u_int32_t reg_rctl;
 717          struct ifnet *ifp = &sc->interface_data.ac_if;
 718  
 719          reg_rctl = IXGB_READ_REG(&sc->hw, RCTL);
 720  
 721          if (ifp->if_flags & IFF_PROMISC) {
 722                  reg_rctl |= (IXGB_RCTL_UPE | IXGB_RCTL_MPE);
 723          } else if (ifp->if_flags & IFF_ALLMULTI) {
 724                  reg_rctl |= IXGB_RCTL_MPE;
 725                  reg_rctl &= ~IXGB_RCTL_UPE;
 726          } else {
 727                  reg_rctl &= ~(IXGB_RCTL_UPE | IXGB_RCTL_MPE);
 728          }
 729          IXGB_WRITE_REG(&sc->hw, RCTL, reg_rctl);
 730  }
 731  
 732  /*********************************************************************
 733   * Multicast Update
 734   *
 735   * This routine is called whenever multicast address list is updated.
 736   *
 737   **********************************************************************/
 738  
 739  void
 740  ixgb_set_multi(struct ixgb_softc *sc)
 741  {
 742          u_int32_t reg_rctl = 0;
 743          u_int8_t mta[MAX_NUM_MULTICAST_ADDRESSES * IXGB_ETH_LENGTH_OF_ADDRESS];
 744          int mcnt = 0;
 745          struct ifnet *ifp = &sc->interface_data.ac_if;
 746          struct arpcom *ac = &sc->interface_data;
 747          struct ether_multi *enm;
 748          struct ether_multistep step;
 749  
 750          IOCTL_DEBUGOUT("ixgb_set_multi: begin");
 751  
 752          if (ac->ac_multirangecnt > 0) {
 753                  ifp->if_flags |= IFF_ALLMULTI;
 754                  mcnt = MAX_NUM_MULTICAST_ADDRESSES;
 755                  goto setit;
 756          }
 757  
 758          ETHER_FIRST_MULTI(step, ac, enm);
 759          while (enm != NULL) {
 760                  if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
 761                          break;
 762                  bcopy(enm->enm_addrlo, &mta[mcnt*IXGB_ETH_LENGTH_OF_ADDRESS],
 763                      IXGB_ETH_LENGTH_OF_ADDRESS);
 764                  mcnt++;
 765                  ETHER_NEXT_MULTI(step, enm);
 766          }
 767  
 768  setit:
 769          if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
 770                  reg_rctl = IXGB_READ_REG(&sc->hw, RCTL);
 771                  reg_rctl |= IXGB_RCTL_MPE;
 772                  IXGB_WRITE_REG(&sc->hw, RCTL, reg_rctl);
 773          } else
 774                  ixgb_mc_addr_list_update(&sc->hw, mta, mcnt, 0);
 775  }
 776  
 777  
 778  /*********************************************************************
 779   * Timer routine
 780   *
 781   * This routine checks for link status and updates statistics.
 782   *
 783   **********************************************************************/
 784  
 785  void
 786  ixgb_local_timer(void *arg)
 787  {
 788          struct ifnet *ifp;
 789          struct ixgb_softc *sc = arg;
 790          int s;
 791  
 792          ifp = &sc->interface_data.ac_if;
 793  
 794          s = splnet();
 795  
 796          ixgb_check_for_link(&sc->hw);
 797          ixgb_update_link_status(sc);
 798          ixgb_update_stats_counters(sc);
 799  #ifdef IXGB_DEBUG
 800          if (ixgb_display_debug_stats && ifp->if_flags & IFF_RUNNING)
 801                  ixgb_print_hw_stats(sc);
 802  #endif
 803  
 804          timeout_add_sec(&sc->timer_handle, 1);
 805  
 806          splx(s);
 807  }
 808  
 809  void
 810  ixgb_update_link_status(struct ixgb_softc *sc)
 811  {
 812          struct ifnet *ifp = &sc->interface_data.ac_if;
 813  
 814          if (sc->hw.link_up) {
 815                  if (!sc->link_active) {
 816                          ifp->if_baudrate = IF_Gbps(10);
 817                          sc->link_active = 1;
 818                          ifp->if_link_state = LINK_STATE_FULL_DUPLEX;
 819                          if_link_state_change(ifp);
 820                  }
 821          } else {
 822                  if (sc->link_active) {
 823                          ifp->if_baudrate = 0;
 824                          sc->link_active = 0;
 825                          ifp->if_link_state = LINK_STATE_DOWN;
 826                          if_link_state_change(ifp);
 827                  }
 828          }
 829  }
 830  
 831  /*********************************************************************
 832   *
 833   * This routine disables all traffic on the adapter by issuing a
 834   * global reset on the MAC and deallocates TX/RX buffers.
 835   *
 836   **********************************************************************/
 837  
 838  void
 839  ixgb_stop(void *arg)
 840  {
 841          struct ifnet *ifp;
 842          struct ixgb_softc *sc = arg;
 843          ifp = &sc->interface_data.ac_if;
 844  
 845          INIT_DEBUGOUT("ixgb_stop: begin\n");
 846          ixgb_disable_intr(sc);
 847          sc->hw.adapter_stopped = FALSE;
 848          ixgb_adapter_stop(&sc->hw);
 849          timeout_del(&sc->timer_handle);
 850  
 851          /* Tell the stack that the interface is no longer active */
 852          ifp->if_flags &= ~IFF_RUNNING;
 853          ifq_clr_oactive(&ifp->if_snd);
 854  
 855          ixgb_free_transmit_structures(sc);
 856          ixgb_free_receive_structures(sc);
 857  }
 858  
 859  
 860  /*********************************************************************
 861   *
 862   * Determine hardware revision.
 863   *
 864   **********************************************************************/
 865  void
 866  ixgb_identify_hardware(struct ixgb_softc *sc)
 867  {
 868          u_int32_t reg;
 869          struct pci_attach_args *pa = &sc->osdep.ixgb_pa;
 870  
 871          /* Make sure our PCI config space has the necessary stuff set */
 872          sc->hw.pci_cmd_word = pci_conf_read(pa->pa_pc, pa->pa_tag,
 873              PCI_COMMAND_STATUS_REG);
 874  
 875          /* Save off the information about this board */
 876          sc->hw.vendor_id = PCI_VENDOR(pa->pa_id);
 877          sc->hw.device_id = PCI_PRODUCT(pa->pa_id);
 878  
 879          reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG);
 880          sc->hw.revision_id = PCI_REVISION(reg);
 881  
 882          reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
 883          sc->hw.subsystem_vendor_id = PCI_VENDOR(reg);
 884          sc->hw.subsystem_id = PCI_PRODUCT(reg);
 885  
 886          /* Set MacType, etc. based on this PCI info */
 887          switch (sc->hw.device_id) {
 888          case IXGB_DEVICE_ID_82597EX:
 889          case IXGB_DEVICE_ID_82597EX_SR:
 890          case IXGB_DEVICE_ID_82597EX_LR:
 891          case IXGB_DEVICE_ID_82597EX_CX4:
 892                  sc->hw.mac_type = ixgb_82597;
 893                  break;
 894          default:
 895                  INIT_DEBUGOUT1("Unknown device if 0x%x", sc->hw.device_id);
 896                  printf("%s: unsupported device id 0x%x\n",
 897                      sc->sc_dv.dv_xname, sc->hw.device_id);
 898          }
 899  }
 900  
 901  int
 902  ixgb_allocate_pci_resources(struct ixgb_softc *sc)
 903  
 904  {
 905          int val;
 906          pci_intr_handle_t ih;
 907          const char *intrstr = NULL;
 908          struct pci_attach_args *pa = &sc->osdep.ixgb_pa;
 909          pci_chipset_tag_t pc = pa->pa_pc;
 910  
 911          val = pci_conf_read(pa->pa_pc, pa->pa_tag, IXGB_MMBA);
 912          if (PCI_MAPREG_TYPE(val) != PCI_MAPREG_TYPE_MEM) {
 913                  printf(": mmba is not mem space\n");
 914                  return (ENXIO);
 915          }
 916          if (pci_mapreg_map(pa, IXGB_MMBA, PCI_MAPREG_MEM_TYPE(val), 0,
 917              &sc->osdep.mem_bus_space_tag, &sc->osdep.mem_bus_space_handle,
 918              &sc->osdep.ixgb_membase, &sc->osdep.ixgb_memsize, 0)) {
 919                  printf(": cannot find mem space\n");
 920                  return (ENXIO);
 921          }
 922  
 923          if (pci_intr_map(pa, &ih)) {
 924                  printf(": couldn't map interrupt\n");
 925                  return (ENXIO);
 926          }
 927  
 928          sc->hw.back = &sc->osdep;
 929  
 930          intrstr = pci_intr_string(pc, ih);
 931          sc->sc_intrhand = pci_intr_establish(pc, ih, IPL_NET, ixgb_intr, sc,
 932              sc->sc_dv.dv_xname);
 933          if (sc->sc_intrhand == NULL) {
 934                  printf(": couldn't establish interrupt");
 935                  if (intrstr != NULL)
 936                          printf(" at %s", intrstr);
 937                  printf("\n");
 938                  return (ENXIO);
 939          }
 940          printf(": %s", intrstr);
 941  
 942          return (0);
 943  }
 944  
 945  void
 946  ixgb_free_pci_resources(struct ixgb_softc *sc)
 947  {
 948          struct pci_attach_args *pa = &sc->osdep.ixgb_pa;
 949          pci_chipset_tag_t pc = pa->pa_pc;
 950  
 951          if (sc->sc_intrhand)
 952                  pci_intr_disestablish(pc, sc->sc_intrhand);
 953          sc->sc_intrhand = 0;
 954  
 955          if (sc->osdep.ixgb_membase)
 956                  bus_space_unmap(sc->osdep.mem_bus_space_tag, sc->osdep.mem_bus_space_handle,
 957                      sc->osdep.ixgb_memsize);
 958          sc->osdep.ixgb_membase = 0;
 959  }
 960  
 961  /*********************************************************************
 962   *
 963   * Initialize the hardware to a configuration as specified by the
 964   * adapter structure. The controller is reset, the EEPROM is
 965   * verified, the MAC address is set, then the shared initialization
 966   * routines are called.
 967   *
 968   **********************************************************************/
 969  int
 970  ixgb_hardware_init(struct ixgb_softc *sc)
 971  {
 972          /* Issue a global reset */
 973          sc->hw.adapter_stopped = FALSE;
 974          ixgb_adapter_stop(&sc->hw);
 975  
 976          /* Make sure we have a good EEPROM before we read from it */
 977          if (!ixgb_validate_eeprom_checksum(&sc->hw)) {
 978                  printf("%s: The EEPROM Checksum Is Not Valid\n",
 979                      sc->sc_dv.dv_xname);
 980                  return (EIO);
 981          }
 982          if (!ixgb_init_hw(&sc->hw)) {
 983                  printf("%s: Hardware Initialization Failed",
 984                      sc->sc_dv.dv_xname);
 985                  return (EIO);
 986          }
 987          bcopy(sc->hw.curr_mac_addr, sc->interface_data.ac_enaddr,
 988              IXGB_ETH_LENGTH_OF_ADDRESS);
 989  
 990          return (0);
 991  }
 992  
 993  /*********************************************************************
 994   *
 995   * Setup networking device structure and register an interface.
 996   *
 997   **********************************************************************/
 998  void
 999  ixgb_setup_interface(struct ixgb_softc *sc)
1000  {
1001          struct ifnet *ifp;
1002          INIT_DEBUGOUT("ixgb_setup_interface: begin");
1003  
1004          ifp = &sc->interface_data.ac_if;
1005          strlcpy(ifp->if_xname, sc->sc_dv.dv_xname, IFNAMSIZ);
1006  
1007          ifp->if_softc = sc;
1008          ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1009          ifp->if_ioctl = ixgb_ioctl;
1010          ifp->if_start = ixgb_start;
1011          ifp->if_watchdog = ixgb_watchdog;
1012          ifp->if_hardmtu =
1013              IXGB_MAX_JUMBO_FRAME_SIZE - ETHER_HDR_LEN - ETHER_CRC_LEN;
1014          ifq_init_maxlen(&ifp->if_snd, sc->num_tx_desc - 1);
1015  
1016          ifp->if_capabilities = IFCAP_VLAN_MTU;
1017  
1018  #if NVLAN > 0
1019          ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
1020  #endif
1021  
1022  #ifdef IXGB_CSUM_OFFLOAD
1023          ifp->if_capabilities |= IFCAP_CSUM_TCPv4|IFCAP_CSUM_UDPv4;
1024  #endif
1025  
1026          /*
1027           * Specify the media types supported by this adapter and register
1028           * callbacks to update media and link information
1029           */
1030          ifmedia_init(&sc->media, IFM_IMASK, ixgb_media_change,
1031              ixgb_media_status);
1032          if ((sc->hw.phy_type == ixgb_phy_type_g6104) ||
1033              (sc->hw.phy_type == ixgb_phy_type_txn17401)) {
1034                  ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_LR |
1035                      IFM_FDX, 0, NULL);
1036          } else {
1037                  ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_SR |
1038                      IFM_FDX, 0, NULL);
1039          }
1040          ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1041          ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
1042  
1043          if_attach(ifp);
1044          ether_ifattach(ifp);
1045  }
1046  
1047  /********************************************************************
1048   * Manage DMA'able memory.
1049   *******************************************************************/
1050  int
1051  ixgb_dma_malloc(struct ixgb_softc *sc, bus_size_t size,
1052                  struct ixgb_dma_alloc * dma, int mapflags)
1053  {
1054          int r;
1055  
1056          dma->dma_tag = sc->osdep.ixgb_pa.pa_dmat;
1057          r = bus_dmamap_create(dma->dma_tag, size, 1,
1058              size, 0, BUS_DMA_NOWAIT, &dma->dma_map);
1059          if (r != 0) {
1060                  printf("%s: ixgb_dma_malloc: bus_dmamap_create failed; "
1061                      "error %u\n", sc->sc_dv.dv_xname, r);
1062                  goto fail_0;
1063          }
1064  
1065          r = bus_dmamem_alloc(dma->dma_tag, size, PAGE_SIZE, 0, &dma->dma_seg,
1066              1, &dma->dma_nseg, BUS_DMA_NOWAIT);
1067          if (r != 0) {
1068                  printf("%s: ixgb_dma_malloc: bus_dmamem_alloc failed; "
1069                      "size %lu, error %d\n", sc->sc_dv.dv_xname,
1070                      (unsigned long)size, r);
1071                  goto fail_1;
1072          }
1073  
1074          r = bus_dmamem_map(dma->dma_tag, &dma->dma_seg, dma->dma_nseg, size,
1075              &dma->dma_vaddr, BUS_DMA_NOWAIT);
1076          if (r != 0) {
1077                  printf("%s: ixgb_dma_malloc: bus_dmamem_map failed; "
1078                      "size %lu, error %d\n", sc->sc_dv.dv_xname,
1079                      (unsigned long)size, r);
1080                  goto fail_2;
1081          }
1082  
1083          r = bus_dmamap_load(sc->osdep.ixgb_pa.pa_dmat, dma->dma_map,
1084              dma->dma_vaddr, size, NULL,
1085              mapflags | BUS_DMA_NOWAIT);
1086          if (r != 0) {
1087                  printf("%s: ixgb_dma_malloc: bus_dmamap_load failed; "
1088                      "error %u\n", sc->sc_dv.dv_xname, r);
1089                  goto fail_3;
1090          }
1091  
1092          dma->dma_size = size;
1093          return (0);
1094  
1095  fail_3:
1096          bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, size);
1097  fail_2:
1098          bus_dmamem_free(dma->dma_tag, &dma->dma_seg, dma->dma_nseg);
1099  fail_1:
1100          bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
1101  fail_0:
1102          dma->dma_map = NULL;
1103          dma->dma_tag = NULL;
1104  
1105          return (r);
1106  }
1107  
1108  void
1109  ixgb_dma_free(struct ixgb_softc *sc, struct ixgb_dma_alloc *dma)
1110  {
1111          if (dma->dma_tag == NULL)
1112                  return;
1113  
1114          if (dma->dma_map != NULL) {
1115                  bus_dmamap_sync(dma->dma_tag, dma->dma_map, 0,
1116                      dma->dma_map->dm_mapsize,
1117                      BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1118                  bus_dmamap_unload(dma->dma_tag, dma->dma_map);
1119                  bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, dma->dma_size);
1120                  bus_dmamem_free(dma->dma_tag, &dma->dma_seg, dma->dma_nseg);
1121                  bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
1122          }
1123  }
1124  
1125  /*********************************************************************
1126   *
1127   * Allocate memory for tx_buffer structures. The tx_buffer stores all
1128   * the information needed to transmit a packet on the wire.
1129   *
1130   **********************************************************************/
1131  int
1132  ixgb_allocate_transmit_structures(struct ixgb_softc *sc)
1133  {
1134          if (!(sc->tx_buffer_area = mallocarray(sc->num_tx_desc,
1135              sizeof(struct ixgb_buffer), M_DEVBUF, M_NOWAIT | M_ZERO))) {
1136                  printf("%s: Unable to allocate tx_buffer memory\n",
1137                      sc->sc_dv.dv_xname);
1138                  return (ENOMEM);
1139          }
1140  
1141          return (0);
1142  }
1143  
1144  /*********************************************************************
1145   *
1146   * Allocate and initialize transmit structures.
1147   *
1148   **********************************************************************/
1149  int
1150  ixgb_setup_transmit_structures(struct ixgb_softc *sc)
1151  {
1152          struct ixgb_buffer *tx_buffer;
1153          int error, i;
1154  
1155          if ((error = ixgb_allocate_transmit_structures(sc)) != 0)
1156                  goto fail;
1157  
1158          bzero((void *)sc->tx_desc_base,
1159              (sizeof(struct ixgb_tx_desc)) * sc->num_tx_desc);
1160  
1161          sc->txtag = sc->osdep.ixgb_pa.pa_dmat;
1162  
1163          tx_buffer = sc->tx_buffer_area;
1164          for (i = 0; i < sc->num_tx_desc; i++) {
1165                  error = bus_dmamap_create(sc->txtag, IXGB_MAX_JUMBO_FRAME_SIZE,
1166                      IXGB_MAX_SCATTER, IXGB_MAX_JUMBO_FRAME_SIZE, 0,
1167                      BUS_DMA_NOWAIT, &tx_buffer->map);
1168                  if (error != 0) {
1169                          printf("%s: Unable to create TX DMA map\n",
1170                              sc->sc_dv.dv_xname);
1171                          goto fail;
1172                  }
1173                  tx_buffer++;
1174          }
1175  
1176          sc->next_avail_tx_desc = 0;
1177          sc->oldest_used_tx_desc = 0;
1178  
1179          /* Set number of descriptors available */
1180          sc->num_tx_desc_avail = sc->num_tx_desc;
1181  
1182          /* Set checksum context */
1183          sc->active_checksum_context = OFFLOAD_NONE;
1184          bus_dmamap_sync(sc->txdma.dma_tag, sc->txdma.dma_map, 0,
1185              sc->txdma.dma_size, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1186  
1187          return (0);
1188  
1189  fail:
1190          ixgb_free_transmit_structures(sc);
1191          return (error);
1192  }
1193  
1194  /*********************************************************************
1195   *
1196   * Enable transmit unit.
1197   *
1198   **********************************************************************/
1199  void
1200  ixgb_initialize_transmit_unit(struct ixgb_softc *sc)
1201  {
1202          u_int32_t reg_tctl;
1203          u_int64_t bus_addr;
1204  
1205          /* Setup the Base and Length of the Tx Descriptor Ring */
1206          bus_addr = sc->txdma.dma_map->dm_segs[0].ds_addr;
1207          IXGB_WRITE_REG(&sc->hw, TDBAL, (u_int32_t)bus_addr);
1208          IXGB_WRITE_REG(&sc->hw, TDBAH, (u_int32_t)(bus_addr >> 32));
1209          IXGB_WRITE_REG(&sc->hw, TDLEN,
1210              sc->num_tx_desc *
1211              sizeof(struct ixgb_tx_desc));
1212  
1213          /* Setup the HW Tx Head and Tail descriptor pointers */
1214          IXGB_WRITE_REG(&sc->hw, TDH, 0);
1215          IXGB_WRITE_REG(&sc->hw, TDT, 0);
1216  
1217          HW_DEBUGOUT2("Base = %x, Length = %x\n",
1218              IXGB_READ_REG(&sc->hw, TDBAL),
1219              IXGB_READ_REG(&sc->hw, TDLEN));
1220  
1221          IXGB_WRITE_REG(&sc->hw, TIDV, sc->tx_int_delay);
1222  
1223          /* Program the Transmit Control Register */
1224          reg_tctl = IXGB_READ_REG(&sc->hw, TCTL);
Value stored to 'reg_tctl' is never read
1225          reg_tctl = IXGB_TCTL_TCE | IXGB_TCTL_TXEN | IXGB_TCTL_TPDE;
1226          IXGB_WRITE_REG(&sc->hw, TCTL, reg_tctl);
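Editor's note: the value loaded from TCTL on line 1224 is overwritten on
line 1225 before it is ever read; see the sketch in the Bug Summary above
for the two plausible ways to resolve this (drop the read, or change line
1225 to OR into the read-back value).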
1227  
1228          /* Setup Transmit Descriptor Settings for this adapter */
1229          sc->txd_cmd = IXGB_TX_DESC_TYPE | IXGB_TX_DESC_CMD_RS;
1230  
1231          if (sc->tx_int_delay > 0)
1232                  sc->txd_cmd |= IXGB_TX_DESC_CMD_IDE;
1233  }
1234  
1235  /*********************************************************************
1236   *
1237   * Free all transmit related data structures.
1238   *
1239   **********************************************************************/
1240  void
1241  ixgb_free_transmit_structures(struct ixgb_softc *sc)
1242  {
1243          struct ixgb_buffer *tx_buffer;
1244          int i;
1245  
1246          INIT_DEBUGOUT("free_transmit_structures: begin");
1247  
1248          if (sc->tx_buffer_area != NULL) {
1249                  tx_buffer = sc->tx_buffer_area;
1250                  for (i = 0; i < sc->num_tx_desc; i++, tx_buffer++) {
1251                          if (tx_buffer->map != NULL &&
1252                              tx_buffer->map->dm_nsegs > 0) {
1253                                  bus_dmamap_sync(sc->txtag, tx_buffer->map,
1254                                      0, tx_buffer->map->dm_mapsize,
1255                                      BUS_DMASYNC_POSTWRITE);
1256                                  bus_dmamap_unload(sc->txtag,
1257                                      tx_buffer->map);
1258                          }
1259  
1260                          if (tx_buffer->m_head != NULL) {
1261                                  m_freem(tx_buffer->m_head);
1262                                  tx_buffer->m_head = NULL;
1263                          }
1264                          if (tx_buffer->map != NULL) {
1265                                  bus_dmamap_destroy(sc->txtag,
1266                                      tx_buffer->map);
1267                                  tx_buffer->map = NULL;
1268                          }
1269                  }
1270          }
1271          if (sc->tx_buffer_area != NULL) {
1272                  free(sc->tx_buffer_area, M_DEVBUF, 0);
1273                  sc->tx_buffer_area = NULL;
1274          }
1275          if (sc->txtag != NULL) {
1276                  sc->txtag = NULL;
1277          }
1278  }
1279
1280/*********************************************************************
1281 *
1282 * The offload context needs to be set when we transfer the first
1283 * packet of a particular protocol (TCP/UDP). We change the
1284 * context only if the protocol type changes.
1285 *
1286 **********************************************************************/
1287void
1288ixgb_transmit_checksum_setup(struct ixgb_softc *sc,
1289 struct mbuf *mp,
1290 u_int8_t *txd_popts)
1291{
1292 struct ixgb_context_desc *TXD;
1293 struct ixgb_buffer *tx_buffer;
1294 int curr_txd;
1295
1296 if (mp->m_pkthdr.csum_flags) {
1297
1298 if (mp->m_pkthdr.csum_flags & M_TCP_CSUM_OUT) {
1299 *txd_popts = IXGB_TX_DESC_POPTS_TXSM;
1300 if (sc->active_checksum_context == OFFLOAD_TCP_IP)
1301 return;
1302 else
1303 sc->active_checksum_context = OFFLOAD_TCP_IP;
1304
1305 } else if (mp->m_pkthdr.csum_flags & M_UDP_CSUM_OUT) {
1306 *txd_popts = IXGB_TX_DESC_POPTS_TXSM;
1307 if (sc->active_checksum_context == OFFLOAD_UDP_IP)
1308 return;
1309 else
1310 sc->active_checksum_context = OFFLOAD_UDP_IP;
1311 } else {
1312 *txd_popts = 0;
1313 return;
1314 }
1315 } else {
1316 *txd_popts = 0;
1317 return;
1318 }
1319
1320 /*
1321 * If we reach this point, the checksum offload context needs to be
1322 * reset.
1323 */
1324 curr_txd = sc->next_avail_tx_desc;
1325 tx_buffer = &sc->tx_buffer_area[curr_txd];
1326 TXD = (struct ixgb_context_desc *) & sc->tx_desc_base[curr_txd];
1327
1328 TXD->tucss = ENET_HEADER_SIZE + sizeof(struct ip);
1329 TXD->tucse = 0;
1330
1331 TXD->mss = 0;
1332
1333 if (sc->active_checksum_context == OFFLOAD_TCP_IP) {
1334 TXD->tucso =
1335 ENET_HEADER_SIZE + sizeof(struct ip) +
1336 offsetof(struct tcphdr, th_sum);
1337 } else if (sc->active_checksum_context == OFFLOAD_UDP_IP) {
1338 TXD->tucso =
1339 ENET_HEADER_SIZE + sizeof(struct ip) +
1340 offsetof(struct udphdr, uh_sum);
1341 }
1342 TXD->cmd_type_len = htole32(IXGB_CONTEXT_DESC_CMD_TCP |
1343 IXGB_TX_DESC_CMD_RS | IXGB_CONTEXT_DESC_CMD_IDE);
1344
1345 tx_buffer->m_head = NULL;
1346
1347 if (++curr_txd == sc->num_tx_desc)
1348 curr_txd = 0;
1349
1350 sc->num_tx_desc_avail--;
1351 sc->next_avail_tx_desc = curr_txd;
1352}
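The context-caching behavior described in the block comment above (lines 1280-1286) can be illustrated with a small user-space sketch; the packet sequence and enum names below are invented for illustration and only mirror the driver's logic:

#include <stdio.h>

/* Stand-ins for sc->active_checksum_context and the OFFLOAD_* values. */
enum ctx { OFF_NONE, OFF_TCP_IP, OFF_UDP_IP };

int
main(void)
{
	enum ctx active = OFF_NONE;
	enum ctx pkts[] = { OFF_TCP_IP, OFF_TCP_IP, OFF_TCP_IP,
	    OFF_UDP_IP, OFF_UDP_IP, OFF_TCP_IP };
	int i, n = sizeof(pkts) / sizeof(pkts[0]), ctx_descs = 0;

	for (i = 0; i < n; i++) {
		if (pkts[i] != active) {
			/* Protocol changed: one context descriptor used. */
			ctx_descs++;
			active = pkts[i];
		}
	}
	printf("%d packets, %d context descriptors\n", n, ctx_descs);
	return 0;
}

With this traffic mix only 3 of the 6 packets pay for a context descriptor, which is why ixgb_transmit_checksum_setup() returns early when the cached context already matches.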
1353
1354/**********************************************************************
1355 *
1356 * Examine each tx_buffer in the used queue. If the hardware is done
1357 * processing the packet then free associated resources. The
1358 * tx_buffer is put back on the free queue.
1359 *
1360 **********************************************************************/
1361void
1362ixgb_txeof(struct ixgb_softc *sc)
1363{
1364 int i, num_avail;
1365 struct ixgb_buffer *tx_buffer;
1366 struct ixgb_tx_desc *tx_desc;
1367 struct ifnet *ifp = &sc->interface_data.ac_if;
1368
1369 if (sc->num_tx_desc_avail == sc->num_tx_desc)
1370 return;
1371
1372 num_avail = sc->num_tx_desc_avail;
1373 i = sc->oldest_used_tx_desc;
1374
1375 tx_buffer = &sc->tx_buffer_area[i];
1376 tx_desc = &sc->tx_desc_base[i];
1377
1378 bus_dmamap_sync(sc->txdma.dma_tag, sc->txdma.dma_map, 0,
1379 sc->txdma.dma_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1380 while (tx_desc->status & IXGB_TX_DESC_STATUS_DD) {
1381
1382 tx_desc->status = 0;
1383 num_avail++;
1384
1385 if (tx_buffer->m_head != NULL) {
1386 if (tx_buffer->map->dm_nsegs > 0) {
1387 bus_dmamap_sync(sc->txtag, tx_buffer->map,
1388 0, tx_buffer->map->dm_mapsize,
1389 BUS_DMASYNC_POSTWRITE);
1390 bus_dmamap_unload(sc->txtag, tx_buffer->map);
1391 }
1392
1393 m_freem(tx_buffer->m_head);
1394 tx_buffer->m_head = NULL;
1395 }
1396 if (++i == sc->num_tx_desc)
1397 i = 0;
1398
1399 tx_buffer = &sc->tx_buffer_area[i];
1400 tx_desc = &sc->tx_desc_base[i];
1401 }
1402 bus_dmamap_sync(sc->txdma.dma_tag, sc->txdma.dma_map, 0,
1403 sc->txdma.dma_map->dm_mapsize,
1404 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1405
1406 sc->oldest_used_tx_desc = i;
1407
1408 /*
1409 * If we have enough room, clear IFF_OACTIVE to tell the stack that
1410 * it is OK to send packets. If there are no pending descriptors,
1411 * clear the timeout. Otherwise, if some descriptors have been freed,
1412 * restart the timeout.
1413 */
1414 if (num_avail > IXGB_TX_CLEANUP_THRESHOLD)
1415 ifq_clr_oactive(&ifp->if_snd);
1416
1417 /* All clean, turn off the timer */
1418 if (num_avail == sc->num_tx_desc)
1419 ifp->if_timer = 0;
1420 /* Some cleaned, reset the timer */
1421 else if (num_avail != sc->num_tx_desc_avail)
1422 ifp->if_timer = IXGB_TX_TIMEOUT;
1423
1424 sc->num_tx_desc_avail = num_avail;
1425}
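The harvest loop above walks the ring from oldest_used_tx_desc and reclaims descriptors for as long as the hardware has set the DD (descriptor done) status bit, stopping at the first descriptor still owned by hardware. A self-contained sketch of just the index arithmetic, with an invented 8-entry ring:

#include <stdio.h>

#define NDESC	8
#define DD	0x01	/* mirrors IXGB_TX_DESC_STATUS_DD */

int
main(void)
{
	/* Hardware has completed the first three descriptors. */
	unsigned char status[NDESC] = { DD, DD, DD, 0, 0, 0, 0, 0 };
	int i = 0;		/* oldest used descriptor */
	int num_avail = 2;	/* free descriptors before cleanup */

	while (status[i] & DD) {
		status[i] = 0;	/* hand the descriptor back to software */
		num_avail++;
		if (++i == NDESC)
			i = 0;
	}
	printf("oldest_used=%d num_avail=%d\n", i, num_avail);	/* 3 and 5 */
	return 0;
}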
1426
1427
1428/*********************************************************************
1429 *
1430 * Get a buffer from system mbuf buffer pool.
1431 *
1432 **********************************************************************/
1433int
1434ixgb_get_buf(struct ixgb_softc *sc, int i,
1435 struct mbuf *nmp)
1436{
1437 struct mbuf *mp = nmp;
1438 struct ixgb_buffer *rx_buffer;
1439 int error;
1440
1441 if (mp == NULL) {
1442 MGETHDR(mp, M_DONTWAIT, MT_DATA);
1443 if (mp == NULL) {
1444 sc->mbuf_alloc_failed++;
1445 return (ENOBUFS);
1446 }
1447 MCLGET(mp, M_DONTWAIT);
1448 if ((mp->m_flags & M_EXT) == 0) {
1449 m_freem(mp);
1450 sc->mbuf_cluster_failed++;
1451 return (ENOBUFS);
1452 }
1453 mp->m_len = mp->m_pkthdr.len = MCLBYTES;
1454 } else {
1455 mp->m_len = mp->m_pkthdr.len = MCLBYTES;
1456 mp->m_data = mp->m_ext.ext_buf;
1457 mp->m_next = NULL;
1458 }
1459
1460 if (sc->hw.max_frame_size <= (MCLBYTES - ETHER_ALIGN))
1461 m_adj(mp, ETHER_ALIGN);
1462
1463 rx_buffer = &sc->rx_buffer_area[i];
1464
1465 /*
1466 * Using memory from the mbuf cluster pool, invoke the bus_dma
1467 * machinery to arrange the memory mapping.
1468 */
1469 error = bus_dmamap_load_mbuf(sc->rxtag, rx_buffer->map,
1470 mp, BUS_DMA_NOWAIT);
1471 if (error) {
1472 m_freem(mp);
1473 return (error);
1474 }
1475 rx_buffer->m_head = mp;
1476 bzero(&sc->rx_desc_base[i], sizeof(sc->rx_desc_base[i]));
1477 sc->rx_desc_base[i].buff_addr = htole64(rx_buffer->map->dm_segs[0].ds_addr);
1478 bus_dmamap_sync(sc->rxtag, rx_buffer->map, 0,
1479 rx_buffer->map->dm_mapsize, BUS_DMASYNC_PREREAD);
1480
1481 return (0);
1482}
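The m_adj(mp, ETHER_ALIGN) above shifts the frame two bytes into the cluster so that the IP header, which follows the 14-byte Ethernet header, lands on a 4-byte boundary; the guard on max_frame_size ensures the frame still fits in the cluster after the trim. The offset arithmetic, as a trivial sketch:

#include <stdio.h>

int
main(void)
{
	int ether_hdr_len = 14;	/* ENET_HEADER_SIZE */
	int ether_align = 2;	/* ETHER_ALIGN */

	printf("IP header offset unpadded: %d\n", ether_hdr_len);	/* 14 */
	printf("IP header offset padded:   %d\n",
	    ether_align + ether_hdr_len);				/* 16 */
	return 0;
}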
1483
1484/*********************************************************************
1485 *
1486 * Allocate memory for rx_buffer structures. Since we use one
1487 * rx_buffer per received packet, the maximum number of rx_buffer's
1488 * that we'll need is equal to the number of receive descriptors
1489 * that we've allocated.
1490 *
1491 **********************************************************************/
1492int
1493ixgb_allocate_receive_structures(struct ixgb_softc *sc)
1494{
1495 int i, error;
1496 struct ixgb_buffer *rx_buffer;
1497
1498 if (!(sc->rx_buffer_area = mallocarray(sc->num_rx_desc,
1499 sizeof(struct ixgb_buffer), M_DEVBUF, M_NOWAIT | M_ZERO))) {
1500 printf("%s: Unable to allocate rx_buffer memory\n",
1501 sc->sc_dv.dv_xname);
1502 return (ENOMEM);
1503 }
1504
1505 sc->rxtag = sc->osdep.ixgb_pa.pa_dmat;
1506
1507 rx_buffer = sc->rx_buffer_area;
1508 for (i = 0; i < sc->num_rx_desc; i++, rx_buffer++) {
1509 error = bus_dmamap_create(sc->rxtag, MCLBYTES, 1,
1510 MCLBYTES, 0, BUS_DMA_NOWAIT,
1511 &rx_buffer->map);
1512 if (error != 0) {
1513 printf("%s: ixgb_allocate_receive_structures: "
1514 "bus_dmamap_create failed; error %u\n",
1515 sc->sc_dv.dv_xname, error);
1516 goto fail;
1517 }
1518 }
1519
1520 for (i = 0; i < sc->num_rx_desc; i++) {
1521 error = ixgb_get_buf(sc, i, NULL);
1522 if (error != 0)
1523 goto fail;
1524 }
1525 bus_dmamap_sync(sc->rxdma.dma_tag, sc->rxdma.dma_map, 0,
1526 sc->rxdma.dma_map->dm_mapsize,
1527 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1528
1529 return (0);
1530
1531fail:
1532 ixgb_free_receive_structures(sc);
1533 return (error);
1534}
1535
1536/*********************************************************************
1537 *
1538 * Allocate and initialize receive structures.
1539 *
1540 **********************************************************************/
1541int
1542ixgb_setup_receive_structures(struct ixgb_softc *sc)
1543{
1544 bzero((void *)sc->rx_desc_base,
1545 (sizeof(struct ixgb_rx_desc)) * sc->num_rx_desc);
1546
1547 if (ixgb_allocate_receive_structures(sc))
1548 return (ENOMEM);
1549
1550 /* Setup our descriptor pointers */
1551 sc->next_rx_desc_to_check = 0;
1552 sc->next_rx_desc_to_use = 0;
1553 return (0);
1554}
1555
1556/*********************************************************************
1557 *
1558 * Enable receive unit.
1559 *
1560 **********************************************************************/
1561void
1562ixgb_initialize_receive_unit(struct ixgb_softc *sc)
1563{
1564 u_int32_t reg_rctl;
1565 u_int32_t reg_rxcsum;
1566 u_int32_t reg_rxdctl;
1567 u_int64_t bus_addr;
1568
1569 /*
1570 * Make sure receives are disabled while setting up the descriptor
1571 * ring
1572 */
1573 reg_rctl = IXGB_READ_REG(&sc->hw, RCTL);
1574 IXGB_WRITE_REG(&sc->hw, RCTL, reg_rctl & ~IXGB_RCTL_RXEN);
1575
1576 /* Set the Receive Delay Timer Register */
1577 IXGB_WRITE_REG(&sc->hw, RDTR,
1578 sc->rx_int_delay);
1579
1580 /* Setup the Base and Length of the Rx Descriptor Ring */
1581 bus_addr = sc->rxdma.dma_map->dm_segs[0].ds_addr;
1582 IXGB_WRITE_REG(&sc->hw, RDBAL, (u_int32_t)bus_addr);
1583 IXGB_WRITE_REG(&sc->hw, RDBAH, (u_int32_t)(bus_addr >> 32));
1584 IXGB_WRITE_REG(&sc->hw, RDLEN, sc->num_rx_desc *
1585 sizeof(struct ixgb_rx_desc));
1586
1587 /* Setup the HW Rx Head and Tail Descriptor Pointers */
1588 IXGB_WRITE_REG(&sc->hw, RDH, 0);
1589
1590 IXGB_WRITE_REG(&sc->hw, RDT, sc->num_rx_desc - 1);
1591
1592 reg_rxdctl = RXDCTL_WTHRESH_DEFAULT << IXGB_RXDCTL_WTHRESH_SHIFT
1593 | RXDCTL_HTHRESH_DEFAULT << IXGB_RXDCTL_HTHRESH_SHIFT
1594 | RXDCTL_PTHRESH_DEFAULT << IXGB_RXDCTL_PTHRESH_SHIFT;
1595 IXGB_WRITE_REG(&sc->hw, RXDCTL, reg_rxdctl);
1596
1597 sc->raidc = 1;
1598 if (sc->raidc) {
1599 uint32_t raidc;
1600 uint8_t poll_threshold;
1601#define IXGB_RAIDC_POLL_DEFAULT 120
1602
1603 poll_threshold = ((sc->num_rx_desc - 1) >> 3);
1604 poll_threshold >>= 1;
1605 poll_threshold &= 0x3F;
1606 raidc = IXGB_RAIDC_EN | IXGB_RAIDC_RXT_GATE |
1607 (IXGB_RAIDC_POLL_DEFAULT << IXGB_RAIDC_POLL_SHIFT) |
1608 (sc->rx_int_delay << IXGB_RAIDC_DELAY_SHIFT) |
1609 poll_threshold;
1610 IXGB_WRITE_REG(&sc->hw, RAIDC, raidc);
1611 }
1612
1613 /* Enable Receive Checksum Offload for TCP and UDP ? */
1614 reg_rxcsum = IXGB_READ_REG(&sc->hw, RXCSUM);
1615 reg_rxcsum |= IXGB_RXCSUM_TUOFL;
1616 IXGB_WRITE_REG(&sc->hw, RXCSUM, reg_rxcsum);
1617
1618 /* Setup the Receive Control Register */
1619 reg_rctl = IXGB_READ_REG(&sc->hw, RCTL);
1620 reg_rctl &= ~(3 << IXGB_RCTL_MO_SHIFT);
1621 reg_rctl |= IXGB_RCTL_BAM | IXGB_RCTL_RDMTS_1_2 | IXGB_RCTL_SECRC |
1622 IXGB_RCTL_CFF |
1623 (sc->hw.mc_filter_type << IXGB_RCTL_MO_SHIFT);
1624
1625 switch (sc->rx_buffer_len) {
1626 default:
1627 case IXGB_RXBUFFER_2048:
1628 reg_rctl |= IXGB_RCTL_BSIZE_2048;
1629 break;
1630 case IXGB_RXBUFFER_4096:
1631 reg_rctl |= IXGB_RCTL_BSIZE_4096;
1632 break;
1633 case IXGB_RXBUFFER_8192:
1634 reg_rctl |= IXGB_RCTL_BSIZE_8192;
1635 break;
1636 case IXGB_RXBUFFER_16384:
1637 reg_rctl |= IXGB_RCTL_BSIZE_16384;
1638 break;
1639 }
1640
1641 reg_rctl |= IXGB_RCTL_RXEN;
1642
1643 /* Enable Receives */
1644 IXGB_WRITE_REG(&sc->hw, RCTL, reg_rctl);
1645}
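The poll_threshold arithmetic above (lines 1603-1605) reduces the ring size to a 6-bit value for the RAIDC register: roughly one sixteenth of the ring, clamped to the field width. A worked example with an assumed 512-descriptor ring:

#include <stdio.h>

int
main(void)
{
	int num_rx_desc = 512;		/* assumed ring size */
	unsigned char poll_threshold;

	poll_threshold = ((num_rx_desc - 1) >> 3);	/* 511 >> 3 = 63 */
	poll_threshold >>= 1;				/* 31 */
	poll_threshold &= 0x3F;				/* fits in 6 bits: 31 */
	printf("poll_threshold = %d\n", poll_threshold);
	return 0;
}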
1646
1647/*********************************************************************
1648 *
1649 * Free receive related data structures.
1650 *
1651 **********************************************************************/
1652void
1653ixgb_free_receive_structures(struct ixgb_softc *sc)
1654{
1655 struct ixgb_buffer *rx_buffer;
1656 int i;
1657
1658 INIT_DEBUGOUT("free_receive_structures: begin");
1659
1660 if (sc->rx_buffer_area != NULL) {
1661 rx_buffer = sc->rx_buffer_area;
1662 for (i = 0; i < sc->num_rx_desc; i++, rx_buffer++) {
1663 if (rx_buffer->map != NULL &&
1664 rx_buffer->map->dm_nsegs > 0) {
1665 bus_dmamap_sync(sc->rxtag, rx_buffer->map,
1666 0, rx_buffer->map->dm_mapsize,
1667 BUS_DMASYNC_POSTREAD);
1668 bus_dmamap_unload(sc->rxtag,
1669 rx_buffer->map);
1670 }
1671 if (rx_buffer->m_head != NULL) {
1672 m_freem(rx_buffer->m_head);
1673 rx_buffer->m_head = NULL;
1674 }
1675 if (rx_buffer->map != NULL) {
1676 bus_dmamap_destroy(sc->rxtag,
1677 rx_buffer->map);
1678 rx_buffer->map = NULL;
1679 }
1680 }
1681 }
1682 if (sc->rx_buffer_area != NULL) {
1683 free(sc->rx_buffer_area, M_DEVBUF, 0);
1684 sc->rx_buffer_area = NULL;
1685 }
1686 if (sc->rxtag != NULL)
1687 sc->rxtag = NULL;
1688}
1689
1690/*********************************************************************
1691 *
1692 * This routine executes in interrupt context. It replenishes
1693 * the mbufs in the descriptor and sends data which has been
1694 * dma'ed into host memory to upper layer.
1695 *
1696 * We loop at most count times if count is > 0, or until done if
1697 * count < 0.
1698 *
1699 *********************************************************************/
1700void
1701ixgb_rxeof(struct ixgb_softc *sc, int count)
1702{
1703 struct ifnet *ifp;
1704 struct mbuf_list ml = MBUF_LIST_INITIALIZER();
1705 struct mbuf *mp;
1706 int eop = 0;
1707 int len;
1708 u_int8_t accept_frame = 0;
1709 int i;
1710 int next_to_use = 0;
1711 int eop_desc;
1712
1713 /* Pointer to the receive descriptor being examined. */
1714 struct ixgb_rx_desc *current_desc;
1715
1716 ifp = &sc->interface_data.ac_if;
1717 i = sc->next_rx_desc_to_check;
1718 next_to_use = sc->next_rx_desc_to_use;
1719 eop_desc = sc->next_rx_desc_to_check;
1720 current_desc = &sc->rx_desc_base[i];
1721 bus_dmamap_sync(sc->rxdma.dma_tag, sc->rxdma.dma_map, 0,
1722 sc->rxdma.dma_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1723
1724 if (!((current_desc->status) & IXGB_RX_DESC_STATUS_DD))
1725 return;
1726
1727 while ((current_desc->status & IXGB_RX_DESC_STATUS_DD) &&
1728 (count != 0) &&
1729 (ifp->if_flags & IFF_RUNNING)) {
1730
1731 mp = sc->rx_buffer_area[i].m_head;
1732 bus_dmamap_sync(sc->rxtag, sc->rx_buffer_area[i].map,
1733 0, sc->rx_buffer_area[i].map->dm_mapsize,
1734 BUS_DMASYNC_POSTREAD);
1735 bus_dmamap_unload(sc->rxtag, sc->rx_buffer_area[i].map);
1736
1737 accept_frame = 1;
1738 if (current_desc->status & IXGB_RX_DESC_STATUS_EOP) {
1739 count--;
1740 eop = 1;
1741 } else {
1742 eop = 0;
1743 }
1744 len = letoh16(current_desc->length);
1745
1746 if (current_desc->errors & (IXGB_RX_DESC_ERRORS_CE |
1747 IXGB_RX_DESC_ERRORS_SE | IXGB_RX_DESC_ERRORS_P |
1748 IXGB_RX_DESC_ERRORS_RXE))
1749 accept_frame = 0;
1750 if (accept_frame) {
1751
1752 /* Assign correct length to the current fragment */
1753 mp->m_len = len;
1754
1755 if (sc->fmp == NULL) {
1756 mp->m_pkthdr.len = len;
1757 sc->fmp = mp; /* Store the first mbuf */
1758 sc->lmp = mp;
1759 } else {
1760 /* Chain mbuf's together */
1761 mp->m_flags &= ~M_PKTHDR;
1762 sc->lmp->m_next = mp;
1763 sc->lmp = sc->lmp->m_next;
1764 sc->fmp->m_pkthdr.len += len;
1765 }
1766
1767 if (eop) {
1768 eop_desc = i;
1769 ixgb_receive_checksum(sc, current_desc, sc->fmp);
1770
1771#if NVLAN > 0
1772 if (current_desc->status & IXGB_RX_DESC_STATUS_VP) {
1773 sc->fmp->m_pkthdr.ether_vtag =
1774 letoh16(current_desc->special);
1775 sc->fmp->m_flags |= M_VLANTAG;
1776 }
1777#endif
1778
1779
1780 ml_enqueue(&ml, sc->fmp);
1781 sc->fmp = NULL;
1782 sc->lmp = NULL;
1783 }
1784 sc->rx_buffer_area[i].m_head = NULL;
1785 } else {
1786 sc->dropped_pkts++;
1787 m_freem(sc->fmp);
1788 sc->fmp = NULL;
1789 sc->lmp = NULL;
1790 }
1791
1792 /* Zero out the receive descriptors status */
1793 current_desc->status = 0;
1794 bus_dmamap_sync(sc->rxdma.dma_tag, sc->rxdma.dma_map, 0,
1795 sc->rxdma.dma_map->dm_mapsize,
1796 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1797
1798 /* Advance our pointers to the next descriptor */
1799 if (++i == sc->num_rx_desc) {
1800 i = 0;
1801 current_desc = sc->rx_desc_base;
1802 } else
1803 current_desc++;
1804 }
1805 sc->next_rx_desc_to_check = i;
1806
1807 if (--i < 0)
1808 i = (sc->num_rx_desc - 1);
1809
1810 /*
1811 * 82597EX: Workaround for redundant write back in receive descriptor ring (causes
1812 * memory corruption). Avoid using and re-submitting the most recently received RX
1813 * descriptor back to hardware.
1814 *
1815 * if(Last written back descriptor == EOP bit set descriptor)
1816 * then avoid re-submitting the most recently received RX descriptor
1817 * back to hardware.
1818 * if(Last written back descriptor != EOP bit set descriptor)
1819 * then avoid re-submitting the most recently received RX descriptors
1820 * till last EOP bit set descriptor.
1821 */
1822 if (eop_desc != i) {
1823 if (++eop_desc == sc->num_rx_desc)
1824 eop_desc = 0;
1825 i = eop_desc;
1826 }
1827 /* Replenish the descriptors with new mbufs till last EOP bit set descriptor */
1828 while (next_to_use != i) {
1829 current_desc = &sc->rx_desc_base[next_to_use];
1830 if ((current_desc->errors & (IXGB_RX_DESC_ERRORS_CE |
1831 IXGB_RX_DESC_ERRORS_SE | IXGB_RX_DESC_ERRORS_P |
1832 IXGB_RX_DESC_ERRORS_RXE))) {
1833 mp = sc->rx_buffer_area[next_to_use].m_head;
1834 ixgb_get_buf(sc, next_to_use, mp);
1835 } else {
1836 if (ixgb_get_buf(sc, next_to_use, NULL) == ENOBUFS)
1837 break;
1838 }
1839 /* Advance our pointers to the next descriptor */
1840 if (++next_to_use == sc->num_rx_desc)
1841 next_to_use = 0;
1842 }
1843 sc->next_rx_desc_to_use = next_to_use;
1844 if (--next_to_use < 0)
1845 next_to_use = (sc->num_rx_desc - 1);
1846 /* Advance the IXGB's Receive Queue #0 "Tail Pointer" */
1847 IXGB_WRITE_REG(&sc->hw, RDT, next_to_use);
1848
1849 if_input(ifp, &ml);
1850}
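The 82597EX workaround (comment at lines 1810-1821) caps the replenish loop at the last EOP descriptor rather than at the most recently written-back descriptor. A sketch of just the index adjustment, with invented positions in an 8-entry ring:

#include <stdio.h>

#define NDESC	8

int
main(void)
{
	int i = 5;		/* last descriptor written back by hardware */
	int eop_desc = 3;	/* last descriptor seen with EOP set */

	if (eop_desc != i) {
		/* Refill only up to (and including) the EOP descriptor. */
		if (++eop_desc == NDESC)
			eop_desc = 0;
		i = eop_desc;
	}
	printf("replenish stops before index %d\n", i);	/* 4 */
	return 0;
}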
1851
1852/*********************************************************************
1853 *
1854 * Verify that the hardware indicated that the checksum is valid.
1855 * Inform the stack about the status of checksum so that stack
1856 * doesn't spend time verifying the checksum.
1857 *
1858 *********************************************************************/
1859void
1860ixgb_receive_checksum(struct ixgb_softc *sc,
1861 struct ixgb_rx_desc *rx_desc,
1862 struct mbuf *mp)
1863{
1864 if (rx_desc->status & IXGB_RX_DESC_STATUS_IXSM) {
1865 mp->m_pkthdr.csum_flags = 0;
1866 return;
1867 }
1868
1869 if (rx_desc->status & IXGB_RX_DESC_STATUS_IPCS) {
1870 /* Did it pass? */
1871 if (!(rx_desc->errors & IXGB_RX_DESC_ERRORS_IPE)) {
1872 /* IP Checksum Good */
1873 mp->m_pkthdr.csum_flags = M_IPV4_CSUM_IN_OK;
1874
1875 } else {
1876 mp->m_pkthdr.csum_flags = 0;
1877 }
1878 }
1879 if (rx_desc->status & IXGB_RX_DESC_STATUS_TCPCS) {
1880 /* Did it pass? */
1881 if (!(rx_desc->errors & IXGB_RX_DESC_ERRORS_TCPE)) {
1882 mp->m_pkthdr.csum_flags |=
1883 M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK;
1884 }
1885 }
1886}
1887
1888/*
1889 * This turns on the hardware offload of the VLAN
1890 * tag insertion and strip
1891 */
1892void
1893ixgb_enable_hw_vlans(struct ixgb_softc *sc)
1894{
1895 uint32_t ctrl;
1896
1897 ctrl = IXGB_READ_REG(&sc->hw, CTRL0);
1898 ctrl |= IXGB_CTRL0_VME;
1899 IXGB_WRITE_REG(&sc->hw, CTRL0, ctrl);
1900}
1901
1902void
1903ixgb_enable_intr(struct ixgb_softc *sc)
1904{
1905 uint32_t val;
1906
1907 val = IXGB_INT_RXT0 | IXGB_INT_TXDW | IXGB_INT_RXDMT0 |
1908 IXGB_INT_LSC | IXGB_INT_RXO;
1909 if (sc->hw.subsystem_vendor_id == SUN_SUBVENDOR_ID)
1910 val |= IXGB_INT_GPI0;
1911 IXGB_WRITE_REG(&sc->hw, IMS, val);
1912}
1913
1914void
1915ixgb_disable_intr(struct ixgb_softc *sc)
1916{
1917 IXGB_WRITE_REG(&sc->hw, IMC, ~0);
1918}
1919
1920void
1921ixgb_write_pci_cfg(struct ixgb_hw *hw,
1922 uint32_t reg,
1923 uint16_t *value)
1924{
1925 struct pci_attach_args *pa = &((struct ixgb_osdep *)hw->back)->ixgb_pa;
1926 pci_chipset_tag_t pc = pa->pa_pc;
1927 /* Should we do read/mask/write...? 16 vs 32 bit!!! */
1928 pci_conf_write(pc, pa->pa_tag, reg, *value);
1929}
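One possible answer to the read/mask/write question in the comment at line 1927: splice the 16-bit value into the containing 32-bit dword so the neighbouring config bytes are preserved. The variant below is an untested sketch (the function name is hypothetical), built only from the standard pci_conf_read()/pci_conf_write() interfaces:

/* Hypothetical 16-bit-safe variant of ixgb_write_pci_cfg (sketch). */
void
ixgb_write_pci_cfg_16(struct ixgb_hw *hw, uint32_t reg, uint16_t *value)
{
	struct pci_attach_args *pa = &((struct ixgb_osdep *)hw->back)->ixgb_pa;
	pci_chipset_tag_t pc = pa->pa_pc;
	uint32_t dword;
	int shift;

	dword = pci_conf_read(pc, pa->pa_tag, reg & ~3);	/* aligned read */
	shift = (reg & 2) ? 16 : 0;	/* which half of the dword */
	dword &= ~(0xffffU << shift);	/* mask out the old 16 bits */
	dword |= (uint32_t)*value << shift;
	pci_conf_write(pc, pa->pa_tag, reg & ~3, dword);
}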
1930
1931/**********************************************************************
1932 *
1933 * Update the board statistics counters.
1934 *
1935 **********************************************************************/
1936void
1937ixgb_update_stats_counters(struct ixgb_softc *sc)
1938{
1939 struct ifnet *ifp;
1940
1941 sc->stats.crcerrs += IXGB_READ_REG(&sc->hw, CRCERRS);
1942 sc->stats.gprcl += IXGB_READ_REG(&sc->hw, GPRCL);
1943 sc->stats.gprch += IXGB_READ_REG(&sc->hw, GPRCH);
1944 sc->stats.gorcl += IXGB_READ_REG(&sc->hw, GORCL);
1945 sc->stats.gorch += IXGB_READ_REG(&sc->hw, GORCH);
1946 sc->stats.bprcl += IXGB_READ_REG(&sc->hw, BPRCL);
1947 sc->stats.bprch += IXGB_READ_REG(&sc->hw, BPRCH);
1948 sc->stats.mprcl += IXGB_READ_REG(&sc->hw, MPRCL);
1949 sc->stats.mprch += IXGB_READ_REG(&sc->hw, MPRCH);
1950 sc->stats.roc += IXGB_READ_REG(&sc->hw, ROC);
1951
1952 sc->stats.mpc += IXGB_READ_REG(&sc->hw, MPC);
1953 sc->stats.dc += IXGB_READ_REG(&sc->hw, DC);
1954 sc->stats.rlec += IXGB_READ_REG(&sc->hw, RLEC);
1955 sc->stats.xonrxc += IXGB_READ_REG(&sc->hw, XONRXC);
1956 sc->stats.xontxc += IXGB_READ_REG(&sc->hw, XONTXC);
1957 sc->stats.xoffrxc += IXGB_READ_REG(&sc->hw, XOFFRXC);
1958 sc->stats.xofftxc += IXGB_READ_REG(&sc->hw, XOFFTXC);
1959 sc->stats.gptcl += IXGB_READ_REG(&sc->hw, GPTCL);
1960 sc->stats.gptch += IXGB_READ_REG(&sc->hw, GPTCH);
1961 sc->stats.gotcl += IXGB_READ_REG(&sc->hw, GOTCL);
1962 sc->stats.gotch += IXGB_READ_REG(&sc->hw, GOTCH);
1963 sc->stats.ruc += IXGB_READ_REG(&sc->hw, RUC);
1964 sc->stats.rfc += IXGB_READ_REG(&sc->hw, RFC);
1965 sc->stats.rjc += IXGB_READ_REG(&sc->hw, RJC);
1966 sc->stats.torl += IXGB_READ_REG(&sc->hw, TORL);
1967 sc->stats.torh += IXGB_READ_REG(&sc->hw, TORH);
1968 sc->stats.totl += IXGB_READ_REG(&sc->hw, TOTL);
1969 sc->stats.toth += IXGB_READ_REG(&sc->hw, TOTH);
1970 sc->stats.tprl += IXGB_READ_REG(&sc->hw, TPRL);
1971 sc->stats.tprh += IXGB_READ_REG(&sc->hw, TPRH);
1972 sc->stats.tptl += IXGB_READ_REG(&sc->hw, TPTL);
1973 sc->stats.tpth += IXGB_READ_REG(&sc->hw, TPTH);
1974 sc->stats.plt64c += IXGB_READ_REG(&sc->hw, PLT64C);
1975 sc->stats.mptcl += IXGB_READ_REG(&sc->hw, MPTCL);
1976 sc->stats.mptch += IXGB_READ_REG(&sc->hw, MPTCH);
1977 sc->stats.bptcl += IXGB_READ_REG(&sc->hw, BPTCL);
1978 sc->stats.bptch += IXGB_READ_REG(&sc->hw, BPTCH);
1979
1980 sc->stats.uprcl += IXGB_READ_REG(&sc->hw, UPRCL);
1981 sc->stats.uprch += IXGB_READ_REG(&sc->hw, UPRCH);
1982 sc->stats.vprcl += IXGB_READ_REG(&sc->hw, VPRCL);
1983 sc->stats.vprch += IXGB_READ_REG(&sc->hw, VPRCH);
1984 sc->stats.jprcl += IXGB_READ_REG(&sc->hw, JPRCL);
1985 sc->stats.jprch += IXGB_READ_REG(&sc->hw, JPRCH);
1986 sc->stats.rnbc += IXGB_READ_REG(&sc->hw, RNBC);
1987 sc->stats.icbc += IXGB_READ_REG(&sc->hw, ICBC);
1988 sc->stats.ecbc += IXGB_READ_REG(&sc->hw, ECBC);
1989 sc->stats.uptcl += IXGB_READ_REG(&sc->hw, UPTCL);
1990 sc->stats.uptch += IXGB_READ_REG(&sc->hw, UPTCH);
1991 sc->stats.vptcl += IXGB_READ_REG(&sc->hw, VPTCL);
1992 sc->stats.vptch += IXGB_READ_REG(&sc->hw, VPTCH);
1993 sc->stats.jptcl += IXGB_READ_REG(&sc->hw, JPTCL);
1994 sc->stats.jptch += IXGB_READ_REG(&sc->hw, JPTCH);
1995 sc->stats.tsctc += IXGB_READ_REG(&sc->hw, TSCTC);
1996 sc->stats.tsctfc += IXGB_READ_REG(&sc->hw, TSCTFC);
1997 sc->stats.ibic += IXGB_READ_REG(&sc->hw, IBIC);
1998 sc->stats.lfc += IXGB_READ_REG(&sc->hw, LFC);
1999 sc->stats.pfrc += IXGB_READ_REG(&sc->hw, PFRC);
2000 sc->stats.pftc += IXGB_READ_REG(&sc->hw, PFTC);
2001 sc->stats.mcfrc += IXGB_READ_REG(&sc->hw, MCFRC);
2002
2003 ifp = &sc->interface_data.ac_if;
2004
2005 /* Fill out the OS statistics structure */
2006 ifp->if_collisions = 0;
2007
2008 /* Rx Errors */
2009 ifp->if_ierrors =
2010 sc->dropped_pkts +
2011 sc->stats.crcerrs +
2012 sc->stats.rnbc +
2013 sc->stats.mpc +
2014 sc->stats.rlec;
2015
2016 /* Tx Errors */
2017 ifp->if_oerrors =
2018 sc->watchdog_events;
2019}
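Counter pairs such as GORCL/GORCH above are the low and high 32-bit halves of 64-bit hardware counters. A consumer combining the accumulated halves would shift and OR them; a sketch with made-up readings:

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint32_t gorcl = 0xdeadbeef;	/* assumed low-half reading */
	uint32_t gorch = 0x2;		/* assumed high-half reading */
	uint64_t octets = ((uint64_t)gorch << 32) | gorcl;

	printf("good octets received = %llu\n",
	    (unsigned long long)octets);
	return 0;
}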
2020
2021#ifdef IXGB_DEBUG
2022/**********************************************************************
2023 *
2024 * This routine is called only when ixgb_display_debug_stats is enabled.
2025 * This routine provides a way to take a look at important statistics
2026 * maintained by the driver and hardware.
2027 *
2028 **********************************************************************/
2029void
2030ixgb_print_hw_stats(struct ixgb_softc *sc)
2031{
2032 char buf_speed[100], buf_type[100];
2033 ixgb_bus_speed bus_speed;
2034 ixgb_bus_type bus_type;
2035 const char * const unit = sc->sc_dv.dv_xname;
2036
2037 bus_speed = sc->hw.bus.speed;
2038 bus_type = sc->hw.bus.type;
2039 snprintf(buf_speed, sizeof(buf_speed),
2040 bus_speed == ixgb_bus_speed_33 ? "33MHz" :
2041 bus_speed == ixgb_bus_speed_66 ? "66MHz" :
2042 bus_speed == ixgb_bus_speed_100 ? "100MHz" :
2043 bus_speed == ixgb_bus_speed_133 ? "133MHz" :
2044 "UNKNOWN");
2045 printf("%s: PCI_Bus_Speed = %s\n", unit,
2046 buf_speed);
2047
2048 snprintf(buf_type, sizeof(buf_type),
2049 bus_type == ixgb_bus_type_pci ? "PCI" :
2050 bus_type == ixgb_bus_type_pcix ? "PCI-X" :
2051 "UNKNOWN");
2052 printf("%s: PCI_Bus_Type = %s\n", unit,
2053 buf_type);
2054
2055 printf("%s: Tx Descriptors not Avail1 = %ld\n", unit,
2056 sc->no_tx_desc_avail1);
2057 printf("%s: Tx Descriptors not Avail2 = %ld\n", unit,
2058 sc->no_tx_desc_avail2);
2059 printf("%s: Std Mbuf Failed = %ld\n", unit,
2060 sc->mbuf_alloc_failed);
2061 printf("%s: Std Cluster Failed = %ld\n", unit,
2062 sc->mbuf_cluster_failed);
2063
2064 printf("%s: Defer count = %lld\n", unit,
2065 (long long)sc->stats.dc);
2066 printf("%s: Missed Packets = %lld\n", unit,
2067 (long long)sc->stats.mpc);
2068 printf("%s: Receive No Buffers = %lld\n", unit,
2069 (long long)sc->stats.rnbc);
2070 printf("%s: Receive length errors = %lld\n", unit,
2071 (long long)sc->stats.rlec);
2072 printf("%s: Crc errors = %lld\n", unit,
2073 (long long)sc->stats.crcerrs);
2074 printf("%s: Driver dropped packets = %ld\n", unit,
2075 sc->dropped_pkts);
2076
2077 printf("%s: XON Rcvd = %lld\n", unit,
2078 (long long)sc->stats.xonrxc);
2079 printf("%s: XON Xmtd = %lld\n", unit,
2080 (long long)sc->stats.xontxc);
2081 printf("%s: XOFF Rcvd = %lld\n", unit,
2082 (long long)sc->stats.xoffrxc);
2083 printf("%s: XOFF Xmtd = %lld\n", unit,
2084 (long long)sc->stats.xofftxc);
2085
2086 printf("%s: Good Packets Rcvd = %lld\n", unit,
2087 (long long)sc->stats.gprcl);
2088 printf("%s: Good Packets Xmtd = %lld\n", unit,
2089 (long long)sc->stats.gptcl);
2090
2091 printf("%s: Jumbo frames recvd = %lld\n", unit,
2092 (long long)sc->stats.jprcl);
2093 printf("%s: Jumbo frames Xmtd = %lld\n", unit,
2094 (long long)sc->stats.jptcl);
2095}
2096#endif