Bug Summary

File: dev/pci/if_ix.c
Warning: line 1450, column 25
Dereference of null pointer
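The annotated path below shows 'txd' being initialized to a null pointer inside ixgbe_encap() (source line 1388), with the analyzer assuming the code that would assign it may not run before the flagged dereference at line 1450 (which falls past the end of this excerpt). As a minimal standalone sketch of this warning class only (illustrative, not the driver's code), clang --analyze reports the same diagnostic for a shape like this:

/* sketch.c -- illustration of the diagnostic class, not if_ix.c code */
#include <stddef.h>

int
last_element(const int *v, size_t n)
{
	const int *p = NULL;
	size_t i;

	/* If n can be 0, the loop body never runs and p stays NULL. */
	for (i = 0; i < n; i++)
		p = &v[i];

	return (*p);	/* analyzer: Dereference of null pointer */
}

Guarding the dereference (or establishing that the loop always runs at least once) silences the diagnostic; whether the driver's descriptor loop can actually execute zero times decides if the report at line 1450 is a true or false positive.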

Annotated Source Code


clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name if_ix.c -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/lib/clang/13.0.0 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -D CONFIG_DRM_AMD_DC_DCN3_0 -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant -fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free 
-fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /usr/obj/sys/arch/amd64/compile/GENERIC.MP/scan-build/2022-01-12-131800-47421-1 -x c /usr/src/sys/dev/pci/if_ix.c
1/* $OpenBSD: if_ix.c,v 1.180 2021/07/27 01:44:55 kevlo Exp $ */
2
3/******************************************************************************
4
5 Copyright (c) 2001-2013, Intel Corporation
6 All rights reserved.
7
8 Redistribution and use in source and binary forms, with or without
9 modification, are permitted provided that the following conditions are met:
10
11 1. Redistributions of source code must retain the above copyright notice,
12 this list of conditions and the following disclaimer.
13
14 2. Redistributions in binary form must reproduce the above copyright
15 notice, this list of conditions and the following disclaimer in the
16 documentation and/or other materials provided with the distribution.
17
18 3. Neither the name of the Intel Corporation nor the names of its
19 contributors may be used to endorse or promote products derived from
20 this software without specific prior written permission.
21
22 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 POSSIBILITY OF SUCH DAMAGE.
33
34******************************************************************************/
35/* FreeBSD: src/sys/dev/ixgbe/ixgbe.c 251964 Jun 18 21:28:19 2013 UTC */
36
37#include <dev/pci/if_ix.h>
38#include <dev/pci/ixgbe_type.h>
39
40/*********************************************************************
41 * Driver version
42 *********************************************************************/
43/* char ixgbe_driver_version[] = "2.5.13"; */
44
45/*********************************************************************
46 * PCI Device ID Table
47 *
48 * Used by probe to select devices to load on
49 *********************************************************************/
50
51const struct pci_matchid ixgbe_devices[] = {
52 { PCI_VENDOR_INTEL0x8086, PCI_PRODUCT_INTEL_825980x10b6 },
53 { PCI_VENDOR_INTEL0x8086, PCI_PRODUCT_INTEL_82598_BX0x1508 },
54 { PCI_VENDOR_INTEL0x8086, PCI_PRODUCT_INTEL_82598AF_DUAL0x10c6 },
55 { PCI_VENDOR_INTEL0x8086, PCI_PRODUCT_INTEL_82598AF0x10c7 },
56 { PCI_VENDOR_INTEL0x8086, PCI_PRODUCT_INTEL_82598AT0x10c8 },
57 { PCI_VENDOR_INTEL0x8086, PCI_PRODUCT_INTEL_82598AT20x150b },
58 { PCI_VENDOR_INTEL0x8086, PCI_PRODUCT_INTEL_82598AT_DUAL0x10d7 },
59 { PCI_VENDOR_INTEL0x8086, PCI_PRODUCT_INTEL_82598EB_CX40x10dd },
60 { PCI_VENDOR_INTEL0x8086, PCI_PRODUCT_INTEL_82598EB_CX4_DUAL0x10ec },
61 { PCI_VENDOR_INTEL0x8086, PCI_PRODUCT_INTEL_82598EB_XF_LR0x10f4 },
62 { PCI_VENDOR_INTEL0x8086, PCI_PRODUCT_INTEL_82598EB_SFP0x10db },
63 { PCI_VENDOR_INTEL0x8086, PCI_PRODUCT_INTEL_82598_SR_DUAL_EM0x10e1 },
64 { PCI_VENDOR_INTEL0x8086, PCI_PRODUCT_INTEL_82598_DA_DUAL0x10f1 },
65 { PCI_VENDOR_INTEL0x8086, PCI_PRODUCT_INTEL_82599_KX40x10f7 },
66 { PCI_VENDOR_INTEL0x8086, PCI_PRODUCT_INTEL_82599_KX4_MEZZ0x1514 },
67 { PCI_VENDOR_INTEL0x8086, PCI_PRODUCT_INTEL_82599_XAUI0x10fc },
68 { PCI_VENDOR_INTEL0x8086, PCI_PRODUCT_INTEL_82599_COMBO_BP0x10f8 },
69 { PCI_VENDOR_INTEL0x8086, PCI_PRODUCT_INTEL_82599_BPLANE_FCOE0x152a },
70 { PCI_VENDOR_INTEL0x8086, PCI_PRODUCT_INTEL_82599_CX40x10f9 },
71 { PCI_VENDOR_INTEL0x8086, PCI_PRODUCT_INTEL_82599_T3_LOM0x151c },
72 { PCI_VENDOR_INTEL0x8086, PCI_PRODUCT_INTEL_82599_SFP0x10fb },
73 { PCI_VENDOR_INTEL0x8086, PCI_PRODUCT_INTEL_82599_SFP_EM0x1507 },
74 { PCI_VENDOR_INTEL0x8086, PCI_PRODUCT_INTEL_82599_SFP_SF_QP0x154a },
75 { PCI_VENDOR_INTEL0x8086, PCI_PRODUCT_INTEL_82599_SFP_SF20x154d },
76 { PCI_VENDOR_INTEL0x8086, PCI_PRODUCT_INTEL_82599_SFP_FCOE0x1529 },
77 { PCI_VENDOR_INTEL0x8086, PCI_PRODUCT_INTEL_82599EN_SFP0x1557 },
78 { PCI_VENDOR_INTEL0x8086, PCI_PRODUCT_INTEL_82599_QSFP_SF_QP0x1558 },
79 { PCI_VENDOR_INTEL0x8086, PCI_PRODUCT_INTEL_X540T0x1528 },
80 { PCI_VENDOR_INTEL0x8086, PCI_PRODUCT_INTEL_X540T10x1560 },
81 { PCI_VENDOR_INTEL0x8086, PCI_PRODUCT_INTEL_X550T0x1563 },
82 { PCI_VENDOR_INTEL0x8086, PCI_PRODUCT_INTEL_X550T10x15d1 },
83 { PCI_VENDOR_INTEL0x8086, PCI_PRODUCT_INTEL_X550EM_X_KX40x15aa },
84 { PCI_VENDOR_INTEL0x8086, PCI_PRODUCT_INTEL_X550EM_X_KR0x15ab },
85 { PCI_VENDOR_INTEL0x8086, PCI_PRODUCT_INTEL_X550EM_X_SFP0x15ac },
86 { PCI_VENDOR_INTEL0x8086, PCI_PRODUCT_INTEL_X550EM_X_10G_T0x15ad },
87 { PCI_VENDOR_INTEL0x8086, PCI_PRODUCT_INTEL_X550EM_X_1G_T0x15ae },
88 { PCI_VENDOR_INTEL0x8086, PCI_PRODUCT_INTEL_X550EM_A_KR0x15c2 },
89 { PCI_VENDOR_INTEL0x8086, PCI_PRODUCT_INTEL_X550EM_A_KR_L0x15c3 },
90 { PCI_VENDOR_INTEL0x8086, PCI_PRODUCT_INTEL_X550EM_A_SFP_N0x15c4 },
91 { PCI_VENDOR_INTEL0x8086, PCI_PRODUCT_INTEL_X550EM_A_SFP0x15ce },
92 { PCI_VENDOR_INTEL0x8086, PCI_PRODUCT_INTEL_X550EM_A_SGMII0x15c6 },
93 { PCI_VENDOR_INTEL0x8086, PCI_PRODUCT_INTEL_X550EM_A_SGMII_L0x15c7 },
94 { PCI_VENDOR_INTEL0x8086, PCI_PRODUCT_INTEL_X550EM_A_10G_T0x15c8 },
95 { PCI_VENDOR_INTEL0x8086, PCI_PRODUCT_INTEL_X550EM_A_1G_T0x15e4 },
96 { PCI_VENDOR_INTEL0x8086, PCI_PRODUCT_INTEL_X550EM_A_1G_T_L0x15e5 }
97};
98
99/*********************************************************************
100 * Function prototypes
101 *********************************************************************/
102int ixgbe_probe(struct device *, void *, void *);
103void ixgbe_attach(struct device *, struct device *, void *);
104int ixgbe_detach(struct device *, int);
105void ixgbe_start(struct ifqueue *);
106int ixgbe_ioctl(struct ifnet *, u_long, caddr_t);
107int ixgbe_rxrinfo(struct ix_softc *, struct if_rxrinfo *);
108int ixgbe_get_sffpage(struct ix_softc *, struct if_sffpage *);
109void ixgbe_watchdog(struct ifnet *);
110void ixgbe_init(void *);
111void ixgbe_stop(void *);
112void ixgbe_media_status(struct ifnet *, struct ifmediareq *);
113int ixgbe_media_change(struct ifnet *);
114void ixgbe_identify_hardware(struct ix_softc *);
115int ixgbe_allocate_pci_resources(struct ix_softc *);
116int ixgbe_allocate_legacy(struct ix_softc *);
117int ixgbe_allocate_msix(struct ix_softc *);
118void ixgbe_setup_msix(struct ix_softc *);
119int ixgbe_allocate_queues(struct ix_softc *);
120void ixgbe_free_pci_resources(struct ix_softc *);
121void ixgbe_local_timer(void *);
122void ixgbe_setup_interface(struct ix_softc *);
123void ixgbe_config_gpie(struct ix_softc *);
124void ixgbe_config_delay_values(struct ix_softc *);
125void ixgbe_add_media_types(struct ix_softc *);
126void ixgbe_config_link(struct ix_softc *);
127
128int ixgbe_allocate_transmit_buffers(struct tx_ring *);
129int ixgbe_setup_transmit_structures(struct ix_softc *);
130int ixgbe_setup_transmit_ring(struct tx_ring *);
131void ixgbe_initialize_transmit_units(struct ix_softc *);
132void ixgbe_free_transmit_structures(struct ix_softc *);
133void ixgbe_free_transmit_buffers(struct tx_ring *);
134
135int ixgbe_allocate_receive_buffers(struct rx_ring *);
136int ixgbe_setup_receive_structures(struct ix_softc *);
137int ixgbe_setup_receive_ring(struct rx_ring *);
138void ixgbe_initialize_receive_units(struct ix_softc *);
139void ixgbe_free_receive_structures(struct ix_softc *);
140void ixgbe_free_receive_buffers(struct rx_ring *);
141void ixgbe_initialize_rss_mapping(struct ix_softc *);
142int ixgbe_rxfill(struct rx_ring *);
143void ixgbe_rxrefill(void *);
144
145int ixgbe_intr(struct ix_softc *sc);
146void ixgbe_enable_intr(struct ix_softc *);
147void ixgbe_disable_intr(struct ix_softc *);
148int ixgbe_txeof(struct tx_ring *);
149int ixgbe_rxeof(struct rx_ring *);
150void ixgbe_rx_checksum(uint32_t, struct mbuf *, uint32_t);
151void ixgbe_iff(struct ix_softc *);
152void ixgbe_map_queue_statistics(struct ix_softc *);
153void ixgbe_update_link_status(struct ix_softc *);
154int ixgbe_get_buf(struct rx_ring *, int);
155int ixgbe_encap(struct tx_ring *, struct mbuf *);
156int ixgbe_dma_malloc(struct ix_softc *, bus_size_t,
157 struct ixgbe_dma_alloc *, int);
158void ixgbe_dma_free(struct ix_softc *, struct ixgbe_dma_alloc *);
159int ixgbe_tx_ctx_setup(struct tx_ring *, struct mbuf *, uint32_t *,
160 uint32_t *);
161int ixgbe_tso_setup(struct tx_ring *, struct mbuf *, uint32_t *,
162 uint32_t *);
163void ixgbe_set_ivar(struct ix_softc *, uint8_t, uint8_t, int8_t);
164void ixgbe_configure_ivars(struct ix_softc *);
165uint8_t *ixgbe_mc_array_itr(struct ixgbe_hw *, uint8_t **, uint32_t *);
166
167void ixgbe_setup_vlan_hw_support(struct ix_softc *);
168
169/* Support for pluggable optic modules */
170void ixgbe_handle_mod(struct ix_softc *);
171void ixgbe_handle_msf(struct ix_softc *);
172void ixgbe_handle_phy(struct ix_softc *);
173
174 /* Legacy (single vector) interrupt handler */
175int ixgbe_legacy_intr(void *);
176void ixgbe_enable_queue(struct ix_softc *, uint32_t);
177void ixgbe_enable_queues(struct ix_softc *);
178void ixgbe_disable_queue(struct ix_softc *, uint32_t);
179void ixgbe_rearm_queue(struct ix_softc *, uint32_t);
180
181/* MSI-X (multiple vectors interrupt handlers) */
182int ixgbe_link_intr(void *);
183int ixgbe_queue_intr(void *);
184
185#if NKSTAT0 > 0
186static void ix_kstats(struct ix_softc *);
187static void ix_rxq_kstats(struct ix_softc *, struct rx_ring *);
188static void ix_txq_kstats(struct ix_softc *, struct tx_ring *);
189static void ix_kstats_tick(void *);
190#endif
191
192/*********************************************************************
193 * OpenBSD Device Interface Entry Points
194 *********************************************************************/
195
196struct cfdriver ix_cd = {
197 NULL((void *)0), "ix", DV_IFNET
198};
199
200struct cfattach ix_ca = {
201 sizeof(struct ix_softc), ixgbe_probe, ixgbe_attach, ixgbe_detach
202};
203
204int ixgbe_smart_speed = ixgbe_smart_speed_on;
205int ixgbe_enable_msix = 1;
206
207/*********************************************************************
208 * Device identification routine
209 *
210 * ixgbe_probe determines if the driver should be loaded on
211 * adapter based on PCI vendor/device id of the adapter.
212 *
213 * return 0 on success, positive on failure
214 *********************************************************************/
215
216int
217ixgbe_probe(struct device *parent, void *match, void *aux)
218{
219 INIT_DEBUGOUT("ixgbe_probe: begin")if (0) printf("ixgbe_probe: begin" "\n");
220
221 return (pci_matchbyid((struct pci_attach_args *)aux, ixgbe_devices,
222 nitems(ixgbe_devices)(sizeof((ixgbe_devices)) / sizeof((ixgbe_devices)[0]))));
223}
224
225/*********************************************************************
226 * Device initialization routine
227 *
228 * The attach entry point is called when the driver is being loaded.
229 * This routine identifies the type of hardware, allocates all resources
230 * and initializes the hardware.
231 *
232 * return 0 on success, positive on failure
233 *********************************************************************/
234
235void
236ixgbe_attach(struct device *parent, struct device *self, void *aux)
237{
238 struct pci_attach_args *pa = (struct pci_attach_args *)aux;
239 struct ix_softc *sc = (struct ix_softc *)self;
240 int error = 0;
241 uint16_t csum;
242 uint32_t ctrl_ext;
243 struct ixgbe_hw *hw = &sc->hw;
244
245 INIT_DEBUGOUT("ixgbe_attach: begin")if (0) printf("ixgbe_attach: begin" "\n");
246
247 sc->osdep.os_sc = sc;
248 sc->osdep.os_pa = *pa;
249
250 rw_init(&sc->sfflock, "ixsff")_rw_init_flags(&sc->sfflock, "ixsff", 0, ((void *)0));
251
252#if NKSTAT0 > 0
253 ix_kstats(sc);
254#endif
255
256 /* Determine hardware revision */
257 ixgbe_identify_hardware(sc);
258
259 /* Indicate to RX setup to use Jumbo Clusters */
260 sc->num_tx_desc = DEFAULT_TXD256;
261 sc->num_rx_desc = DEFAULT_RXD256;
262
263 /* Do base PCI setup - map BAR0 */
264 if (ixgbe_allocate_pci_resources(sc))
265 goto err_out;
266
267 /* Allocate our TX/RX Queues */
268 if (ixgbe_allocate_queues(sc))
269 goto err_out;
270
271 /* Allocate multicast array memory. */
272 sc->mta = mallocarray(IXGBE_ETH_LENGTH_OF_ADDRESS6,
273 MAX_NUM_MULTICAST_ADDRESSES128, M_DEVBUF2, M_NOWAIT0x0002);
274 if (sc->mta == NULL((void *)0)) {
275 printf(": Can not allocate multicast setup array\n");
276 goto err_late;
277 }
278
279 /* Initialize the shared code */
280 error = ixgbe_init_shared_code(hw);
281 if (error) {
282 printf(": Unable to initialize the shared code\n");
283 goto err_late;
284 }
285
286 /* Make sure we have a good EEPROM before we read from it */
287 if (sc->hw.eeprom.ops.validate_checksum(&sc->hw, &csum) < 0) {
288 printf(": The EEPROM Checksum Is Not Valid\n");
289 goto err_late;
290 }
291
292 error = ixgbe_init_hw(hw);
293 if (error == IXGBE_ERR_EEPROM_VERSION-24) {
294 printf(": This device is a pre-production adapter/"
295 "LOM. Please be aware there may be issues associated "
296 "with your hardware.\nIf you are experiencing problems "
297 "please contact your Intel or hardware representative "
298 "who provided you with this hardware.\n");
299 } else if (error && (error != IXGBE_ERR_SFP_NOT_PRESENT-20 &&
300 error != IXGBE_ERR_SFP_NOT_SUPPORTED-19)) {
301 printf(": Hardware Initialization Failure\n");
302 goto err_late;
303 }
304
305 bcopy(sc->hw.mac.addr, sc->arpcom.ac_enaddr,
306 IXGBE_ETH_LENGTH_OF_ADDRESS6);
307
308 if (sc->sc_intrmap)
309 error = ixgbe_allocate_msix(sc);
310 else
311 error = ixgbe_allocate_legacy(sc);
312 if (error)
313 goto err_late;
314
315 /* Enable the optics for 82599 SFP+ fiber */
316 if (sc->hw.mac.ops.enable_tx_laser)
317 sc->hw.mac.ops.enable_tx_laser(&sc->hw);
318
319 /* Enable power to the phy */
320 if (hw->phy.ops.set_phy_power)
321 hw->phy.ops.set_phy_power(&sc->hw, TRUE1);
322
323 /* Setup OS specific network interface */
324 ixgbe_setup_interface(sc);
325
326 /* Get the PCI-E bus info and determine LAN ID */
327 hw->mac.ops.get_bus_info(hw);
328
329 /* Set an initial default flow control value */
330 sc->fc = ixgbe_fc_full;
331
332 /* let hardware know driver is loaded */
333 ctrl_ext = IXGBE_READ_REG(&sc->hw, IXGBE_CTRL_EXT);
334 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
335 IXGBE_WRITE_REG(&sc->hw, IXGBE_CTRL_EXT, ctrl_ext);
336
337 printf(", address %s\n", ether_sprintf(sc->hw.mac.addr));
338
339 INIT_DEBUGOUT("ixgbe_attach: end")if (0) printf("ixgbe_attach: end" "\n");
340 return;
341
342err_late:
343 ixgbe_free_transmit_structures(sc);
344 ixgbe_free_receive_structures(sc);
345err_out:
346 ixgbe_free_pci_resources(sc);
347 free(sc->mta, M_DEVBUF2, IXGBE_ETH_LENGTH_OF_ADDRESS6 *
348 MAX_NUM_MULTICAST_ADDRESSES128);
349}
350
351/*********************************************************************
352 * Device removal routine
353 *
354 * The detach entry point is called when the driver is being removed.
355 * This routine stops the adapter and deallocates all the resources
356 * that were allocated for driver operation.
357 *
358 * return 0 on success, positive on failure
359 *********************************************************************/
360
361int
362ixgbe_detach(struct device *self, int flags)
363{
364 struct ix_softc *sc = (struct ix_softc *)self;
365 struct ifnet *ifp = &sc->arpcom.ac_if;
366 uint32_t ctrl_ext;
367
368 INIT_DEBUGOUT("ixgbe_detach: begin")if (0) printf("ixgbe_detach: begin" "\n");
369
370 ixgbe_stop(sc);
371
372 /* let hardware know driver is unloading */
373 ctrl_ext = IXGBE_READ_REG(&sc->hw, IXGBE_CTRL_EXT);
374 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
375 IXGBE_WRITE_REG(&sc->hw, IXGBE_CTRL_EXT, ctrl_ext);
376
377 ether_ifdetach(ifp);
378 if_detach(ifp);
379
380 ixgbe_free_pci_resources(sc);
381
382 ixgbe_free_transmit_structures(sc);
383 ixgbe_free_receive_structures(sc);
384 free(sc->mta, M_DEVBUF2, IXGBE_ETH_LENGTH_OF_ADDRESS6 *
385 MAX_NUM_MULTICAST_ADDRESSES128);
386
387 /* XXX kstat */
388
389 return (0);
390}
391
392/*********************************************************************
393 * Transmit entry point
394 *
395 * ixgbe_start is called by the stack to initiate a transmit.
396 * The driver will remain in this routine as long as there are
397 * packets to transmit and transmit resources are available.
398 * In case resources are not available stack is notified and
399 * the packet is requeued.
400 **********************************************************************/
401
402void
403ixgbe_start(struct ifqueue *ifq)
404{
405 struct ifnet *ifp = ifq->ifq_if;
406 struct ix_softc *sc = ifp->if_softc;
407 struct tx_ring *txr = ifq->ifq_softc;
408 struct mbuf *m_head;
409 unsigned int head, free, used;
410 int post = 0;
411
412 if (!sc->link_up)
1
Assuming field 'link_up' is true
2
Taking false branch
413 return;
414
415 head = txr->next_avail_desc;
416 free = txr->next_to_clean;
417 if (free <= head)
3
Assuming 'free' is > 'head'
4
Taking false branch
418 free += sc->num_tx_desc;
419 free -= head;
420
421 membar_consumer()do { __asm volatile("" ::: "memory"); } while (0);
5
Loop condition is false. Exiting loop
422
423 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
424 0, txr->txdma.dma_map->dm_mapsize,
425 BUS_DMASYNC_POSTWRITE);
426
427 for (;;) {
6
Loop condition is true. Entering loop body
428 /* Check that we have the minimal number of TX descriptors. */
429 if (free <= IXGBE_TX_OP_THRESHOLD) {
7
Assuming the condition is false
8
Taking false branch
430 ifq_set_oactive(ifq);
431 break;
432 }
433
434 m_head = ifq_dequeue(ifq);
435 if (m_head == NULL)
9
Assuming 'm_head' is not equal to NULL
10
Taking false branch
436 break;
437
438 used = ixgbe_encap(txr, m_head);
11
Calling 'ixgbe_encap'
439 if (used == 0) {
440 m_freem(m_head);
441 continue;
442 }
443
444 free -= used;
445
446 #if NBPFILTER > 0
447 if (ifp->if_bpf)
448 bpf_mtap_ether(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
449#endif
450
451 /* Set timeout in case hardware has problems transmitting */
452 txr->watchdog_timer = IXGBE_TX_TIMEOUT;
453 ifp->if_timer = IXGBE_TX_TIMEOUT;
454
455 post = 1;
456 }
457
458 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
459 0, txr->txdma.dma_map->dm_mapsize,
460 BUS_DMASYNC_PREWRITE);
461
462 /*
463 * Advance the Transmit Descriptor Tail (Tdt), this tells the
464 * hardware that this frame is available to transmit.
465 */
466 if (post)
467 IXGBE_WRITE_REG(&sc->hw, IXGBE_TDT(txr->me),
468 txr->next_avail_desc);
469}
470
471/*********************************************************************
472 * Ioctl entry point
473 *
474 * ixgbe_ioctl is called when the user wants to configure the
475 * interface.
476 *
477 * return 0 on success, positive on failure
478 **********************************************************************/
479
480int
481ixgbe_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
482{
483 struct ix_softc *sc = ifp->if_softc;
484 struct ifreq *ifr = (struct ifreq *) data;
485 int s, error = 0;
486
487 s = splnet()splraise(0x7);
488
489 switch (command) {
490 case SIOCSIFADDR:
491 IOCTL_DEBUGOUT("ioctl: SIOCxIFADDR (Get/Set Interface Addr)");
492 ifp->if_flags |= IFF_UP0x1;
493 if (!(ifp->if_flags & IFF_RUNNING0x40))
494 ixgbe_init(sc);
495 break;
496
497 case SIOCSIFFLAGS:
498 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
499 if (ifp->if_flags & IFF_UP0x1) {
500 if (ifp->if_flags & IFF_RUNNING0x40)
501 error = ENETRESET52;
502 else
503 ixgbe_init(sc);
504 } else {
505 if (ifp->if_flags & IFF_RUNNING0x40)
506 ixgbe_stop(sc);
507 }
508 break;
509
510 case SIOCSIFMEDIA:
511 case SIOCGIFMEDIA:
512 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
513 error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
514 break;
515
516 case SIOCGIFRXR:
517 error = ixgbe_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_dataifr_ifru.ifru_data);
518 break;
519
520 case SIOCGIFSFFPAGE:
521 error = rw_enter(&sc->sfflock, RW_WRITE0x0001UL|RW_INTR0x0010UL);
522 if (error != 0)
523 break;
524
525 error = ixgbe_get_sffpage(sc, (struct if_sffpage *)data);
526 rw_exit(&sc->sfflock);
527 break;
528
529 default:
530 error = ether_ioctl(ifp, &sc->arpcom, command, data);
531 }
532
533 if (error == ENETRESET52) {
534 if (ifp->if_flags & IFF_RUNNING0x40) {
535 ixgbe_disable_intr(sc);
536 ixgbe_iff(sc);
537 ixgbe_enable_intr(sc);
538 ixgbe_enable_queues(sc);
539 }
540 error = 0;
541 }
542
543 splx(s)spllower(s);
544 return (error);
545}
546
547int
548ixgbe_get_sffpage(struct ix_softc *sc, struct if_sffpage *sff)
549{
550 struct ixgbe_hw *hw = &sc->hw;
551 uint32_t swfw_mask = hw->phy.phy_semaphore_mask;
552 uint8_t page;
553 size_t i;
554 int error = EIO5;
555
556 if (hw->phy.type == ixgbe_phy_fw)
557 return (ENODEV19);
558
559 if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
560 return (EBUSY16); /* XXX */
561
562 if (sff->sff_addr == IFSFF_ADDR_EEPROM0xa0) {
563 if (hw->phy.ops.read_i2c_byte_unlocked(hw, 127,
564 IFSFF_ADDR_EEPROM0xa0, &page))
565 goto error;
566 if (page != sff->sff_page &&
567 hw->phy.ops.write_i2c_byte_unlocked(hw, 127,
568 IFSFF_ADDR_EEPROM0xa0, sff->sff_page))
569 goto error;
570 }
571
572 for (i = 0; i < sizeof(sff->sff_data); i++) {
573 if (hw->phy.ops.read_i2c_byte_unlocked(hw, i,
574 sff->sff_addr, &sff->sff_data[i]))
575 goto error;
576 }
577
578 if (sff->sff_addr == IFSFF_ADDR_EEPROM0xa0) {
579 if (page != sff->sff_page &&
580 hw->phy.ops.write_i2c_byte_unlocked(hw, 127,
581 IFSFF_ADDR_EEPROM0xa0, page))
582 goto error;
583 }
584
585 error = 0;
586error:
587 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
588 return (error);
589}
590
591int
592ixgbe_rxrinfo(struct ix_softc *sc, struct if_rxrinfo *ifri)
593{
594 struct if_rxring_info *ifr, ifr1;
595 struct rx_ring *rxr;
596 int error, i;
597 u_int n = 0;
598
599 if (sc->num_queues > 1) {
600 if ((ifr = mallocarray(sc->num_queues, sizeof(*ifr), M_DEVBUF2,
601 M_WAITOK0x0001 | M_ZERO0x0008)) == NULL((void *)0))
602 return (ENOMEM12);
603 } else
604 ifr = &ifr1;
605
606 for (i = 0; i < sc->num_queues; i++) {
607 rxr = &sc->rx_rings[i];
608 ifr[n].ifr_size = MCLBYTES(1 << 11);
609 snprintf(ifr[n].ifr_name, sizeof(ifr[n].ifr_name), "%d", i);
610 ifr[n].ifr_info = rxr->rx_ring;
611 n++;
612 }
613
614 error = if_rxr_info_ioctl(ifri, sc->num_queues, ifr);
615
616 if (sc->num_queues > 1)
617 free(ifr, M_DEVBUF2, sc->num_queues * sizeof(*ifr));
618 return (error);
619}
620
621/*********************************************************************
622 * Watchdog entry point
623 *
624 **********************************************************************/
625
626void
627ixgbe_watchdog(struct ifnet * ifp)
628{
629 struct ix_softc *sc = (struct ix_softc *)ifp->if_softc;
630 struct tx_ring *txr = sc->tx_rings;
631 struct ixgbe_hw *hw = &sc->hw;
632 int tx_hang = FALSE0;
633 int i;
634
635 /*
636 * The timer is set to 5 every time ixgbe_start() queues a packet.
637 * Anytime all descriptors are clean the timer is set to 0.
638 */
639 for (i = 0; i < sc->num_queues; i++, txr++) {
640 if (txr->watchdog_timer == 0 || --txr->watchdog_timer)
641 continue;
642 else {
643 tx_hang = TRUE1;
644 break;
645 }
646 }
647 if (tx_hang == FALSE0)
648 return;
649
650 /*
651 * If we are in this routine because of pause frames, then don't
652 * reset the hardware.
653 */
654 if (!(IXGBE_READ_REG(hw, IXGBE_TFCS) & IXGBE_TFCS_TXON)) {
655 for (i = 0; i < sc->num_queues; i++, txr++)
656 txr->watchdog_timer = IXGBE_TX_TIMEOUT5;
657 ifp->if_timer = IXGBE_TX_TIMEOUT5;
658 return;
659 }
660
661
662 printf("%s: Watchdog timeout -- resetting\n", ifp->if_xname);
663 for (i = 0; i < sc->num_queues; i++, txr++) {
664 printf("%s: Queue(%d) tdh = %d, hw tdt = %d\n", ifp->if_xname, i,
665 IXGBE_READ_REG(hw, IXGBE_TDH(i)),
666 IXGBE_READ_REG(hw, IXGBE_TDT(i)));
667 printf("%s: TX(%d) Next TX to Clean = %d\n", ifp->if_xname,
668 i, txr->next_to_clean);
669 }
670 ifp->if_flags &= ~IFF_RUNNING0x40;
671
672 ixgbe_init(sc);
673}
674
675/*********************************************************************
676 * Init entry point
677 *
678 * This routine is used in two ways. It is used by the stack as
679 * init entry point in network interface structure. It is also used
680 * by the driver as a hw/sw initialization routine to get to a
681 * consistent state.
682 *
683 * return 0 on success, positive on failure
684 **********************************************************************/
685 #define IXGBE_MHADD_MFS_SHIFT 16
686
687void
688ixgbe_init(void *arg)
689{
690 struct ix_softc *sc = (struct ix_softc *)arg;
691 struct ifnet *ifp = &sc->arpcom.ac_if;
692 struct rx_ring *rxr = sc->rx_rings;
693 uint32_t k, txdctl, rxdctl, rxctrl, mhadd, itr;
694 int i, s, err;
695
696 INIT_DEBUGOUT("ixgbe_init: begin")if (0) printf("ixgbe_init: begin" "\n");
697
698 s = splnet()splraise(0x7);
699
700 ixgbe_stop(sc);
701
702 /* reprogram the RAR[0] in case user changed it. */
703 ixgbe_set_rar(&sc->hw, 0, sc->hw.mac.addr, 0, IXGBE_RAH_AV0x80000000);
704
705 /* Get the latest mac address, User can use a LAA */
706 bcopy(sc->arpcom.ac_enaddr, sc->hw.mac.addr,
707 IXGBE_ETH_LENGTH_OF_ADDRESS6);
708 ixgbe_set_rar(&sc->hw, 0, sc->hw.mac.addr, 0, 1);
709 sc->hw.addr_ctrl.rar_used_count = 1;
710
711 /* Prepare transmit descriptors and buffers */
712 if (ixgbe_setup_transmit_structures(sc)) {
713 printf("%s: Could not setup transmit structures\n",
714 ifp->if_xname);
715 ixgbe_stop(sc);
716 splx(s)spllower(s);
717 return;
718 }
719
720 ixgbe_init_hw(&sc->hw);
721 ixgbe_initialize_transmit_units(sc);
722
723 /* Use 2k clusters, even for jumbo frames */
724 sc->rx_mbuf_sz = MCLBYTES(1 << 11) + ETHER_ALIGN2;
725
726 /* Prepare receive descriptors and buffers */
727 if (ixgbe_setup_receive_structures(sc)) {
728 printf("%s: Could not setup receive structures\n",
729 ifp->if_xname);
730 ixgbe_stop(sc);
731 splx(s)spllower(s);
732 return;
733 }
734
735 /* Configure RX settings */
736 ixgbe_initialize_receive_units(sc);
737
738 /* Enable SDP & MSIX interrupts based on adapter */
739 ixgbe_config_gpie(sc);
740
741 /* Program promiscuous mode and multicast filters. */
742 ixgbe_iff(sc);
743
744 /* Set MRU size */
745 mhadd = IXGBE_READ_REG(&sc->hw, IXGBE_MHADD);
746 mhadd &= ~IXGBE_MHADD_MFS_MASK;
747 mhadd |= sc->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
748 IXGBE_WRITE_REG(&sc->hw, IXGBE_MHADD, mhadd);
749
750 /* Now enable all the queues */
751 for (i = 0; i < sc->num_queues; i++) {
752 txdctl = IXGBE_READ_REG(&sc->hw, IXGBE_TXDCTL(i));
753 txdctl |= IXGBE_TXDCTL_ENABLE;
754 /* Set WTHRESH to 8, burst writeback */
755 txdctl |= (8 << 16);
756 /*
757 * When the internal queue falls below PTHRESH (16),
758 * start prefetching as long as there are at least
759 * HTHRESH (1) buffers ready.
760 */
761 txdctl |= (16 << 0) | (1 << 8);
762 IXGBE_WRITE_REG(&sc->hw, IXGBE_TXDCTL(i), txdctl);
763 }
764
765 for (i = 0; i < sc->num_queues; i++) {
766 rxdctl = IXGBE_READ_REG(&sc->hw, IXGBE_RXDCTL(i));
767 if (sc->hw.mac.type == ixgbe_mac_82598EB) {
768 /*
769 * PTHRESH = 21
770 * HTHRESH = 4
771 * WTHRESH = 8
772 */
773 rxdctl &= ~0x3FFFFF;
774 rxdctl |= 0x080420;
775 }
776 rxdctl |= IXGBE_RXDCTL_ENABLE;
777 IXGBE_WRITE_REG(&sc->hw, IXGBE_RXDCTL(i), rxdctl);
778 for (k = 0; k < 10; k++) {
779 if (IXGBE_READ_REG(&sc->hw, IXGBE_RXDCTL(i)) &
780 IXGBE_RXDCTL_ENABLE)
781 break;
782 else
783 msec_delay(1);
784 }
785 IXGBE_WRITE_FLUSH(&sc->hw);
786 IXGBE_WRITE_REG(&sc->hw, IXGBE_RDT(i), rxr->last_desc_filled);
787 }
788
789 /* Set up VLAN support and filter */
790 ixgbe_setup_vlan_hw_support(sc);
791
792 /* Enable Receive engine */
793 rxctrl = IXGBE_READ_REG(&sc->hw, IXGBE_RXCTRL);
794 if (sc->hw.mac.type == ixgbe_mac_82598EB)
795 rxctrl |= IXGBE_RXCTRL_DMBYPS;
796 rxctrl |= IXGBE_RXCTRL_RXEN;
797 sc->hw.mac.ops.enable_rx_dma(&sc->hw, rxctrl);
798
799 /* Set up MSI/X routing */
800 if (sc->sc_intrmap) {
801 ixgbe_configure_ivars(sc);
802 /* Set up auto-mask */
803 if (sc->hw.mac.type == ixgbe_mac_82598EB)
804 IXGBE_WRITE_REG(&sc->hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
805 else {
806 IXGBE_WRITE_REG(&sc->hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
807 IXGBE_WRITE_REG(&sc->hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
808 }
809 } else { /* Simple settings for Legacy/MSI */
810 ixgbe_set_ivar(sc, 0, 0, 0);
811 ixgbe_set_ivar(sc, 0, 0, 1);
812 IXGBE_WRITE_REG(&sc->hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
813 }
814
815 /* Check on any SFP devices that need to be kick-started */
816 if (sc->hw.phy.type == ixgbe_phy_none) {
817 err = sc->hw.phy.ops.identify(&sc->hw);
818 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED-19) {
819 printf("Unsupported SFP+ module type was detected.\n");
820 splx(s)spllower(s);
821 return;
822 }
823 }
824
825 /* Setup interrupt moderation */
826 itr = (4000000 / IXGBE_INTS_PER_SEC) & 0xff8;
827 if (sc->hw.mac.type != ixgbe_mac_82598EB)
828 itr |= IXGBE_EITR_LLI_MOD | IXGBE_EITR_CNT_WDIS;
829 IXGBE_WRITE_REG(&sc->hw, IXGBE_EITR(0), itr);
830
831 if (sc->sc_intrmap) {
832 /* Set moderation on the Link interrupt */
833 IXGBE_WRITE_REG(&sc->hw, IXGBE_EITR(sc->linkvec),
834 IXGBE_LINK_ITR);
835 }
836
837 /* Enable power to the phy */
838 if (sc->hw.phy.ops.set_phy_power)
839 sc->hw.phy.ops.set_phy_power(&sc->hw, TRUE1);
840
841 /* Config/Enable Link */
842 ixgbe_config_link(sc);
843
844 /* Hardware Packet Buffer & Flow Control setup */
845 ixgbe_config_delay_values(sc);
846
847 /* Initialize the FC settings */
848 sc->hw.mac.ops.start_hw(&sc->hw);
849
850 /* And now turn on interrupts */
851 ixgbe_enable_intr(sc);
852 ixgbe_enable_queues(sc);
853
854 /* Now inform the stack we're ready */
855 ifp->if_flags |= IFF_RUNNING0x40;
856 for (i = 0; i < sc->num_queues; i++)
857 ifq_clr_oactive(ifp->if_ifqs[i]);
858
859#if NKSTAT0 > 0
860 ix_kstats_tick(sc);
861#endif
862
863 splx(s)spllower(s);
864}
865
866void
867ixgbe_config_gpie(struct ix_softc *sc)
868{
869 struct ixgbe_hw *hw = &sc->hw;
870 uint32_t gpie;
871
872 gpie = IXGBE_READ_REG(&sc->hw, IXGBE_GPIE);
873
874 /* Fan Failure Interrupt */
875 if (hw->device_id == IXGBE_DEV_ID_82598AT0x10C8)
876 gpie |= IXGBE_SDP1_GPIEN0x00000002;
877
878 if (sc->hw.mac.type == ixgbe_mac_82599EB) {
879 /* Add for Module detection */
880 gpie |= IXGBE_SDP2_GPIEN0x00000004;
881
882 /* Media ready */
883 if (hw->device_id != IXGBE_DEV_ID_82599_QSFP_SF_QP0x1558)
884 gpie |= IXGBE_SDP1_GPIEN0x00000002;
885
886 /*
887 * Set LL interval to max to reduce the number of low latency
888 * interrupts hitting the card when the ring is getting full.
889 */
890 gpie |= 0xf << IXGBE_GPIE_LLI_DELAY_SHIFT7;
891 }
892
893 if (sc->hw.mac.type == ixgbe_mac_X540 ||
894 sc->hw.mac.type == ixgbe_mac_X550EM_x ||
895 sc->hw.mac.type == ixgbe_mac_X550EM_a) {
896 /*
897 * Thermal Failure Detection (X540)
898 * Link Detection (X552 SFP+, X552/X557-AT)
899 */
900 gpie |= IXGBE_SDP0_GPIEN_X5400x00000002;
901
902 /*
903 * Set LL interval to max to reduce the number of low latency
904 * interrupts hitting the card when the ring is getting full.
905 */
906 gpie |= 0xf << IXGBE_GPIE_LLI_DELAY_SHIFT7;
907 }
908
909 if (sc->sc_intrmap) {
910 /* Enable Enhanced MSIX mode */
911 gpie |= IXGBE_GPIE_MSIX_MODE0x00000010;
912 gpie |= IXGBE_GPIE_EIAME0x40000000 | IXGBE_GPIE_PBA_SUPPORT0x80000000 |
913 IXGBE_GPIE_OCD0x00000020;
914 }
915
916 IXGBE_WRITE_REG(&sc->hw, IXGBE_GPIE, gpie);
917}
918
919/*
920 * Requires sc->max_frame_size to be set.
921 */
922void
923ixgbe_config_delay_values(struct ix_softc *sc)
924{
925 struct ixgbe_hw *hw = &sc->hw;
926 uint32_t rxpb, frame, size, tmp;
927
928 frame = sc->max_frame_size;
929
930 /* Calculate High Water */
931 switch (hw->mac.type) {
932 case ixgbe_mac_X540:
933 case ixgbe_mac_X550:
934 case ixgbe_mac_X550EM_x:
935 case ixgbe_mac_X550EM_a:
936 tmp = IXGBE_DV_X540(frame, frame);
937 break;
938 default:
939 tmp = IXGBE_DV(frame, frame);
940 break;
941 }
942 size = IXGBE_BT2KB(tmp);
943 rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
944 hw->fc.high_water[0] = rxpb - size;
945
946 /* Now calculate Low Water */
947 switch (hw->mac.type) {
948 case ixgbe_mac_X540:
949 case ixgbe_mac_X550:
950 case ixgbe_mac_X550EM_x:
951 case ixgbe_mac_X550EM_a:
952 tmp = IXGBE_LOW_DV_X540(frame)(2 * (frame * 8) + (36 * 10000 / 25) + 1);
953 break;
954 default:
955 tmp = IXGBE_LOW_DV(frame)(2 * (2 * (frame * 8) + (36 * 10000 / 25) + 1));
956 break;
957 }
958 hw->fc.low_water[0] = IXGBE_BT2KB(tmp)((tmp + (8 * 1024 - 1)) / (8 * 1024));
959
960 hw->fc.requested_mode = sc->fc;
961 hw->fc.pause_time = IXGBE_FC_PAUSE0xFFFF;
962 hw->fc.send_xon = TRUE1;
963}
964
965/*
966 * MSIX Interrupt Handlers
967 */
968void
969ixgbe_enable_queue(struct ix_softc *sc, uint32_t vector)
970{
971 uint64_t queue = 1ULL << vector;
972 uint32_t mask;
973
974 if (sc->hw.mac.type == ixgbe_mac_82598EB) {
975 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
976 IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMS, mask);
977 } else {
978 mask = (queue & 0xFFFFFFFF);
979 if (mask)
980 IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMS_EX(0), mask);
981 mask = (queue >> 32);
982 if (mask)
983 IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMS_EX(1), mask);
984 }
985}
986
987void
988ixgbe_enable_queues(struct ix_softc *sc)
989{
990 struct ix_queue *que;
991 int i;
992
993 for (i = 0, que = sc->queues; i < sc->num_queues; i++, que++)
994 ixgbe_enable_queue(sc, que->msix);
995}
996
997void
998ixgbe_disable_queue(struct ix_softc *sc, uint32_t vector)
999{
1000 uint64_t queue = 1ULL << vector;
1001 uint32_t mask;
1002
1003 if (sc->hw.mac.type == ixgbe_mac_82598EB) {
1004 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
1005 IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC, mask);
1006 } else {
1007 mask = (queue & 0xFFFFFFFF);
1008 if (mask)
1009 IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC_EX(0), mask);
1010 mask = (queue >> 32);
1011 if (mask)
1012 IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC_EX(1), mask);
1013 }
1014}
1015
1016/*
1017 * MSIX Interrupt Handlers
1018 */
1019int
1020ixgbe_link_intr(void *vsc)
1021{
1022 struct ix_softc *sc = (struct ix_softc *)vsc;
1023
1024 return ixgbe_intr(sc);
1025}
1026
1027int
1028ixgbe_queue_intr(void *vque)
1029{
1030 struct ix_queue *que = vque;
1031 struct ix_softc *sc = que->sc;
1032 struct ifnet *ifp = &sc->arpcom.ac_if;
1033 struct rx_ring *rxr = que->rxr;
1034 struct tx_ring *txr = que->txr;
1035
1036 if (ISSET(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) & (0x40))) {
1037 ixgbe_rxeof(rxr);
1038 ixgbe_txeof(txr);
1039 ixgbe_rxrefill(rxr);
1040 }
1041
1042 ixgbe_enable_queue(sc, que->msix);
1043
1044 return (1);
1045}
1046
1047/*********************************************************************
1048 *
1049 * Legacy Interrupt Service routine
1050 *
1051 **********************************************************************/
1052
1053int
1054ixgbe_legacy_intr(void *arg)
1055{
1056 struct ix_softc *sc = (struct ix_softc *)arg;
1057 struct ifnet *ifp = &sc->arpcom.ac_if;
1058 struct rx_ring *rxr = sc->rx_rings;
1059 struct tx_ring *txr = sc->tx_rings;
1060 int rv;
1061
1062 rv = ixgbe_intr(sc);
1063 if (rv == 0) {
1064 return (0);
1065 }
1066
1067 if (ISSET(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) & (0x40))) {
1068 ixgbe_rxeof(rxr);
1069 ixgbe_txeof(txr);
1070 ixgbe_rxrefill(rxr);
1071 }
1072
1073 ixgbe_enable_queues(sc);
1074 return (rv);
1075}
1076
1077int
1078ixgbe_intr(struct ix_softc *sc)
1079{
1080 struct ifnet *ifp = &sc->arpcom.ac_if;
1081 struct ixgbe_hw *hw = &sc->hw;
1082 uint32_t reg_eicr, mod_mask, msf_mask;
1083
1084 if (sc->sc_intrmap) {
1085 /* Pause other interrupts */
1086 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);
1087 /* First get the cause */
1088 reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
1089 /* Be sure the queue bits are not cleared */
1090 reg_eicr &= ~IXGBE_EICR_RTX_QUEUE;
1091 /* Clear interrupt with write */
1092 IXGBE_WRITE_REG(hw, IXGBE_EICR, reg_eicr);
1093 } else {
1094 reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
1095 if (reg_eicr == 0) {
1096 ixgbe_enable_intr(sc);
1097 ixgbe_enable_queues(sc);
1098 return (0);
1099 }
1100 }
1101
1102 /* Link status change */
1103 if (reg_eicr & IXGBE_EICR_LSC) {
1104 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
1105 KERNEL_LOCK();
1106 ixgbe_update_link_status(sc);
1107 KERNEL_UNLOCK();
1108 }
1109
1110 if (hw->mac.type != ixgbe_mac_82598EB) {
1111 if (reg_eicr & IXGBE_EICR_ECC) {
1112 printf("%s: CRITICAL: ECC ERROR!! "
1113 "Please Reboot!!\n", sc->dev.dv_xname);
1114 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
1115 }
1116 /* Check for over temp condition */
1117 if (reg_eicr & IXGBE_EICR_TS) {
1118 printf("%s: CRITICAL: OVER TEMP!! "
1119 "PHY IS SHUT DOWN!!\n", ifp->if_xname);
1120 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
1121 }
1122 }
1123
1124 /* Pluggable optics-related interrupt */
1125 if (ixgbe_is_sfp(hw)) {
1126 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP) {
1127 mod_mask = IXGBE_EICR_GPI_SDP0_X540;
1128 msf_mask = IXGBE_EICR_GPI_SDP1_X540;
1129 } else if (hw->mac.type == ixgbe_mac_X540 ||
1130 hw->mac.type == ixgbe_mac_X550 ||
1131 hw->mac.type == ixgbe_mac_X550EM_x) {
1132 mod_mask = IXGBE_EICR_GPI_SDP2_X540;
1133 msf_mask = IXGBE_EICR_GPI_SDP1_X540;
1134 } else {
1135 mod_mask = IXGBE_EICR_GPI_SDP2;
1136 msf_mask = IXGBE_EICR_GPI_SDP1;
1137 }
1138 if (reg_eicr & mod_mask) {
1139 /* Clear the interrupt */
1140 IXGBE_WRITE_REG(hw, IXGBE_EICR, mod_mask);
1141 KERNEL_LOCK();
1142 ixgbe_handle_mod(sc);
1143 KERNEL_UNLOCK();
1144 } else if ((hw->phy.media_type != ixgbe_media_type_copper) &&
1145 (reg_eicr & msf_mask)) {
1146 /* Clear the interrupt */
1147 IXGBE_WRITE_REG(hw, IXGBE_EICR, msf_mask);
1148 KERNEL_LOCK();
1149 ixgbe_handle_msf(sc);
1150 KERNEL_UNLOCK();
1151 }
1152 }
1153
1154 /* Check for fan failure */
1155 if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
1156 (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
1157 printf("%s: CRITICAL: FAN FAILURE!! "
1158 "REPLACE IMMEDIATELY!!\n", ifp->if_xname);
1159 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
1160 }
1161
1162 /* External PHY interrupt */
1163 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
1164 (reg_eicr & IXGBE_EICR_GPI_SDP0_X540)) {
1165 /* Clear the interrupt */
1166 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
1167 KERNEL_LOCK();
1168 ixgbe_handle_phy(sc);
1169 KERNEL_UNLOCK();
1170 }
1171
1172 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
1173
1174 return (1);
1175}
1176
1177/*********************************************************************
1178 *
1179 * Media Ioctl callback
1180 *
1181 * This routine is called whenever the user queries the status of
1182 * the interface using ifconfig.
1183 *
1184 **********************************************************************/
1185void
1186ixgbe_media_status(struct ifnet * ifp, struct ifmediareq *ifmr)
1187{
1188 struct ix_softc *sc = ifp->if_softc;
1189 uint64_t layer;
1190
1191 ifmr->ifm_active = IFM_ETHER0x0000000000000100ULL;
1192 ifmr->ifm_status = IFM_AVALID0x0000000000000001ULL;
1193
1194 INIT_DEBUGOUT("ixgbe_media_status: begin")if (0) printf("ixgbe_media_status: begin" "\n");
1195 ixgbe_update_link_status(sc);
1196
1197 if (!LINK_STATE_IS_UP(ifp->if_link_state))
1198 return;
1199
1200 ifmr->ifm_status |= IFM_ACTIVE0x0000000000000002ULL;
1201 layer = sc->phy_layer;
1202
1203 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T0x00001 ||
1204 layer & IXGBE_PHYSICAL_LAYER_1000BASE_T0x00002 ||
1205 layer & IXGBE_PHYSICAL_LAYER_100BASE_TX0x00004 ||
1206 layer & IXGBE_PHYSICAL_LAYER_10BASE_T0x08000) {
1207 switch (sc->link_speed) {
1208 case IXGBE_LINK_SPEED_10GB_FULL0x0080:
1209 ifmr->ifm_active |= IFM_10G_T22 | IFM_FDX0x0000010000000000ULL;
1210 break;
1211 case IXGBE_LINK_SPEED_1GB_FULL0x0020:
1212 ifmr->ifm_active |= IFM_1000_T16 | IFM_FDX0x0000010000000000ULL;
1213 break;
1214 case IXGBE_LINK_SPEED_100_FULL0x0008:
1215 ifmr->ifm_active |= IFM_100_TX6 | IFM_FDX0x0000010000000000ULL;
1216 break;
1217 case IXGBE_LINK_SPEED_10_FULL0x0002:
1218 ifmr->ifm_active |= IFM_10_T3 | IFM_FDX0x0000010000000000ULL;
1219 break;
1220 }
1221 }
1222 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU0x00008 ||
1223 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA0x02000) {
1224 switch (sc->link_speed) {
1225 case IXGBE_LINK_SPEED_10GB_FULL0x0080:
1226 ifmr->ifm_active |= IFM_10G_SFP_CU23 | IFM_FDX0x0000010000000000ULL;
1227 break;
1228 }
1229 }
1230 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR0x00010) {
1231 switch (sc->link_speed) {
1232 case IXGBE_LINK_SPEED_10GB_FULL0x0080:
1233 ifmr->ifm_active |= IFM_10G_LR18 | IFM_FDX0x0000010000000000ULL;
1234 break;
1235 case IXGBE_LINK_SPEED_1GB_FULL0x0020:
1236 ifmr->ifm_active |= IFM_1000_LX14 | IFM_FDX0x0000010000000000ULL;
1237 break;
1238 }
1239 }
1240 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR0x00040 ||
1241 layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX0x04000) {
1242 switch (sc->link_speed) {
1243 case IXGBE_LINK_SPEED_10GB_FULL0x0080:
1244 ifmr->ifm_active |= IFM_10G_SR19 | IFM_FDX0x0000010000000000ULL;
1245 break;
1246 case IXGBE_LINK_SPEED_1GB_FULL0x0020:
1247 ifmr->ifm_active |= IFM_1000_SX11 | IFM_FDX0x0000010000000000ULL;
1248 break;
1249 }
1250 }
1251 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX40x00100) {
1252 switch (sc->link_speed) {
1253 case IXGBE_LINK_SPEED_10GB_FULL0x0080:
1254 ifmr->ifm_active |= IFM_10G_CX420 | IFM_FDX0x0000010000000000ULL;
1255 break;
1256 }
1257 }
1258 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR0x00800) {
1259 switch (sc->link_speed) {
1260 case IXGBE_LINK_SPEED_10GB_FULL0x0080:
1261 ifmr->ifm_active |= IFM_10G_KR30 | IFM_FDX0x0000010000000000ULL;
1262 break;
1263 case IXGBE_LINK_SPEED_2_5GB_FULL0x0400:
1264 ifmr->ifm_active |= IFM_2500_KX33 | IFM_FDX0x0000010000000000ULL;
1265 break;
1266 case IXGBE_LINK_SPEED_1GB_FULL0x0020:
1267 ifmr->ifm_active |= IFM_1000_KX28 | IFM_FDX0x0000010000000000ULL;
1268 break;
1269 }
1270 } else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX40x00080 ||
1271 layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX0x10000 ||
1272 layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX0x00200) {
1273 switch (sc->link_speed) {
1274 case IXGBE_LINK_SPEED_10GB_FULL0x0080:
1275 ifmr->ifm_active |= IFM_10G_KX429 | IFM_FDX0x0000010000000000ULL;
1276 break;
1277 case IXGBE_LINK_SPEED_2_5GB_FULL0x0400:
1278 ifmr->ifm_active |= IFM_2500_KX33 | IFM_FDX0x0000010000000000ULL;
1279 break;
1280 case IXGBE_LINK_SPEED_1GB_FULL0x0020:
1281 ifmr->ifm_active |= IFM_1000_KX28 | IFM_FDX0x0000010000000000ULL;
1282 break;
1283 }
1284 }
1285
1286 switch (sc->hw.fc.current_mode) {
1287 case ixgbe_fc_tx_pause:
1288 ifmr->ifm_active |= IFM_FLOW0x0000040000000000ULL | IFM_ETH_TXPAUSE0x0000000000040000ULL;
1289 break;
1290 case ixgbe_fc_rx_pause:
1291 ifmr->ifm_active |= IFM_FLOW0x0000040000000000ULL | IFM_ETH_RXPAUSE0x0000000000020000ULL;
1292 break;
1293 case ixgbe_fc_full:
1294 ifmr->ifm_active |= IFM_FLOW0x0000040000000000ULL | IFM_ETH_RXPAUSE0x0000000000020000ULL |
1295 IFM_ETH_TXPAUSE0x0000000000040000ULL;
1296 break;
1297 default:
1298 ifmr->ifm_active &= ~(IFM_FLOW0x0000040000000000ULL | IFM_ETH_RXPAUSE0x0000000000020000ULL |
1299 IFM_ETH_TXPAUSE0x0000000000040000ULL);
1300 break;
1301 }
1302}
1303
1304/*********************************************************************
1305 *
1306 * Media Ioctl callback
1307 *
1308 * This routine is called when the user changes speed/duplex using
1309 * media/mediopt option with ifconfig.
1310 *
1311 **********************************************************************/
1312int
1313ixgbe_media_change(struct ifnet *ifp)
1314{
1315 struct ix_softc *sc = ifp->if_softc;
1316 struct ixgbe_hw *hw = &sc->hw;
1317 struct ifmedia *ifm = &sc->media;
1318 ixgbe_link_speed speed = 0;
1319
1320 if (IFM_TYPE(ifm->ifm_media)((ifm->ifm_media) & 0x000000000000ff00ULL) != IFM_ETHER0x0000000000000100ULL)
1321 return (EINVAL22);
1322
1323 if (hw->phy.media_type == ixgbe_media_type_backplane)
1324 return (ENODEV19);
1325
1326 switch (IFM_SUBTYPE(ifm->ifm_media)((ifm->ifm_media) & 0x00000000000000ffULL)) {
1327 case IFM_AUTO0ULL:
1328 case IFM_10G_T22:
1329 speed |= IXGBE_LINK_SPEED_100_FULL0x0008;
1330 speed |= IXGBE_LINK_SPEED_1GB_FULL0x0020;
1331 speed |= IXGBE_LINK_SPEED_10GB_FULL0x0080;
1332 break;
1333 case IFM_10G_SR19:
1334 case IFM_10G_KR30:
1335 case IFM_10G_LR18:
1336 case IFM_10G_LRM24:
1337 case IFM_10G_CX420:
1338 case IFM_10G_KX429:
1339 speed |= IXGBE_LINK_SPEED_1GB_FULL0x0020;
1340 speed |= IXGBE_LINK_SPEED_10GB_FULL0x0080;
1341 break;
1342 case IFM_10G_SFP_CU23:
1343 speed |= IXGBE_LINK_SPEED_10GB_FULL0x0080;
1344 break;
1345 case IFM_1000_T16:
1346 speed |= IXGBE_LINK_SPEED_100_FULL0x0008;
1347 speed |= IXGBE_LINK_SPEED_1GB_FULL0x0020;
1348 break;
1349 case IFM_1000_LX14:
1350 case IFM_1000_SX11:
1351 case IFM_1000_CX15:
1352 case IFM_1000_KX28:
1353 speed |= IXGBE_LINK_SPEED_1GB_FULL0x0020;
1354 break;
1355 case IFM_100_TX6:
1356 speed |= IXGBE_LINK_SPEED_100_FULL0x0008;
1357 break;
1358 case IFM_10_T3:
1359 speed |= IXGBE_LINK_SPEED_10_FULL0x0002;
1360 break;
1361 default:
1362 return (EINVAL22);
1363 }
1364
1365 hw->mac.autotry_restart = TRUE1;
1366 hw->mac.ops.setup_link(hw, speed, TRUE1);
1367
1368 return (0);
1369}
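
A small worked example of the advertised-speed mask built above (the chosen subtype and the resulting value are illustrative assumptions, not from a specific board):

	/*
	 * IFM_AUTO on a 10GBASE-T port advertises
	 * IXGBE_LINK_SPEED_100_FULL | IXGBE_LINK_SPEED_1GB_FULL |
	 * IXGBE_LINK_SPEED_10GB_FULL == 0x0008 | 0x0020 | 0x0080 == 0x00a8,
	 * which hw->mac.ops.setup_link() then hands to autonegotiation.
	 */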
1370
1371/*********************************************************************
1372 *
1373 * This routine maps the mbufs to tx descriptors, allowing the
1374 * TX engine to transmit the packets.
1375 * - returns the number of descriptors used on success, 0 on failure
1376 *
1377 **********************************************************************/
1378
1379int
1380ixgbe_encap(struct tx_ring *txr, struct mbuf *m_head)
1381{
1382 struct ix_softc *sc = txr->sc;
1383 uint32_t olinfo_status = 0, cmd_type_len;
1384 int i, j, ntxc;
1385 int first, last = 0;
1386 bus_dmamap_t map;
1387 struct ixgbe_tx_buf *txbuf;
1388 union ixgbe_adv_tx_desc *txd = NULL((void *)0);
12
'txd' initialized to a null pointer value
1389
1390 /* Basic descriptor defines */
1391 cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA0x00300000 |
1392 IXGBE_ADVTXD_DCMD_IFCS0x02000000 | IXGBE_ADVTXD_DCMD_DEXT0x20000000);
1393
1394#if NVLAN1 > 0
1395 if (m_head->m_flagsm_hdr.mh_flags & M_VLANTAG0x0020)
13
Assuming the condition is false
14
Taking false branch
1396 cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE0x40000000;
1397#endif
1398
1399 /*
1400 * Capture the first descriptor used, because it will
1401 * hold the index of the descriptor we tell the
1402 * hardware to report completion on.
1403 */
1404 first = txr->next_avail_desc;
1405 txbuf = &txr->tx_buffers[first];
1406 map = txbuf->map;
1407
1408 /*
1409 * Map the packet for DMA.
1410 */
1411 switch (bus_dmamap_load_mbuf(txr->txdma.dma_tag, map,(*(txr->txdma.dma_tag)->_dmamap_load_mbuf)((txr->txdma
.dma_tag), (map), (m_head), (0x0001))
15
Control jumps to 'case 0:' at line 1413
1412 m_head, BUS_DMA_NOWAIT)(*(txr->txdma.dma_tag)->_dmamap_load_mbuf)((txr->txdma
.dma_tag), (map), (m_head), (0x0001))
) {
1413 case 0:
1414 break;
16
Execution continues on line 1429
1415 case EFBIG27:
1416 if (m_defrag(m_head, M_NOWAIT0x0002) == 0 &&
1417 bus_dmamap_load_mbuf(txr->txdma.dma_tag, map,(*(txr->txdma.dma_tag)->_dmamap_load_mbuf)((txr->txdma
.dma_tag), (map), (m_head), (0x0001))
1418 m_head, BUS_DMA_NOWAIT)(*(txr->txdma.dma_tag)->_dmamap_load_mbuf)((txr->txdma
.dma_tag), (map), (m_head), (0x0001))
== 0)
1419 break;
1420 /* FALLTHROUGH */
1421 default:
1422 return (0);
1423 }
1424
1425 /*
1426 * Set the appropriate offload context;
1427 * this will become the first descriptor.
1428 */
1429 ntxc = ixgbe_tx_ctx_setup(txr, m_head, &cmd_type_len, &olinfo_status);
1430 if (ntxc == -1)
17
Taking false branch
1431 goto xmit_fail;
1432
1433 i = txr->next_avail_desc + ntxc;
1434 if (i >= sc->num_tx_desc)
18
Assuming 'i' is < field 'num_tx_desc'
19
Taking false branch
1435 i -= sc->num_tx_desc;
1436
1437 for (j = 0; j < map->dm_nsegs; j++) {
20
Assuming 'j' is >= field 'dm_nsegs'
21
Loop condition is false. Execution continues on line 1450
1438 txd = &txr->tx_base[i];
1439
1440 txd->read.buffer_addr = htole64(map->dm_segs[j].ds_addr)((__uint64_t)(map->dm_segs[j].ds_addr));
1441 txd->read.cmd_type_len = htole32(txr->txd_cmd |((__uint32_t)(txr->txd_cmd | cmd_type_len | map->dm_segs
[j].ds_len))
1442 cmd_type_len | map->dm_segs[j].ds_len)((__uint32_t)(txr->txd_cmd | cmd_type_len | map->dm_segs
[j].ds_len))
;
1443 txd->read.olinfo_status = htole32(olinfo_status)((__uint32_t)(olinfo_status));
1444 last = i; /* descriptor that will get completion IRQ */
1445
1446 if (++i == sc->num_tx_desc)
1447 i = 0;
1448 }
1449
1450 txd->read.cmd_type_len |=
22
Dereference of null pointer
1451 htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS)((__uint32_t)(0x01000000 | 0x08000000));
1452
1453 bus_dmamap_sync(txr->txdma.dma_tag, map, 0, map->dm_mapsize,(*(txr->txdma.dma_tag)->_dmamap_sync)((txr->txdma.dma_tag
), (map), (0), (map->dm_mapsize), (0x04))
1454 BUS_DMASYNC_PREWRITE)(*(txr->txdma.dma_tag)->_dmamap_sync)((txr->txdma.dma_tag
), (map), (0), (map->dm_mapsize), (0x04))
;
1455
1456 /* Set the index of the descriptor that will be marked done */
1457 txbuf->m_head = m_head;
1458 txbuf->eop_index = last;
1459
1460 membar_producer()do { __asm volatile("" ::: "memory"); } while (0);
1461
1462 txr->next_avail_desc = i;
1463
1464 return (ntxc + j);
1465
1466xmit_fail:
1467 bus_dmamap_unload(txr->txdma.dma_tag, txbuf->map)(*(txr->txdma.dma_tag)->_dmamap_unload)((txr->txdma.
dma_tag), (txbuf->map))
;
1468 return (0);
1469}
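
The analyzer path above (steps 12 and 20-22) reaches source line 1450 with 'txd' still NULL because it assumes bus_dmamap_load_mbuf() can succeed while leaving map->dm_nsegs at zero, in which case the descriptor loop never runs. A minimal guard sketch under that assumption (an illustration only, not the committed fix):

	/* After the bus_dmamap_load_mbuf() switch succeeds: */
	if (map->dm_nsegs == 0) {
		/* Nothing was mapped; release the map and drop the packet. */
		bus_dmamap_unload(txr->txdma.dma_tag, map);
		return (0);
	}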
1470
1471void
1472ixgbe_iff(struct ix_softc *sc)
1473{
1474 struct ifnet *ifp = &sc->arpcom.ac_if;
1475 struct arpcom *ac = &sc->arpcom;
1476 uint32_t fctrl;
1477 uint8_t *mta;
1478 uint8_t *update_ptr;
1479 struct ether_multi *enm;
1480 struct ether_multistep step;
1481 int mcnt = 0;
1482
1483 IOCTL_DEBUGOUT("ixgbe_iff: begin")if (0) printf("ixgbe_iff: begin" "\n");
1484
1485 mta = sc->mta;
1486 bzero(mta, sizeof(uint8_t) * IXGBE_ETH_LENGTH_OF_ADDRESS *__builtin_bzero((mta), (sizeof(uint8_t) * 6 * 128))
1487 MAX_NUM_MULTICAST_ADDRESSES)__builtin_bzero((mta), (sizeof(uint8_t) * 6 * 128));
1488
1489 fctrl = IXGBE_READ_REG(&sc->hw, IXGBE_FCTRL)((((struct ixgbe_osdep *)(&sc->hw)->back)->os_memt
)->read_4((((struct ixgbe_osdep *)(&sc->hw)->back
)->os_memh), (0x05080)))
;
1490 fctrl &= ~(IXGBE_FCTRL_MPE0x00000100 | IXGBE_FCTRL_UPE0x00000200);
1491 ifp->if_flags &= ~IFF_ALLMULTI0x200;
1492
1493 if (ifp->if_flags & IFF_PROMISC0x100 || ac->ac_multirangecnt > 0 ||
1494 ac->ac_multicnt > MAX_NUM_MULTICAST_ADDRESSES128) {
1495 ifp->if_flags |= IFF_ALLMULTI0x200;
1496 fctrl |= IXGBE_FCTRL_MPE0x00000100;
1497 if (ifp->if_flags & IFF_PROMISC0x100)
1498 fctrl |= IXGBE_FCTRL_UPE0x00000200;
1499 } else {
1500 ETHER_FIRST_MULTI(step, &sc->arpcom, enm)do { (step).e_enm = ((&(&sc->arpcom)->ac_multiaddrs
)->lh_first); do { if ((((enm)) = ((step)).e_enm) != ((void
*)0)) ((step)).e_enm = ((((enm)))->enm_list.le_next); } while
( 0); } while ( 0)
;
1501 while (enm != NULL((void *)0)) {
1502 bcopy(enm->enm_addrlo,
1503 &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS6],
1504 IXGBE_ETH_LENGTH_OF_ADDRESS6);
1505 mcnt++;
1506
1507 ETHER_NEXT_MULTI(step, enm)do { if (((enm) = (step).e_enm) != ((void *)0)) (step).e_enm =
(((enm))->enm_list.le_next); } while ( 0)
;
1508 }
1509
1510 update_ptr = mta;
1511 sc->hw.mac.ops.update_mc_addr_list(&sc->hw, update_ptr, mcnt,
1512 ixgbe_mc_array_itr, TRUE1);
1513 }
1514
1515 IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, fctrl)((((struct ixgbe_osdep *)(&sc->hw)->back)->os_memt
)->write_4((((struct ixgbe_osdep *)(&sc->hw)->back
)->os_memh), (0x05080), (fctrl)))
;
1516}
1517
1518/*
1519 * This is an iterator function required by the multicast
1520 * shared code. It feeds the shared code routine the
1521 * addresses collected into the array by ixgbe_iff(), one by one.
1522 */
1523uint8_t *
1524ixgbe_mc_array_itr(struct ixgbe_hw *hw, uint8_t **update_ptr, uint32_t *vmdq)
1525{
1526 uint8_t *addr = *update_ptr;
1527 uint8_t *newptr;
1528 *vmdq = 0;
1529
1530 newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS6;
1531 *update_ptr = newptr;
1532 return addr;
1533}
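
A sketch of how the iterator is typically consumed (an assumption about the shared code's usage pattern, shown only to make the cursor handling concrete):

	uint8_t *cursor = mta;	/* flat array filled by ixgbe_iff() */
	uint32_t vmdq;
	int k;

	for (k = 0; k < mcnt; k++) {
		uint8_t *addr = ixgbe_mc_array_itr(&sc->hw, &cursor, &vmdq);
		/* addr points at entry k; cursor has advanced 6 bytes */
	}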
1534
1535void
1536ixgbe_update_link_status(struct ix_softc *sc)
1537{
1538 struct ifnet *ifp = &sc->arpcom.ac_if;
1539 int link_state = LINK_STATE_DOWN2;
1540
1541 ixgbe_check_link(&sc->hw, &sc->link_speed, &sc->link_up, 0);
1542
1543 ifp->if_baudrateif_data.ifi_baudrate = 0;
1544 if (sc->link_up) {
1545 link_state = LINK_STATE_FULL_DUPLEX6;
1546
1547 switch (sc->link_speed) {
1548 case IXGBE_LINK_SPEED_UNKNOWN0:
1549 ifp->if_baudrateif_data.ifi_baudrate = 0;
1550 break;
1551 case IXGBE_LINK_SPEED_100_FULL0x0008:
1552 ifp->if_baudrateif_data.ifi_baudrate = IF_Mbps(100)((((100) * 1000ULL) * 1000ULL));
1553 break;
1554 case IXGBE_LINK_SPEED_1GB_FULL0x0020:
1555 ifp->if_baudrateif_data.ifi_baudrate = IF_Gbps(1)((((((1) * 1000ULL) * 1000ULL) * 1000ULL)));
1556 break;
1557 case IXGBE_LINK_SPEED_10GB_FULL0x0080:
1558 ifp->if_baudrateif_data.ifi_baudrate = IF_Gbps(10)((((((10) * 1000ULL) * 1000ULL) * 1000ULL)));
1559 break;
1560 }
1561
1562 /* Update any Flow Control changes */
1563 sc->hw.mac.ops.fc_enable(&sc->hw);
1564 }
1565 if (ifp->if_link_stateif_data.ifi_link_state != link_state) {
1566 ifp->if_link_stateif_data.ifi_link_state = link_state;
1567 if_link_state_change(ifp);
1568 }
1569}
1570
1571
1572/*********************************************************************
1573 *
1574 * This routine disables all traffic on the adapter by issuing a
1575 * global reset on the MAC and deallocates TX/RX buffers.
1576 *
1577 **********************************************************************/
1578
1579void
1580ixgbe_stop(void *arg)
1581{
1582 struct ix_softc *sc = arg;
1583 struct ifnet *ifp = &sc->arpcom.ac_if;
1584 int i;
1585
1586 /* Tell the stack that the interface is no longer active */
1587 ifp->if_flags &= ~IFF_RUNNING0x40;
1588
1589#if NKSTAT0 > 0
1590 timeout_del(&sc->sc_kstat_tmo);
1591#endif
1592
1593 INIT_DEBUGOUT("ixgbe_stop: begin\n")if (0) printf("ixgbe_stop: begin\n" "\n");
1594 ixgbe_disable_intr(sc);
1595
1596 sc->hw.mac.ops.reset_hw(&sc->hw);
1597 sc->hw.adapter_stopped = FALSE0;
1598 sc->hw.mac.ops.stop_adapter(&sc->hw);
1599 if (sc->hw.mac.type == ixgbe_mac_82599EB)
1600 sc->hw.mac.ops.stop_mac_link_on_d3(&sc->hw);
1601 /* Turn off the laser */
1602 if (sc->hw.mac.ops.disable_tx_laser)
1603 sc->hw.mac.ops.disable_tx_laser(&sc->hw);
1604
1605 /* Reprogram RAR[0] in case the user changed it. */
1606 ixgbe_set_rar(&sc->hw, 0, sc->hw.mac.addr, 0, IXGBE_RAH_AV0x80000000);
1607
1608 intr_barrier(sc->tag);
1609 for (i = 0; i < sc->num_queues; i++) {
1610 struct ifqueue *ifq = ifp->if_ifqs[i];
1611 ifq_barrier(ifq);
1612 ifq_clr_oactive(ifq);
1613
1614 if (sc->queues[i].tag != NULL((void *)0))
1615 intr_barrier(sc->queues[i].tag);
1616 timeout_del(&sc->rx_rings[i].rx_refill);
1617 }
1618
1619 KASSERT((ifp->if_flags & IFF_RUNNING) == 0)(((ifp->if_flags & 0x40) == 0) ? (void)0 : __assert("diagnostic "
, "/usr/src/sys/dev/pci/if_ix.c", 1619, "(ifp->if_flags & IFF_RUNNING) == 0"
))
;
1620
1621 /* Should we really clear all structures on stop? */
1622 ixgbe_free_transmit_structures(sc);
1623 ixgbe_free_receive_structures(sc);
1624
1625 ixgbe_update_link_status(sc);
1626}
1627
1628
1629/*********************************************************************
1630 *
1631 * Determine hardware revision.
1632 *
1633 **********************************************************************/
1634void
1635ixgbe_identify_hardware(struct ix_softc *sc)
1636{
1637 struct ixgbe_osdep *os = &sc->osdep;
1638 struct pci_attach_args *pa = &os->os_pa;
1639 uint32_t reg;
1640
1641 /* Save off the information about this board */
1642 sc->hw.vendor_id = PCI_VENDOR(pa->pa_id)(((pa->pa_id) >> 0) & 0xffff);
1643 sc->hw.device_id = PCI_PRODUCT(pa->pa_id)(((pa->pa_id) >> 16) & 0xffff);
1644
1645 reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG0x08);
1646 sc->hw.revision_id = PCI_REVISION(reg)(((reg) >> 0) & 0xff);
1647
1648 reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG0x2c);
1649 sc->hw.subsystem_vendor_id = PCI_VENDOR(reg)(((reg) >> 0) & 0xffff);
1650 sc->hw.subsystem_device_id = PCI_PRODUCT(reg)(((reg) >> 16) & 0xffff);
1651
1652 /* We need this here to set the num_segs below */
1653 ixgbe_set_mac_type(&sc->hw);
1654
1655 /* Pick up the 82599 and VF settings */
1656 if (sc->hw.mac.type != ixgbe_mac_82598EB)
1657 sc->hw.phy.smart_speed = ixgbe_smart_speed;
1658 sc->num_segs = IXGBE_82599_SCATTER32;
1659}
1660
1661/*********************************************************************
1662 *
1663 * Setup the Legacy or MSI Interrupt handler
1664 *
1665 **********************************************************************/
1666int
1667ixgbe_allocate_legacy(struct ix_softc *sc)
1668{
1669 struct ixgbe_osdep *os = &sc->osdep;
1670 struct pci_attach_args *pa = &os->os_pa;
1671 const char *intrstr = NULL((void *)0);
1672 pci_chipset_tag_t pc = pa->pa_pc;
1673 pci_intr_handle_t ih;
1674
1675 /* We allocate a single interrupt resource */
1676 if (pci_intr_map_msi(pa, &ih) != 0 &&
1677 pci_intr_map(pa, &ih) != 0) {
1678 printf(": couldn't map interrupt\n");
1679 return (ENXIO6);
1680 }
1681
1682#if 0
1683 /* XXX */
1684 /* Tasklets for Link, SFP and Multispeed Fiber */
1685 TASK_INIT(&sc->link_task, 0, ixgbe_handle_link, sc);
1686 TASK_INIT(&sc->mod_task, 0, ixgbe_handle_mod, sc);
1687 TASK_INIT(&sc->msf_task, 0, ixgbe_handle_msf, sc);
1688#endif
1689
1690 intrstr = pci_intr_string(pc, ih);
1691 sc->tag = pci_intr_establish(pc, ih, IPL_NET0x7 | IPL_MPSAFE0x100,
1692 ixgbe_legacy_intr, sc, sc->dev.dv_xname);
1693 if (sc->tag == NULL((void *)0)) {
1694 printf(": couldn't establish interrupt");
1695 if (intrstr != NULL((void *)0))
1696 printf(" at %s", intrstr);
1697 printf("\n");
1698 return (ENXIO6);
1699 }
1700 printf(": %s", intrstr);
1701
1702 /* For simplicity in the handlers */
1703 sc->que_mask = IXGBE_EIMS_ENABLE_MASK( 0x0000FFFF | 0x00100000 | 0x40000000 | 0x80000000);
1704
1705 return (0);
1706}
1707
1708/*********************************************************************
1709 *
1710 * Setup the MSI-X Interrupt handlers
1711 *
1712 **********************************************************************/
1713int
1714ixgbe_allocate_msix(struct ix_softc *sc)
1715{
1716 struct ixgbe_osdep *os = &sc->osdep;
1717 struct pci_attach_args *pa = &os->os_pa;
1718 int i = 0, error = 0;
1719 struct ix_queue *que;
1720 pci_intr_handle_t ih;
1721
1722 for (i = 0, que = sc->queues; i < sc->num_queues; i++, que++) {
1723 if (pci_intr_map_msix(pa, i, &ih)) {
1724 printf("ixgbe_allocate_msix: "
1725 "pci_intr_map_msix vec %d failed\n", i);
1726 error = ENOMEM12;
1727 goto fail;
1728 }
1729
1730 que->tag = pci_intr_establish_cpu(pa->pa_pc, ih,
1731 IPL_NET0x7 | IPL_MPSAFE0x100, intrmap_cpu(sc->sc_intrmap, i),
1732 ixgbe_queue_intr, que, que->name);
1733 if (que->tag == NULL((void *)0)) {
1734 printf("ixgbe_allocate_msix: "
1735 "pci_intr_establish vec %d failed\n", i);
1736 error = ENOMEM12;
1737 goto fail;
1738 }
1739
1740 que->msix = i;
1741 }
1742
1743 /* Now map the last MSI-X vector, used for link status/control */
1744 if (pci_intr_map_msix(pa, i, &ih)) {
1745 printf("ixgbe_allocate_msix: "
1746 "pci_intr_map_msix link vector failed\n");
1747 error = ENOMEM12;
1748 goto fail;
1749 }
1750
1751 sc->tag = pci_intr_establish(pa->pa_pc, ih, IPL_NET0x7 | IPL_MPSAFE0x100,
1752 ixgbe_link_intr, sc, sc->dev.dv_xname);
1753 if (sc->tag == NULL((void *)0)) {
1754 printf("ixgbe_allocate_msix: "
1755 "pci_intr_establish link vector failed\n");
1756 error = ENOMEM12;
1757 goto fail;
1758 }
1759 sc->linkvec = i;
1760 printf(", %s, %d queue%s", pci_intr_string(pa->pa_pc, ih),
1761 i, (i > 1) ? "s" : "");
1762
1763 return (0);
1764fail:
1765 for (que = sc->queues; i > 0; i--, que++) {
1766 if (que->tag == NULL((void *)0))
1767 continue;
1768 pci_intr_disestablish(pa->pa_pc, que->tag);
1769 que->tag = NULL((void *)0);
1770 }
1771
1772 return (error);
1773}
1774
1775void
1776ixgbe_setup_msix(struct ix_softc *sc)
1777{
1778 struct ixgbe_osdep *os = &sc->osdep;
1779 struct pci_attach_args *pa = &os->os_pa;
1780 int nmsix;
1781 unsigned int maxq;
1782
1783 if (!ixgbe_enable_msix)
1784 return;
1785
1786 nmsix = pci_intr_msix_count(pa);
1787 if (nmsix <= 1)
1788 return;
1789
1790 /* give one vector to events */
1791 nmsix--;
1792
1793 /* XXX the number of queues is limited to what we can keep stats on */
1794 maxq = (sc->hw.mac.type == ixgbe_mac_82598EB) ? 8 : 16;
1795
1796 sc->sc_intrmap = intrmap_create(&sc->dev, nmsix, maxq, 0);
1797 sc->num_queues = intrmap_count(sc->sc_intrmap);
1798}
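
The resulting MSI-X vector layout (a queue count of 4 is assumed here for illustration; the actual number comes from intrmap_count() above):

	/*
	 * vectors 0..3 -> per-queue interrupts (ixgbe_queue_intr)
	 * vector  4    -> link/status interrupt (ixgbe_link_intr),
	 *                 recorded in sc->linkvec by ixgbe_allocate_msix()
	 */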
1799
1800int
1801ixgbe_allocate_pci_resources(struct ix_softc *sc)
1802{
1803 struct ixgbe_osdep *os = &sc->osdep;
1804 struct pci_attach_args *pa = &os->os_pa;
1805 int val;
1806
1807 val = pci_conf_read(pa->pa_pc, pa->pa_tag, PCIR_BAR(0)(0x10 + (0) * 4));
1808 if (PCI_MAPREG_TYPE(val)((val) & 0x00000001) != PCI_MAPREG_TYPE_MEM0x00000000) {
1809 printf(": mmba is not mem space\n");
1810 return (ENXIO6);
1811 }
1812
1813 if (pci_mapreg_map(pa, PCIR_BAR(0)(0x10 + (0) * 4), PCI_MAPREG_MEM_TYPE(val)((val) & 0x00000006), 0,
1814 &os->os_memt, &os->os_memh, &os->os_membase, &os->os_memsize, 0)) {
1815 printf(": cannot find mem space\n");
1816 return (ENXIO6);
1817 }
1818 sc->hw.hw_addr = (uint8_t *)os->os_membase;
1819
1820 /* Legacy defaults */
1821 sc->num_queues = 1;
1822 sc->hw.back = os;
1823
1824 /* Now set up MSI or MSI-X and determine the number of supported vectors. */
1825 ixgbe_setup_msix(sc);
1826
1827 return (0);
1828}
1829
1830void
1831ixgbe_free_pci_resources(struct ix_softc * sc)
1832{
1833 struct ixgbe_osdep *os = &sc->osdep;
1834 struct pci_attach_args *pa = &os->os_pa;
1835 struct ix_queue *que = sc->queues;
1836 int i;
1837
1838 /* Release all msix queue resources: */
1839 for (i = 0; i < sc->num_queues; i++, que++) {
1840 if (que->tag)
1841 pci_intr_disestablish(pa->pa_pc, que->tag);
1842 que->tag = NULL((void *)0);
1843 }
1844
1845 if (sc->tag)
1846 pci_intr_disestablish(pa->pa_pc, sc->tag);
1847 sc->tag = NULL((void *)0);
1848 if (os->os_membase != 0)
1849 bus_space_unmap(os->os_memt, os->os_memh, os->os_memsize);
1850 os->os_membase = 0;
1851}
1852
1853/*********************************************************************
1854 *
1855 * Setup networking device structure and register an interface.
1856 *
1857 **********************************************************************/
1858void
1859ixgbe_setup_interface(struct ix_softc *sc)
1860{
1861 struct ifnet *ifp = &sc->arpcom.ac_if;
1862 int i;
1863
1864 strlcpy(ifp->if_xname, sc->dev.dv_xname, IFNAMSIZ16);
1865 ifp->if_softc = sc;
1866 ifp->if_flags = IFF_BROADCAST0x2 | IFF_SIMPLEX0x800 | IFF_MULTICAST0x8000;
1867 ifp->if_xflags = IFXF_MPSAFE0x1;
1868 ifp->if_ioctl = ixgbe_ioctl;
1869 ifp->if_qstart = ixgbe_start;
1870 ifp->if_timer = 0;
1871 ifp->if_watchdog = ixgbe_watchdog;
1872 ifp->if_hardmtu = IXGBE_MAX_FRAME_SIZE9216 -
1873 ETHER_HDR_LEN((6 * 2) + 2) - ETHER_CRC_LEN4;
1874 ifq_set_maxlen(&ifp->if_snd, sc->num_tx_desc - 1)((&ifp->if_snd)->ifq_maxlen = (sc->num_tx_desc -
1))
;
1875
1876 ifp->if_capabilitiesif_data.ifi_capabilities = IFCAP_VLAN_MTU0x00000010;
1877
1878#if NVLAN1 > 0
1879 ifp->if_capabilitiesif_data.ifi_capabilities |= IFCAP_VLAN_HWTAGGING0x00000020;
1880#endif
1881
1882 ifp->if_capabilitiesif_data.ifi_capabilities |= IFCAP_CSUM_TCPv40x00000002 | IFCAP_CSUM_UDPv40x00000004;
1883
1884 /*
1885 * Specify the media types supported by this sc and register
1886 * callbacks to update media and link information
1887 */
1888 ifmedia_init(&sc->media, IFM_IMASK0xff00000000000000ULL, ixgbe_media_change,
1889 ixgbe_media_status);
1890 ixgbe_add_media_types(sc);
1891 ifmedia_set(&sc->media, IFM_ETHER0x0000000000000100ULL | IFM_AUTO0ULL);
1892
1893 if_attach(ifp);
1894 ether_ifattach(ifp);
1895
1896 if_attach_queues(ifp, sc->num_queues);
1897 if_attach_iqueues(ifp, sc->num_queues);
1898 for (i = 0; i < sc->num_queues; i++) {
1899 struct ifqueue *ifq = ifp->if_ifqs[i];
1900 struct ifiqueue *ifiq = ifp->if_iqs[i];
1901 struct tx_ring *txr = &sc->tx_rings[i];
1902 struct rx_ring *rxr = &sc->rx_rings[i];
1903
1904 ifq->ifq_softc_ifq_ptr._ifq_softc = txr;
1905 txr->ifq = ifq;
1906
1907 ifiq->ifiq_softc_ifiq_ptr._ifiq_softc = rxr;
1908 rxr->ifiq = ifiq;
1909
1910#if NKSTAT0 > 0
1911 ix_txq_kstats(sc, txr);
1912 ix_rxq_kstats(sc, rxr);
1913#endif
1914 }
1915
1916 sc->max_frame_size = IXGBE_MAX_FRAME_SIZE9216;
1917}
1918
1919void
1920ixgbe_add_media_types(struct ix_softc *sc)
1921{
1922 struct ixgbe_hw *hw = &sc->hw;
1923 uint64_t layer;
1924
1925 sc->phy_layer = hw->mac.ops.get_supported_physical_layer(hw);
1926 layer = sc->phy_layer;
1927
1928 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T0x00001)
1929 ifmedia_add(&sc->media, IFM_ETHER0x0000000000000100ULL | IFM_10G_T22, 0, NULL((void *)0));
1930 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T0x00002)
1931 ifmedia_add(&sc->media, IFM_ETHER0x0000000000000100ULL | IFM_1000_T16, 0, NULL((void *)0));
1932 if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX0x00004)
1933 ifmedia_add(&sc->media, IFM_ETHER0x0000000000000100ULL | IFM_100_TX6, 0, NULL((void *)0));
1934 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU0x00008 ||
1935 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA0x02000)
1936 ifmedia_add(&sc->media, IFM_ETHER0x0000000000000100ULL | IFM_10G_SFP_CU23, 0, NULL((void *)0));
1937 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR0x00010) {
1938 ifmedia_add(&sc->media, IFM_ETHER0x0000000000000100ULL | IFM_10G_LR18, 0, NULL((void *)0));
1939 if (hw->phy.multispeed_fiber)
1940 ifmedia_add(&sc->media, IFM_ETHER0x0000000000000100ULL | IFM_1000_LX14, 0,
1941 NULL((void *)0));
1942 }
1943 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR0x00040) {
1944 ifmedia_add(&sc->media, IFM_ETHER0x0000000000000100ULL | IFM_10G_SR19, 0, NULL((void *)0));
1945 if (hw->phy.multispeed_fiber)
1946 ifmedia_add(&sc->media, IFM_ETHER0x0000000000000100ULL | IFM_1000_SX11, 0,
1947 NULL((void *)0));
1948 } else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX0x04000)
1949 ifmedia_add(&sc->media, IFM_ETHER0x0000000000000100ULL | IFM_1000_SX11, 0, NULL((void *)0));
1950 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX40x00100)
1951 ifmedia_add(&sc->media, IFM_ETHER0x0000000000000100ULL | IFM_10G_CX420, 0, NULL((void *)0));
1952 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR0x00800)
1953 ifmedia_add(&sc->media, IFM_ETHER0x0000000000000100ULL | IFM_10G_KR30, 0, NULL((void *)0));
1954 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX40x00080)
1955 ifmedia_add(&sc->media, IFM_ETHER0x0000000000000100ULL | IFM_10G_KX429, 0, NULL((void *)0));
1956 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX0x00200)
1957 ifmedia_add(&sc->media, IFM_ETHER0x0000000000000100ULL | IFM_1000_KX28, 0, NULL((void *)0));
1958 if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX0x10000)
1959 ifmedia_add(&sc->media, IFM_ETHER0x0000000000000100ULL | IFM_2500_KX33, 0, NULL((void *)0));
1960
1961 if (hw->device_id == IXGBE_DEV_ID_82598AT0x10C8) {
1962 ifmedia_add(&sc->media, IFM_ETHER0x0000000000000100ULL | IFM_1000_T16 | IFM_FDX0x0000010000000000ULL, 0,
1963 NULL((void *)0));
1964 ifmedia_add(&sc->media, IFM_ETHER0x0000000000000100ULL | IFM_1000_T16, 0, NULL((void *)0));
1965 }
1966
1967 ifmedia_add(&sc->media, IFM_ETHER0x0000000000000100ULL | IFM_AUTO0ULL, 0, NULL((void *)0));
1968}
1969
1970void
1971ixgbe_config_link(struct ix_softc *sc)
1972{
1973 uint32_t autoneg, err = 0;
1974 bool_Bool negotiate;
1975
1976 if (ixgbe_is_sfp(&sc->hw)) {
1977 if (sc->hw.phy.multispeed_fiber) {
1978 sc->hw.mac.ops.setup_sfp(&sc->hw);
1979 if (sc->hw.mac.ops.enable_tx_laser)
1980 sc->hw.mac.ops.enable_tx_laser(&sc->hw);
1981 ixgbe_handle_msf(sc);
1982 } else
1983 ixgbe_handle_mod(sc);
1984 } else {
1985 if (sc->hw.mac.ops.check_link)
1986 err = sc->hw.mac.ops.check_link(&sc->hw, &autoneg,
1987 &sc->link_up, FALSE0);
1988 if (err)
1989 return;
1990 autoneg = sc->hw.phy.autoneg_advertised;
1991 if ((!autoneg) && (sc->hw.mac.ops.get_link_capabilities))
1992 err = sc->hw.mac.ops.get_link_capabilities(&sc->hw,
1993 &autoneg, &negotiate);
1994 if (err)
1995 return;
1996 if (sc->hw.mac.ops.setup_link)
1997 sc->hw.mac.ops.setup_link(&sc->hw,
1998 autoneg, sc->link_up);
1999 }
2000}
2001
2002/********************************************************************
2003 * Manage DMA'able memory.
2004 *******************************************************************/
2005int
2006ixgbe_dma_malloc(struct ix_softc *sc, bus_size_t size,
2007 struct ixgbe_dma_alloc *dma, int mapflags)
2008{
2009 struct ifnet *ifp = &sc->arpcom.ac_if;
2010 struct ixgbe_osdep *os = &sc->osdep;
2011 int r;
2012
2013 dma->dma_tag = os->os_pa.pa_dmat;
2014 r = bus_dmamap_create(dma->dma_tag, size, 1,(*(dma->dma_tag)->_dmamap_create)((dma->dma_tag), (size
), (1), (size), (0), (0x0001), (&dma->dma_map))
2015 size, 0, BUS_DMA_NOWAIT, &dma->dma_map)(*(dma->dma_tag)->_dmamap_create)((dma->dma_tag), (size
), (1), (size), (0), (0x0001), (&dma->dma_map))
;
2016 if (r != 0) {
2017 printf("%s: ixgbe_dma_malloc: bus_dmamap_create failed; "
2018 "error %u\n", ifp->if_xname, r);
2019 goto fail_0;
2020 }
2021
2022 r = bus_dmamem_alloc(dma->dma_tag, size, PAGE_SIZE, 0, &dma->dma_seg,(*(dma->dma_tag)->_dmamem_alloc)((dma->dma_tag), (size
), ((1 << 12)), (0), (&dma->dma_seg), (1), (&
dma->dma_nseg), (0x0001))
2023 1, &dma->dma_nseg, BUS_DMA_NOWAIT)(*(dma->dma_tag)->_dmamem_alloc)((dma->dma_tag), (size
), ((1 << 12)), (0), (&dma->dma_seg), (1), (&
dma->dma_nseg), (0x0001))
;
2024 if (r != 0) {
2025 printf("%s: ixgbe_dma_malloc: bus_dmamem_alloc failed; "
2026 "error %u\n", ifp->if_xname, r);
2027 goto fail_1;
2028 }
2029
2030 r = bus_dmamem_map(dma->dma_tag, &dma->dma_seg, dma->dma_nseg, size,(*(dma->dma_tag)->_dmamem_map)((dma->dma_tag), (&
dma->dma_seg), (dma->dma_nseg), (size), (&dma->dma_vaddr
), (0x0001))
2031 &dma->dma_vaddr, BUS_DMA_NOWAIT)(*(dma->dma_tag)->_dmamem_map)((dma->dma_tag), (&
dma->dma_seg), (dma->dma_nseg), (size), (&dma->dma_vaddr
), (0x0001))
;
2032 if (r != 0) {
2033 printf("%s: ixgbe_dma_malloc: bus_dmamem_map failed; "
2034 "error %u\n", ifp->if_xname, r);
2035 goto fail_2;
2036 }
2037
2038 r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,(*(dma->dma_tag)->_dmamap_load)((dma->dma_tag), (dma
->dma_map), (dma->dma_vaddr), (size), (((void *)0)), (mapflags
| 0x0001))
2039 size, NULL, mapflags | BUS_DMA_NOWAIT)(*(dma->dma_tag)->_dmamap_load)((dma->dma_tag), (dma
->dma_map), (dma->dma_vaddr), (size), (((void *)0)), (mapflags
| 0x0001))
;
2040 if (r != 0) {
2041 printf("%s: ixgbe_dma_malloc: bus_dmamap_load failed; "
2042 "error %u\n", ifp->if_xname, r);
2043 goto fail_3;
2044 }
2045
2046 dma->dma_size = size;
2047 return (0);
2048fail_3:
2049 bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, size)(*(dma->dma_tag)->_dmamem_unmap)((dma->dma_tag), (dma
->dma_vaddr), (size))
;
2050fail_2:
2051 bus_dmamem_free(dma->dma_tag, &dma->dma_seg, dma->dma_nseg)(*(dma->dma_tag)->_dmamem_free)((dma->dma_tag), (&
dma->dma_seg), (dma->dma_nseg))
;
2052fail_1:
2053 bus_dmamap_destroy(dma->dma_tag, dma->dma_map)(*(dma->dma_tag)->_dmamap_destroy)((dma->dma_tag), (
dma->dma_map))
;
2054fail_0:
2055 dma->dma_map = NULL((void *)0);
2056 dma->dma_tag = NULL((void *)0);
2057 return (r);
2058}
2059
2060void
2061ixgbe_dma_free(struct ix_softc *sc, struct ixgbe_dma_alloc *dma)
2062{
2063 if (dma->dma_tag == NULL((void *)0))
2064 return;
2065
2066 if (dma->dma_map != NULL((void *)0)) {
2067 bus_dmamap_sync(dma->dma_tag, dma->dma_map, 0,(*(dma->dma_tag)->_dmamap_sync)((dma->dma_tag), (dma
->dma_map), (0), (dma->dma_map->dm_mapsize), (0x02 |
0x08))
2068 dma->dma_map->dm_mapsize,(*(dma->dma_tag)->_dmamap_sync)((dma->dma_tag), (dma
->dma_map), (0), (dma->dma_map->dm_mapsize), (0x02 |
0x08))
2069 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE)(*(dma->dma_tag)->_dmamap_sync)((dma->dma_tag), (dma
->dma_map), (0), (dma->dma_map->dm_mapsize), (0x02 |
0x08))
;
2070 bus_dmamap_unload(dma->dma_tag, dma->dma_map)(*(dma->dma_tag)->_dmamap_unload)((dma->dma_tag), (dma
->dma_map))
;
2071 bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, dma->dma_size)(*(dma->dma_tag)->_dmamem_unmap)((dma->dma_tag), (dma
->dma_vaddr), (dma->dma_size))
;
2072 bus_dmamem_free(dma->dma_tag, &dma->dma_seg, dma->dma_nseg)(*(dma->dma_tag)->_dmamem_free)((dma->dma_tag), (&
dma->dma_seg), (dma->dma_nseg))
;
2073 bus_dmamap_destroy(dma->dma_tag, dma->dma_map)(*(dma->dma_tag)->_dmamap_destroy)((dma->dma_tag), (
dma->dma_map))
;
2074 dma->dma_map = NULL((void *)0);
2075 }
2076}
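
A minimal usage sketch for the pair above, assuming a caller on the attach path that needs one page of descriptor memory (the size and error handling are illustrative):

	struct ixgbe_dma_alloc dma;

	if (ixgbe_dma_malloc(sc, PAGE_SIZE, &dma, BUS_DMA_NOWAIT) != 0)
		return (ENOMEM);
	/* ... use dma.dma_vaddr and dma.dma_map ... */
	ixgbe_dma_free(sc, &dma);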
2077
2078
2079/*********************************************************************
2080 *
2081 * Allocate memory for the transmit and receive rings, and then
2082 * the descriptors associated with each, called only once at attach.
2083 *
2084 **********************************************************************/
2085int
2086ixgbe_allocate_queues(struct ix_softc *sc)
2087{
2088 struct ifnet *ifp = &sc->arpcom.ac_if;
2089 struct ix_queue *que;
2090 struct tx_ring *txr;
2091 struct rx_ring *rxr;
2092 int rsize, tsize;
2093 int txconf = 0, rxconf = 0, i;
2094
2095 /* First allocate the top level queue structs */
2096 if (!(sc->queues = mallocarray(sc->num_queues,
2097 sizeof(struct ix_queue), M_DEVBUF2, M_NOWAIT0x0002 | M_ZERO0x0008))) {
2098 printf("%s: Unable to allocate queue memory\n", ifp->if_xname);
2099 goto fail;
2100 }
2101
2102 /* Then allocate the TX ring struct memory */
2103 if (!(sc->tx_rings = mallocarray(sc->num_queues,
2104 sizeof(struct tx_ring), M_DEVBUF2, M_NOWAIT0x0002 | M_ZERO0x0008))) {
2105 printf("%s: Unable to allocate TX ring memory\n", ifp->if_xname);
2106 goto fail;
2107 }
2108
2109 /* Next allocate the RX */
2110 if (!(sc->rx_rings = mallocarray(sc->num_queues,
2111 sizeof(struct rx_ring), M_DEVBUF2, M_NOWAIT0x0002 | M_ZERO0x0008))) {
2112 printf("%s: Unable to allocate RX ring memory\n", ifp->if_xname);
2113 goto rx_fail;
2114 }
2115
2116 /* For the ring itself */
2117 tsize = roundup2(sc->num_tx_desc *(((sc->num_tx_desc * sizeof(union ixgbe_adv_tx_desc)) + (128
) - 1) & ~((128) - 1))
2118 sizeof(union ixgbe_adv_tx_desc), DBA_ALIGN)(((sc->num_tx_desc * sizeof(union ixgbe_adv_tx_desc)) + (128
) - 1) & ~((128) - 1))
;
2119
2120 /*
2121 * Now set up the TX queues. txconf counts how many have been
2122 * configured, so that if something fails midway the memory
2123 * already allocated can be undone gracefully.
2124 */
2125 for (i = 0; i < sc->num_queues; i++, txconf++) {
2126 /* Set up some basics */
2127 txr = &sc->tx_rings[i];
2128 txr->sc = sc;
2129 txr->me = i;
2130
2131 if (ixgbe_dma_malloc(sc, tsize,
2132 &txr->txdma, BUS_DMA_NOWAIT0x0001)) {
2133 printf("%s: Unable to allocate TX Descriptor memory\n",
2134 ifp->if_xname);
2135 goto err_tx_desc;
2136 }
2137 txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr;
2138 bzero((void *)txr->tx_base, tsize)__builtin_bzero(((void *)txr->tx_base), (tsize));
2139 }
2140
2141 /*
2142 * Next the RX queues...
2143 */
2144 rsize = roundup2(sc->num_rx_desc *(((sc->num_rx_desc * sizeof(union ixgbe_adv_rx_desc)) + (4096
) - 1) & ~((4096) - 1))
2145 sizeof(union ixgbe_adv_rx_desc), 4096)(((sc->num_rx_desc * sizeof(union ixgbe_adv_rx_desc)) + (4096
) - 1) & ~((4096) - 1))
;
2146 for (i = 0; i < sc->num_queues; i++, rxconf++) {
2147 rxr = &sc->rx_rings[i];
2148 /* Set up some basics */
2149 rxr->sc = sc;
2150 rxr->me = i;
2151 timeout_set(&rxr->rx_refill, ixgbe_rxrefill, rxr);
2152
2153 if (ixgbe_dma_malloc(sc, rsize,
2154 &rxr->rxdma, BUS_DMA_NOWAIT0x0001)) {
2155 printf("%s: Unable to allocate RxDescriptor memory\n",
2156 ifp->if_xname);
2157 goto err_rx_desc;
2158 }
2159 rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr;
2160 bzero((void *)rxr->rx_base, rsize)__builtin_bzero(((void *)rxr->rx_base), (rsize));
2161 }
2162
2163 /*
2164 * Finally set up the queue holding structs
2165 */
2166 for (i = 0; i < sc->num_queues; i++) {
2167 que = &sc->queues[i];
2168 que->sc = sc;
2169 que->txr = &sc->tx_rings[i];
2170 que->rxr = &sc->rx_rings[i];
2171 snprintf(que->name, sizeof(que->name), "%s:%d",
2172 sc->dev.dv_xname, i);
2173 }
2174
2175 return (0);
2176
2177err_rx_desc:
2178 for (rxr = sc->rx_rings; rxconf > 0; rxr++, rxconf--)
2179 ixgbe_dma_free(sc, &rxr->rxdma);
2180err_tx_desc:
2181 for (txr = sc->tx_rings; txconf > 0; txr++, txconf--)
2182 ixgbe_dma_free(sc, &txr->txdma);
2183 free(sc->rx_rings, M_DEVBUF2, sc->num_queues * sizeof(struct rx_ring));
2184 sc->rx_rings = NULL((void *)0);
2185rx_fail:
2186 free(sc->tx_rings, M_DEVBUF2, sc->num_queues * sizeof(struct tx_ring));
2187 sc->tx_rings = NULL((void *)0);
2188fail:
2189 return (ENOMEM12);
2190}
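
A worked example of the ring sizing above (256 descriptors is assumed purely for illustration):

	/*
	 * Each union ixgbe_adv_tx_desc and ixgbe_adv_rx_desc is 16 bytes,
	 * so with 256 descriptors:
	 *   tsize = roundup2(256 * 16, DBA_ALIGN) = roundup2(4096, 128) = 4096
	 *   rsize = roundup2(256 * 16, 4096)      = 4096
	 */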
2191
2192/*********************************************************************
2193 *
2194 * Allocate memory for tx_buffer structures. The tx_buffer stores all
2195 * the information needed to transmit a packet on the wire. This is
2196 * called only once at attach; setup is done on every reset.
2197 *
2198 **********************************************************************/
2199int
2200ixgbe_allocate_transmit_buffers(struct tx_ring *txr)
2201{
2202 struct ix_softc *sc = txr->sc;
2203 struct ifnet *ifp = &sc->arpcom.ac_if;
2204 struct ixgbe_tx_buf *txbuf;
2205 int error, i;
2206
2207 if (!(txr->tx_buffers = mallocarray(sc->num_tx_desc,
2208 sizeof(struct ixgbe_tx_buf), M_DEVBUF2, M_NOWAIT0x0002 | M_ZERO0x0008))) {
2209 printf("%s: Unable to allocate tx_buffer memory\n",
2210 ifp->if_xname);
2211 error = ENOMEM12;
2212 goto fail;
2213 }
2214 txr->txtag = txr->txdma.dma_tag;
2215
2216 /* Create the descriptor buffer dma maps */
2217 for (i = 0; i < sc->num_tx_desc; i++) {
2218 txbuf = &txr->tx_buffers[i];
2219 error = bus_dmamap_create(txr->txdma.dma_tag, IXGBE_TSO_SIZE,(*(txr->txdma.dma_tag)->_dmamap_create)((txr->txdma.
dma_tag), (262140), (sc->num_segs), ((1 << 12)), (0)
, (0x0001), (&txbuf->map))
2220 sc->num_segs, PAGE_SIZE, 0,(*(txr->txdma.dma_tag)->_dmamap_create)((txr->txdma.
dma_tag), (262140), (sc->num_segs), ((1 << 12)), (0)
, (0x0001), (&txbuf->map))
2221 BUS_DMA_NOWAIT, &txbuf->map)(*(txr->txdma.dma_tag)->_dmamap_create)((txr->txdma.
dma_tag), (262140), (sc->num_segs), ((1 << 12)), (0)
, (0x0001), (&txbuf->map))
;
2222
2223 if (error != 0) {
2224 printf("%s: Unable to create TX DMA map\n",
2225 ifp->if_xname);
2226 goto fail;
2227 }
2228 }
2229
2230 return 0;
2231fail:
2232 return (error);
2233}
2234
2235/*********************************************************************
2236 *
2237 * Initialize a transmit ring.
2238 *
2239 **********************************************************************/
2240int
2241ixgbe_setup_transmit_ring(struct tx_ring *txr)
2242{
2243 struct ix_softc *sc = txr->sc;
2244 int error;
2245
2246 /* Now allocate transmit buffers for the ring */
2247 if ((error = ixgbe_allocate_transmit_buffers(txr)) != 0)
2248 return (error);
2249
2250 /* Clear the old ring contents */
2251 bzero((void *)txr->tx_base,__builtin_bzero(((void *)txr->tx_base), ((sizeof(union ixgbe_adv_tx_desc
)) * sc->num_tx_desc))
2252 (sizeof(union ixgbe_adv_tx_desc)) * sc->num_tx_desc)__builtin_bzero(((void *)txr->tx_base), ((sizeof(union ixgbe_adv_tx_desc
)) * sc->num_tx_desc))
;
2253
2254 /* Reset indices */
2255 txr->next_avail_desc = 0;
2256 txr->next_to_clean = 0;
2257
2258 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,(*(txr->txdma.dma_tag)->_dmamap_sync)((txr->txdma.dma_tag
), (txr->txdma.dma_map), (0), (txr->txdma.dma_map->dm_mapsize
), (0x01 | 0x04))
2259 0, txr->txdma.dma_map->dm_mapsize,(*(txr->txdma.dma_tag)->_dmamap_sync)((txr->txdma.dma_tag
), (txr->txdma.dma_map), (0), (txr->txdma.dma_map->dm_mapsize
), (0x01 | 0x04))
2260 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE)(*(txr->txdma.dma_tag)->_dmamap_sync)((txr->txdma.dma_tag
), (txr->txdma.dma_map), (0), (txr->txdma.dma_map->dm_mapsize
), (0x01 | 0x04))
;
2261
2262 return (0);
2263}
2264
2265/*********************************************************************
2266 *
2267 * Initialize all transmit rings.
2268 *
2269 **********************************************************************/
2270int
2271ixgbe_setup_transmit_structures(struct ix_softc *sc)
2272{
2273 struct tx_ring *txr = sc->tx_rings;
2274 int i, error;
2275
2276 for (i = 0; i < sc->num_queues; i++, txr++) {
2277 if ((error = ixgbe_setup_transmit_ring(txr)) != 0)
2278 goto fail;
2279 }
2280
2281 return (0);
2282fail:
2283 ixgbe_free_transmit_structures(sc);
2284 return (error);
2285}
2286
2287/*********************************************************************
2288 *
2289 * Enable transmit unit.
2290 *
2291 **********************************************************************/
2292void
2293ixgbe_initialize_transmit_units(struct ix_softc *sc)
2294{
2295 struct ifnet *ifp = &sc->arpcom.ac_if;
2296 struct tx_ring *txr;
2297 struct ixgbe_hw *hw = &sc->hw;
2298 int i;
2299 uint64_t tdba;
2300 uint32_t txctrl;
2301
2302 /* Setup the Base and Length of the Tx Descriptor Ring */
2303
2304 for (i = 0; i < sc->num_queues; i++) {
2305 txr = &sc->tx_rings[i];
2306
2307 /* Setup descriptor base address */
2308 tdba = txr->txdma.dma_map->dm_segs[0].ds_addr;
2309 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(i),((((struct ixgbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ixgbe_osdep *)(hw)->back)->os_memh), ((0x06000
+ ((i) * 0x40))), ((tdba & 0x00000000ffffffffULL))))
2310 (tdba & 0x00000000ffffffffULL))((((struct ixgbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ixgbe_osdep *)(hw)->back)->os_memh), ((0x06000
+ ((i) * 0x40))), ((tdba & 0x00000000ffffffffULL))))
;
2311 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(i), (tdba >> 32))((((struct ixgbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ixgbe_osdep *)(hw)->back)->os_memh), ((0x06004
+ ((i) * 0x40))), ((tdba >> 32))))
;
2312 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(i),((((struct ixgbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ixgbe_osdep *)(hw)->back)->os_memh), ((0x06008
+ ((i) * 0x40))), (sc->num_tx_desc * sizeof(struct ixgbe_legacy_tx_desc
))))
2313 sc->num_tx_desc * sizeof(struct ixgbe_legacy_tx_desc))((((struct ixgbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ixgbe_osdep *)(hw)->back)->os_memh), ((0x06008
+ ((i) * 0x40))), (sc->num_tx_desc * sizeof(struct ixgbe_legacy_tx_desc
))))
;
2314
2315 /* Setup the HW Tx Head and Tail descriptor pointers */
2316 IXGBE_WRITE_REG(hw, IXGBE_TDH(i), 0)((((struct ixgbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ixgbe_osdep *)(hw)->back)->os_memh), ((0x06010
+ ((i) * 0x40))), (0)))
;
2317 IXGBE_WRITE_REG(hw, IXGBE_TDT(i), 0)((((struct ixgbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ixgbe_osdep *)(hw)->back)->os_memh), ((0x06018
+ ((i) * 0x40))), (0)))
;
2318
2319 /* Setup Transmit Descriptor Cmd Settings */
2320 txr->txd_cmd = IXGBE_TXD_CMD_IFCS0x02000000;
2321 txr->queue_status = IXGBE_QUEUE_IDLE;
2322 txr->watchdog_timer = 0;
2323
2324 /* Disable Head Writeback */
2325 switch (hw->mac.type) {
2326 case ixgbe_mac_82598EB:
2327 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i))((((struct ixgbe_osdep *)(hw)->back)->os_memt)->read_4
((((struct ixgbe_osdep *)(hw)->back)->os_memh), ((0x07200
+ ((i) * 4)))))
;
2328 break;
2329 case ixgbe_mac_82599EB:
2330 case ixgbe_mac_X540:
2331 default:
2332 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i))((((struct ixgbe_osdep *)(hw)->back)->os_memt)->read_4
((((struct ixgbe_osdep *)(hw)->back)->os_memh), ((0x0600C
+ ((i) * 0x40)))))
;
2333 break;
2334 }
2335 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN(1 << 11);
2336 switch (hw->mac.type) {
2337 case ixgbe_mac_82598EB:
2338 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), txctrl)((((struct ixgbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ixgbe_osdep *)(hw)->back)->os_memh), ((0x07200
+ ((i) * 4))), (txctrl)))
;
2339 break;
2340 case ixgbe_mac_82599EB:
2341 case ixgbe_mac_X540:
2342 default:
2343 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), txctrl)((((struct ixgbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ixgbe_osdep *)(hw)->back)->os_memh), ((0x0600C
+ ((i) * 0x40))), (txctrl)))
;
2344 break;
2345 }
2346 }
2347 ifp->if_timer = 0;
2348
2349 if (hw->mac.type != ixgbe_mac_82598EB) {
2350 uint32_t dmatxctl, rttdcs;
2351 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL)((((struct ixgbe_osdep *)(hw)->back)->os_memt)->read_4
((((struct ixgbe_osdep *)(hw)->back)->os_memh), (0x04A80
)))
;
2352 dmatxctl |= IXGBE_DMATXCTL_TE0x1;
2353 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl)((((struct ixgbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ixgbe_osdep *)(hw)->back)->os_memh), (0x04A80
), (dmatxctl)))
;
2354 /* Disable arbiter to set MTQC */
2355 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS)((((struct ixgbe_osdep *)(hw)->back)->os_memt)->read_4
((((struct ixgbe_osdep *)(hw)->back)->os_memh), (0x04900
)))
;
2356 rttdcs |= IXGBE_RTTDCS_ARBDIS0x00000040;
2357 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs)((((struct ixgbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ixgbe_osdep *)(hw)->back)->os_memh), (0x04900
), (rttdcs)))
;
2358 IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB)((((struct ixgbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ixgbe_osdep *)(hw)->back)->os_memh), (0x08120
), (0x0)))
;
2359 rttdcs &= ~IXGBE_RTTDCS_ARBDIS0x00000040;
2360 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs)((((struct ixgbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ixgbe_osdep *)(hw)->back)->os_memh), (0x04900
), (rttdcs)))
;
2361 }
2362}
2363
2364/*********************************************************************
2365 *
2366 * Free all transmit rings.
2367 *
2368 **********************************************************************/
2369void
2370ixgbe_free_transmit_structures(struct ix_softc *sc)
2371{
2372 struct tx_ring *txr = sc->tx_rings;
2373 int i;
2374
2375 for (i = 0; i < sc->num_queues; i++, txr++)
2376 ixgbe_free_transmit_buffers(txr);
2377}
2378
2379/*********************************************************************
2380 *
2381 * Free transmit ring related data structures.
2382 *
2383 **********************************************************************/
2384void
2385ixgbe_free_transmit_buffers(struct tx_ring *txr)
2386{
2387 struct ix_softc *sc = txr->sc;
2388 struct ixgbe_tx_buf *tx_buffer;
2389 int i;
2390
2391 INIT_DEBUGOUT("free_transmit_ring: begin")if (0) printf("free_transmit_ring: begin" "\n");
2392
2393 if (txr->tx_buffers == NULL((void *)0))
2394 return;
2395
2396 tx_buffer = txr->tx_buffers;
2397 for (i = 0; i < sc->num_tx_desc; i++, tx_buffer++) {
2398 if (tx_buffer->map != NULL((void *)0) && tx_buffer->map->dm_nsegs > 0) {
2399 bus_dmamap_sync(txr->txdma.dma_tag, tx_buffer->map,(*(txr->txdma.dma_tag)->_dmamap_sync)((txr->txdma.dma_tag
), (tx_buffer->map), (0), (tx_buffer->map->dm_mapsize
), (0x08))
2400 0, tx_buffer->map->dm_mapsize,(*(txr->txdma.dma_tag)->_dmamap_sync)((txr->txdma.dma_tag
), (tx_buffer->map), (0), (tx_buffer->map->dm_mapsize
), (0x08))
2401 BUS_DMASYNC_POSTWRITE)(*(txr->txdma.dma_tag)->_dmamap_sync)((txr->txdma.dma_tag
), (tx_buffer->map), (0), (tx_buffer->map->dm_mapsize
), (0x08))
;
2402 bus_dmamap_unload(txr->txdma.dma_tag,(*(txr->txdma.dma_tag)->_dmamap_unload)((txr->txdma.
dma_tag), (tx_buffer->map))
2403 tx_buffer->map)(*(txr->txdma.dma_tag)->_dmamap_unload)((txr->txdma.
dma_tag), (tx_buffer->map))
;
2404 }
2405 if (tx_buffer->m_head != NULL((void *)0)) {
2406 m_freem(tx_buffer->m_head);
2407 tx_buffer->m_head = NULL((void *)0);
2408 }
2409 if (tx_buffer->map != NULL((void *)0)) {
2410 bus_dmamap_destroy(txr->txdma.dma_tag,(*(txr->txdma.dma_tag)->_dmamap_destroy)((txr->txdma
.dma_tag), (tx_buffer->map))
2411 tx_buffer->map)(*(txr->txdma.dma_tag)->_dmamap_destroy)((txr->txdma
.dma_tag), (tx_buffer->map))
;
2412 tx_buffer->map = NULL((void *)0);
2413 }
2414 }
2415
2416 if (txr->tx_buffers != NULL((void *)0))
2417 free(txr->tx_buffers, M_DEVBUF2,
2418 sc->num_tx_desc * sizeof(struct ixgbe_tx_buf));
2419 txr->tx_buffers = NULL((void *)0);
2420 txr->txtag = NULL((void *)0);
2421}
2422
2423/*********************************************************************
2424 *
2425 * Advanced Context Descriptor setup for VLAN or CSUM
2426 *
2427 **********************************************************************/
2428
2429int
2430ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp,
2431 uint32_t *cmd_type_len, uint32_t *olinfo_status)
2432{
2433 struct ixgbe_adv_tx_context_desc *TXD;
2434 struct ixgbe_tx_buf *tx_buffer;
2435#if NVLAN1 > 0
2436 struct ether_vlan_header *eh;
2437#else
2438 struct ether_header *eh;
2439#endif
2440 struct ip *ip;
2441#ifdef notyet
2442 struct ip6_hdr *ip6;
2443#endif
2444 struct mbuf *m;
2445 int ipoff;
2446 uint32_t vlan_macip_lens = 0, type_tucmd_mlhl = 0;
2447 int ehdrlen, ip_hlen = 0;
2448 uint16_t etype;
2449 uint8_t ipproto = 0;
2450 int offload = TRUE1;
2451 int ctxd = txr->next_avail_desc;
2452#if NVLAN1 > 0
2453 uint16_t vtag = 0;
2454#endif
2455
2456#if notyet
2457 /* First check if TSO is to be used */
2458 if (mp->m_pkthdrM_dat.MH.MH_pkthdr.csum_flags & CSUM_TSO)
2459 return (ixgbe_tso_setup(txr, mp, cmd_type_len, olinfo_status));
2460#endif
2461
2462 if ((mp->m_pkthdrM_dat.MH.MH_pkthdr.csum_flags & (M_TCP_CSUM_OUT0x0002 | M_UDP_CSUM_OUT0x0004)) == 0)
2463 offload = FALSE0;
2464
2465 /* Indicate the whole packet as payload when not doing TSO */
2466 *olinfo_status |= mp->m_pkthdrM_dat.MH.MH_pkthdr.len << IXGBE_ADVTXD_PAYLEN_SHIFT14;
2467
2468 /* Now ready a context descriptor */
2469 TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
2470 tx_buffer = &txr->tx_buffers[ctxd];
2471
2472 /*
2473 * In advanced descriptors the vlan tag must
2474 * be placed into the descriptor itself. Hence
2475 * we need to make one even if not doing offloads.
2476 */
2477#if NVLAN1 > 0
2478 if (mp->m_flagsm_hdr.mh_flags & M_VLANTAG0x0020) {
2479 vtag = mp->m_pkthdrM_dat.MH.MH_pkthdr.ether_vtag;
2480 vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT16);
2481 } else
2482#endif
2483 if (offload == FALSE0)
2484 return (0); /* No need for CTX */
2485
2486 /*
2487 * Determine where frame payload starts.
2488 * Jump over vlan headers if already present,
2489 * helpful for QinQ too.
2490 */
2491 if (mp->m_lenm_hdr.mh_len < sizeof(struct ether_header))
2492 return (-1);
2493#if NVLAN1 > 0
2494 eh = mtod(mp, struct ether_vlan_header *)((struct ether_vlan_header *)((mp)->m_hdr.mh_data));
2495 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)(__uint16_t)(__builtin_constant_p(0x8100) ? (__uint16_t)(((__uint16_t
)(0x8100) & 0xffU) << 8 | ((__uint16_t)(0x8100) &
0xff00U) >> 8) : __swap16md(0x8100))
) {
2496 if (mp->m_lenm_hdr.mh_len < sizeof(struct ether_vlan_header))
2497 return (-1);
2498 etype = ntohs(eh->evl_proto)(__uint16_t)(__builtin_constant_p(eh->evl_proto) ? (__uint16_t
)(((__uint16_t)(eh->evl_proto) & 0xffU) << 8 | (
(__uint16_t)(eh->evl_proto) & 0xff00U) >> 8) : __swap16md
(eh->evl_proto))
;
2499 ehdrlen = ETHER_HDR_LEN((6 * 2) + 2) + ETHER_VLAN_ENCAP_LEN4;
2500 } else {
2501 etype = ntohs(eh->evl_encap_proto)(__uint16_t)(__builtin_constant_p(eh->evl_encap_proto) ? (
__uint16_t)(((__uint16_t)(eh->evl_encap_proto) & 0xffU
) << 8 | ((__uint16_t)(eh->evl_encap_proto) & 0xff00U
) >> 8) : __swap16md(eh->evl_encap_proto))
;
2502 ehdrlen = ETHER_HDR_LEN((6 * 2) + 2);
2503 }
2504#else
2505 eh = mtod(mp, struct ether_header *)((struct ether_header *)((mp)->m_hdr.mh_data));
2506 etype = ntohs(eh->ether_type)(__uint16_t)(__builtin_constant_p(eh->ether_type) ? (__uint16_t
)(((__uint16_t)(eh->ether_type) & 0xffU) << 8 | (
(__uint16_t)(eh->ether_type) & 0xff00U) >> 8) : __swap16md
(eh->ether_type))
;
2507 ehdrlen = ETHER_HDR_LEN((6 * 2) + 2);
2508#endif
2509
2510 /* Set the ether header length */
2511 vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT9;
2512
2513 switch (etype) {
2514 case ETHERTYPE_IP0x0800:
2515 if (mp->m_pkthdrM_dat.MH.MH_pkthdr.len < ehdrlen + sizeof(*ip))
2516 return (-1);
2517 m = m_getptr(mp, ehdrlen, &ipoff);
2518 KASSERT(m != NULL && m->m_len - ipoff >= sizeof(*ip))((m != ((void *)0) && m->m_hdr.mh_len - ipoff >=
sizeof(*ip)) ? (void)0 : __assert("diagnostic ", "/usr/src/sys/dev/pci/if_ix.c"
, 2518, "m != NULL && m->m_len - ipoff >= sizeof(*ip)"
))
;
2519 ip = (struct ip *)(m->m_datam_hdr.mh_data + ipoff);
2520 ip_hlen = ip->ip_hl << 2;
2521 ipproto = ip->ip_p;
2522 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV40x00000400;
2523 break;
2524#ifdef notyet
2525 case ETHERTYPE_IPV60x86DD:
2526 if (mp->m_pkthdrM_dat.MH.MH_pkthdr.len < ehdrlen + sizeof(*ip6))
2527 return (-1);
2528 m = m_getptr(mp, ehdrlen, &ipoff);
2529 KASSERT(m != NULL && m->m_len - ipoff >= sizeof(*ip6))((m != ((void *)0) && m->m_hdr.mh_len - ipoff >=
sizeof(*ip6)) ? (void)0 : __assert("diagnostic ", "/usr/src/sys/dev/pci/if_ix.c"
, 2529, "m != NULL && m->m_len - ipoff >= sizeof(*ip6)"
))
;
2530 ip6 = (struct ip6 *)(m->m_datam_hdr.mh_data + ipoff);
2531 ip_hlen = sizeof(*ip6);
2532 /* XXX-BZ this will go badly in case of ext hdrs. */
2533 ipproto = ip6->ip6_nxt;
2534 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV60x00000000;
2535 break;
2536#endif
2537 default:
2538 offload = FALSE0;
2539 break;
2540 }
2541
2542 vlan_macip_lens |= ip_hlen;
2543 type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT0x20000000 | IXGBE_ADVTXD_DTYP_CTXT0x00200000;
2544
2545 switch (ipproto) {
2546 case IPPROTO_TCP6:
2547 if (mp->m_pkthdrM_dat.MH.MH_pkthdr.csum_flags & M_TCP_CSUM_OUT0x0002)
2548 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP0x00000800;
2549 break;
2550 case IPPROTO_UDP17:
2551 if (mp->m_pkthdrM_dat.MH.MH_pkthdr.csum_flags & M_UDP_CSUM_OUT0x0004)
2552 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP0x00000000;
2553 break;
2554 default:
2555 offload = FALSE0;
2556 break;
2557 }
2558
2559 if (offload) /* For the TX descriptor setup */
2560 *olinfo_status |= IXGBE_TXD_POPTS_TXSM0x02 << 8;
2561
2562 /* Now copy bits into descriptor */
2563 TXD->vlan_macip_lens = htole32(vlan_macip_lens)((__uint32_t)(vlan_macip_lens));
2564 TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl)((__uint32_t)(type_tucmd_mlhl));
2565 TXD->seqnum_seed = htole32(0)((__uint32_t)(0));
2566 TXD->mss_l4len_idx = htole32(0)((__uint32_t)(0));
2567
2568 tx_buffer->m_head = NULL((void *)0);
2569 tx_buffer->eop_index = -1;
2570
2571 return (1);
2572}
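
A worked example of the field packing above, assuming an untagged IPv4/TCP frame with no IP options:

	/*
	 * ehdrlen = ETHER_HDR_LEN = 14 and ip_hlen = 20, so
	 *   vlan_macip_lens = (14 << IXGBE_ADVTXD_MACLEN_SHIFT) | 20 = 0x1c14
	 * and M_TCP_CSUM_OUT adds IXGBE_ADVTXD_TUCMD_L4T_TCP to
	 * type_tucmd_mlhl, alongside DEXT, DTYP_CTXT and TUCMD_IPV4.
	 */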
2573
2574/**********************************************************************
2575 *
2576 * Examine each tx_buffer in the used queue. If the hardware is done
2577 * processing the packet then free associated resources. The
2578 * tx_buffer is put back on the free queue.
2579 *
2580 **********************************************************************/
2581int
2582ixgbe_txeof(struct tx_ring *txr)
2583{
2584 struct ix_softc *sc = txr->sc;
2585 struct ifqueue *ifq = txr->ifq;
2586 struct ifnet *ifp = &sc->arpcom.ac_if;
2587 unsigned int head, tail, last;
2588 struct ixgbe_tx_buf *tx_buffer;
2589 struct ixgbe_legacy_tx_desc *tx_desc;
2590
2591 if (!ISSET(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) & (0x40)))
2592 return FALSE0;
2593
2594 head = txr->next_avail_desc;
2595 tail = txr->next_to_clean;
2596
2597 membar_consumer()do { __asm volatile("" ::: "memory"); } while (0);
2598
2599 if (head == tail)
2600 return (FALSE0);
2601
2602 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,(*(txr->txdma.dma_tag)->_dmamap_sync)((txr->txdma.dma_tag
), (txr->txdma.dma_map), (0), (txr->txdma.dma_map->dm_mapsize
), (0x02))
2603 0, txr->txdma.dma_map->dm_mapsize,(*(txr->txdma.dma_tag)->_dmamap_sync)((txr->txdma.dma_tag
), (txr->txdma.dma_map), (0), (txr->txdma.dma_map->dm_mapsize
), (0x02))
2604 BUS_DMASYNC_POSTREAD)(*(txr->txdma.dma_tag)->_dmamap_sync)((txr->txdma.dma_tag
), (txr->txdma.dma_map), (0), (txr->txdma.dma_map->dm_mapsize
), (0x02))
;
2605
2606 for (;;) {
2607 tx_buffer = &txr->tx_buffers[tail];
2608 last = tx_buffer->eop_index;
2609 tx_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
2610
2611 if (!ISSET(tx_desc->upper.fields.status, IXGBE_TXD_STAT_DD)((tx_desc->upper.fields.status) & (0x00000001)))
2612 break;
2613
2614 bus_dmamap_sync(txr->txdma.dma_tag, tx_buffer->map,(*(txr->txdma.dma_tag)->_dmamap_sync)((txr->txdma.dma_tag
), (tx_buffer->map), (0), (tx_buffer->map->dm_mapsize
), (0x08))
2615 0, tx_buffer->map->dm_mapsize, BUS_DMASYNC_POSTWRITE)(*(txr->txdma.dma_tag)->_dmamap_sync)((txr->txdma.dma_tag
), (tx_buffer->map), (0), (tx_buffer->map->dm_mapsize
), (0x08))
;
2616 bus_dmamap_unload(txr->txdma.dma_tag, tx_buffer->map)(*(txr->txdma.dma_tag)->_dmamap_unload)((txr->txdma.
dma_tag), (tx_buffer->map))
;
2617 m_freem(tx_buffer->m_head);
2618
2619 tx_buffer->m_head = NULL((void *)0);
2620 tx_buffer->eop_index = -1;
2621
2622 tail = last + 1;
2623 if (tail == sc->num_tx_desc)
2624 tail = 0;
2625 if (head == tail) {
2626 /* All clean, turn off the timer */
2627 ifp->if_timer = 0;
2628 break;
2629 }
2630 }
2631
2632 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,(*(txr->txdma.dma_tag)->_dmamap_sync)((txr->txdma.dma_tag
), (txr->txdma.dma_map), (0), (txr->txdma.dma_map->dm_mapsize
), (0x01))
2633 0, txr->txdma.dma_map->dm_mapsize,(*(txr->txdma.dma_tag)->_dmamap_sync)((txr->txdma.dma_tag
), (txr->txdma.dma_map), (0), (txr->txdma.dma_map->dm_mapsize
), (0x01))
2634 BUS_DMASYNC_PREREAD)(*(txr->txdma.dma_tag)->_dmamap_sync)((txr->txdma.dma_tag
), (txr->txdma.dma_map), (0), (txr->txdma.dma_map->dm_mapsize
), (0x01))
;
2635
2636 membar_producer()do { __asm volatile("" ::: "memory"); } while (0);
2637
2638 txr->next_to_clean = tail;
2639
2640 if (ifq_is_oactive(ifq))
2641 ifq_restart(ifq);
2642
2643 return TRUE1;
2644}
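
An index-wrap example for the cleaning loop above (a ring size of 256 is assumed for illustration):

	/*
	 * With next_to_clean == 250 and a completed packet whose
	 * eop_index == 255, tail becomes 255 + 1 == 256 and wraps to 0;
	 * cleaning stops once tail catches up with next_avail_desc (head).
	 */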
2645
2646/*********************************************************************
2647 *
2648 * Get a buffer from system mbuf buffer pool.
2649 *
2650 **********************************************************************/
2651int
2652ixgbe_get_buf(struct rx_ring *rxr, int i)
2653{
2654 struct ix_softc *sc = rxr->sc;
2655 struct ixgbe_rx_buf *rxbuf;
2656 struct mbuf *mp;
2657 int error;
2658 union ixgbe_adv_rx_desc *rxdesc;
2659
2660 rxbuf = &rxr->rx_buffers[i];
2661 rxdesc = &rxr->rx_base[i];
2662 if (rxbuf->buf) {
2663 printf("%s: ixgbe_get_buf: slot %d already has an mbuf\n",
2664 sc->dev.dv_xname, i);
2665 return (ENOBUFS55);
2666 }
2667
2668 /* needed in any case, so preallocate since this one will fail for sure */
2669 mp = MCLGETL(NULL, M_DONTWAIT, sc->rx_mbuf_sz)m_clget((((void *)0)), (0x0002), (sc->rx_mbuf_sz));
2670 if (!mp)
2671 return (ENOBUFS55);
2672
2673 mp->m_datam_hdr.mh_data += (mp->m_extM_dat.MH.MH_dat.MH_ext.ext_size - sc->rx_mbuf_sz);
2674 mp->m_lenm_hdr.mh_len = mp->m_pkthdrM_dat.MH.MH_pkthdr.len = sc->rx_mbuf_sz;
2675
2676 error = bus_dmamap_load_mbuf(rxr->rxdma.dma_tag, rxbuf->map,(*(rxr->rxdma.dma_tag)->_dmamap_load_mbuf)((rxr->rxdma
.dma_tag), (rxbuf->map), (mp), (0x0001))
2677 mp, BUS_DMA_NOWAIT)(*(rxr->rxdma.dma_tag)->_dmamap_load_mbuf)((rxr->rxdma
.dma_tag), (rxbuf->map), (mp), (0x0001))
;
2678 if (error) {
2679 m_freem(mp);
2680 return (error);
2681 }
2682
2683 bus_dmamap_sync(rxr->rxdma.dma_tag, rxbuf->map,(*(rxr->rxdma.dma_tag)->_dmamap_sync)((rxr->rxdma.dma_tag
), (rxbuf->map), (0), (rxbuf->map->dm_mapsize), (0x01
))
2684 0, rxbuf->map->dm_mapsize, BUS_DMASYNC_PREREAD)(*(rxr->rxdma.dma_tag)->_dmamap_sync)((rxr->rxdma.dma_tag
), (rxbuf->map), (0), (rxbuf->map->dm_mapsize), (0x01
))
;
2685 rxbuf->buf = mp;
2686
2687 rxdesc->read.pkt_addr = htole64(rxbuf->map->dm_segs[0].ds_addr)((__uint64_t)(rxbuf->map->dm_segs[0].ds_addr));
2688
2689 return (0);
2690}
2691
2692/*********************************************************************
2693 *
2694 * Allocate memory for rx_buffer structures. Since we use one
2695 * rx_buffer per received packet, the maximum number of rx_buffers
2696 * that we'll need is equal to the number of receive descriptors
2697 * that we've allocated.
2698 *
2699 **********************************************************************/
2700int
2701ixgbe_allocate_receive_buffers(struct rx_ring *rxr)
2702{
2703 struct ix_softc *sc = rxr->sc;
2704 struct ifnet *ifp = &sc->arpcom.ac_if;
2705 struct ixgbe_rx_buf *rxbuf;
2706 int i, error;
2707
2708 if (!(rxr->rx_buffers = mallocarray(sc->num_rx_desc,
2709 sizeof(struct ixgbe_rx_buf), M_DEVBUF2, M_NOWAIT0x0002 | M_ZERO0x0008))) {
2710 printf("%s: Unable to allocate rx_buffer memory\n",
2711 ifp->if_xname);
2712 error = ENOMEM12;
2713 goto fail;
2714 }
2715
2716 rxbuf = rxr->rx_buffers;
2717 for (i = 0; i < sc->num_rx_desc; i++, rxbuf++) {
2718 error = bus_dmamap_create(rxr->rxdma.dma_tag, 16 * 1024, 1,(*(rxr->rxdma.dma_tag)->_dmamap_create)((rxr->rxdma.
dma_tag), (16 * 1024), (1), (16 * 1024), (0), (0x0001), (&
rxbuf->map))
2719 16 * 1024, 0, BUS_DMA_NOWAIT, &rxbuf->map)(*(rxr->rxdma.dma_tag)->_dmamap_create)((rxr->rxdma.
dma_tag), (16 * 1024), (1), (16 * 1024), (0), (0x0001), (&
rxbuf->map))
;
2720 if (error) {
2721 printf("%s: Unable to create Pack DMA map\n",
2722 ifp->if_xname);
2723 goto fail;
2724 }
2725 }
2726 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, 0,(*(rxr->rxdma.dma_tag)->_dmamap_sync)((rxr->rxdma.dma_tag
), (rxr->rxdma.dma_map), (0), (rxr->rxdma.dma_map->dm_mapsize
), (0x01 | 0x04))
2727 rxr->rxdma.dma_map->dm_mapsize,(*(rxr->rxdma.dma_tag)->_dmamap_sync)((rxr->rxdma.dma_tag
), (rxr->rxdma.dma_map), (0), (rxr->rxdma.dma_map->dm_mapsize
), (0x01 | 0x04))
2728 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE)(*(rxr->rxdma.dma_tag)->_dmamap_sync)((rxr->rxdma.dma_tag
), (rxr->rxdma.dma_map), (0), (rxr->rxdma.dma_map->dm_mapsize
), (0x01 | 0x04))
;
2729
2730 return (0);
2731
2732fail:
2733 return (error);
2734}
2735
2736 /*********************************************************************
2737  *
2738  * Initialize a receive ring and its buffers.
2739  *
2740  **********************************************************************/
2741 int
2742 ixgbe_setup_receive_ring(struct rx_ring *rxr)
2743 {
2744 	struct ix_softc		*sc = rxr->sc;
2745 	struct ifnet		*ifp = &sc->arpcom.ac_if;
2746 	int			rsize, error;
2747 
2748 	rsize = roundup2(sc->num_rx_desc *
2749 	    sizeof(union ixgbe_adv_rx_desc), 4096);
2750 	/* Clear the ring contents */
2751 	bzero((void *)rxr->rx_base, rsize);
2752 
2753 	if ((error = ixgbe_allocate_receive_buffers(rxr)) != 0)
2754 		return (error);
2755 
2756 	/* Setup our descriptor indices */
2757 	rxr->next_to_check = 0;
2758 	rxr->last_desc_filled = sc->num_rx_desc - 1;
2759 
2760 	if_rxr_init(&rxr->rx_ring, 2 * ((ifp->if_hardmtu / MCLBYTES) + 1),
2761 	    sc->num_rx_desc - 1);
2762 
2763 	ixgbe_rxfill(rxr);
2764 	if (if_rxr_inuse(&rxr->rx_ring) == 0) {
2765 		printf("%s: unable to fill any rx descriptors\n",
2766 		    sc->dev.dv_xname);
2767 		return (ENOBUFS);
2768 	}
2769 
2770 	return (0);
2771 }
2772
2773 int
2774 ixgbe_rxfill(struct rx_ring *rxr)
2775 {
2776 	struct ix_softc	*sc = rxr->sc;
2777 	int		post = 0;
2778 	u_int		slots;
2779 	int		i;
2780 
2781 	bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
2782 	    0, rxr->rxdma.dma_map->dm_mapsize,
2783 	    BUS_DMASYNC_POSTWRITE);
2784 
2785 	i = rxr->last_desc_filled;
2786 	for (slots = if_rxr_get(&rxr->rx_ring, sc->num_rx_desc);
2787 	    slots > 0; slots--) {
2788 		if (++i == sc->num_rx_desc)
2789 			i = 0;
2790 
2791 		if (ixgbe_get_buf(rxr, i) != 0)
2792 			break;
2793 
2794 		rxr->last_desc_filled = i;
2795 		post = 1;
2796 	}
2797 
2798 	bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
2799 	    0, rxr->rxdma.dma_map->dm_mapsize,
2800 	    BUS_DMASYNC_PREWRITE);
2801 
2802 	if_rxr_put(&rxr->rx_ring, slots);
2803 
2804 	return (post);
2805 }
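/*
 * Note on the refill accounting above: if_rxr_get() reserves up to
 * num_rx_desc slots from the ring accounting, the loop consumes one
 * reservation per descriptor it manages to fill, and if_rxr_put()
 * returns whatever was left over when ixgbe_get_buf() failed or the
 * reservation ran out.  "post" only signals whether at least one new
 * descriptor was made available to the hardware.
 */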
2806
2807 void
2808 ixgbe_rxrefill(void *xrxr)
2809 {
2810 	struct rx_ring *rxr = xrxr;
2811 	struct ix_softc *sc = rxr->sc;
2812 
2813 	if (ixgbe_rxfill(rxr)) {
2814 		/* Advance the Rx Queue "Tail Pointer" */
2815 		IXGBE_WRITE_REG(&sc->hw, IXGBE_RDT(rxr->me),
2816 		    rxr->last_desc_filled);
2817 	} else if (if_rxr_inuse(&rxr->rx_ring) == 0)
2818 		timeout_add(&rxr->rx_refill, 1);
2819 
2820 }
2821
2822 /*********************************************************************
2823  *
2824  * Initialize all receive rings.
2825  *
2826  **********************************************************************/
2827 int
2828 ixgbe_setup_receive_structures(struct ix_softc *sc)
2829 {
2830 	struct rx_ring *rxr = sc->rx_rings;
2831 	int i;
2832 
2833 	for (i = 0; i < sc->num_queues; i++, rxr++)
2834 		if (ixgbe_setup_receive_ring(rxr))
2835 			goto fail;
2836 
2837 	return (0);
2838 fail:
2839 	ixgbe_free_receive_structures(sc);
2840 	return (ENOBUFS);
2841 }
2842
2843 /*********************************************************************
2844  *
2845  * Setup receive registers and features.
2846  *
2847  **********************************************************************/
2848 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
2849 
2850 void
2851 ixgbe_initialize_receive_units(struct ix_softc *sc)
2852 {
2853 	struct rx_ring	*rxr = sc->rx_rings;
2854 	struct ixgbe_hw	*hw = &sc->hw;
2855 	uint32_t	bufsz, fctrl, srrctl, rxcsum;
2856 	uint32_t	hlreg;
2857 	int		i;
2858 
2859 	/*
2860 	 * Make sure receives are disabled while
2861 	 * setting up the descriptor ring
2862 	 */
2863 	ixgbe_disable_rx(hw);
2864 
2865 	/* Enable broadcasts */
2866 	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2867 	fctrl |= IXGBE_FCTRL_BAM;
2868 	if (sc->hw.mac.type == ixgbe_mac_82598EB) {
2869 		fctrl |= IXGBE_FCTRL_DPF;
2870 		fctrl |= IXGBE_FCTRL_PMCF;
2871 	}
2872 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
2873 
2874 	/* Always enable jumbo frame reception */
2875 	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2876 	hlreg |= IXGBE_HLREG0_JUMBOEN;
2877 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
2878 
2879 	bufsz = (sc->rx_mbuf_sz - ETHER_ALIGN) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
2880 
2881 	for (i = 0; i < sc->num_queues; i++, rxr++) {
2882 		uint64_t rdba = rxr->rxdma.dma_map->dm_segs[0].ds_addr;
2883 
2884 		/* Setup the Base and Length of the Rx Descriptor Ring */
2885 		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(i),
2886 		    (rdba & 0x00000000ffffffffULL));
2887 		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(i), (rdba >> 32));
2888 		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(i),
2889 		    sc->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
2890 
2891 		/* Set up the SRRCTL register */
2892 		srrctl = bufsz | IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
2893 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(i), srrctl);
2894 
2895 		/* Setup the HW Rx Head and Tail Descriptor Pointers */
2896 		IXGBE_WRITE_REG(hw, IXGBE_RDH(i), 0);
2897 		IXGBE_WRITE_REG(hw, IXGBE_RDT(i), 0);
2898 	}
2899 
2900 	if (sc->hw.mac.type != ixgbe_mac_82598EB) {
2901 		uint32_t psrtype = IXGBE_PSRTYPE_TCPHDR |
2902 		    IXGBE_PSRTYPE_UDPHDR |
2903 		    IXGBE_PSRTYPE_IPV4HDR |
2904 		    IXGBE_PSRTYPE_IPV6HDR;
2905 		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
2906 	}
2907 
2908 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
2909 	rxcsum &= ~IXGBE_RXCSUM_PCSD;
2910 
2911 	ixgbe_initialize_rss_mapping(sc);
2912 
2913 	/* Setup RSS */
2914 	if (sc->num_queues > 1) {
2915 		/* RSS and RX IPP Checksum are mutually exclusive */
2916 		rxcsum |= IXGBE_RXCSUM_PCSD;
2917 	}
2918 
2919 	/* Map QPRC/QPRDC/QPTC on a per queue basis */
2920 	ixgbe_map_queue_statistics(sc);
2921 
2922 	/* This is useful for calculating UDP/IP fragment checksums */
2923 	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
2924 		rxcsum |= IXGBE_RXCSUM_IPPCSE;
2925 
2926 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
2927 }
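/*
 * Note on the per-queue setup above: the 64-bit descriptor ring base
 * address is split across RDBAL (low 32 bits) and RDBAH (high 32 bits),
 * and SRRCTL's packet buffer size field is written in 1 KB units, which
 * is why rx_mbuf_sz is shifted right by IXGBE_SRRCTL_BSIZEPKT_SHIFT
 * (10 in this build, i.e. divided by 1024) after subtracting ETHER_ALIGN.
 */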
2928
2929 void
2930 ixgbe_initialize_rss_mapping(struct ix_softc *sc)
2931 {
2932 	struct ixgbe_hw	*hw = &sc->hw;
2933 	uint32_t reta = 0, mrqc, rss_key[10];
2934 	int i, j, queue_id, table_size, index_mult;
2935 
2936 	/* set up random bits */
2937 	stoeplitz_to_key(&rss_key, sizeof(rss_key));
2938 
2939 	/* Set multiplier for RETA setup and table size based on MAC */
2940 	index_mult = 0x1;
2941 	table_size = 128;
2942 	switch (sc->hw.mac.type) {
2943 	case ixgbe_mac_82598EB:
2944 		index_mult = 0x11;
2945 		break;
2946 	case ixgbe_mac_X550:
2947 	case ixgbe_mac_X550EM_x:
2948 	case ixgbe_mac_X550EM_a:
2949 		table_size = 512;
2950 		break;
2951 	default:
2952 		break;
2953 	}
2954 
2955 	/* Set up the redirection table */
2956 	for (i = 0, j = 0; i < table_size; i++, j++) {
2957 		if (j == sc->num_queues) j = 0;
2958 		queue_id = (j * index_mult);
2959 		/*
2960 		 * The low 8 bits are for hash value (n+0);
2961 		 * The next 8 bits are for hash value (n+1), etc.
2962 		 */
2963 		reta = reta >> 8;
2964 		reta = reta | (((uint32_t)queue_id) << 24);
2965 		if ((i & 3) == 3) {
2966 			if (i < 128)
2967 				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
2968 			else
2969 				IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
2970 				    reta);
2971 			reta = 0;
2972 		}
2973 	}
2974 
2975 	/* Now fill our hash function seeds */
2976 	for (i = 0; i < 10; i++)
2977 		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
2978 
2979 	/*
2980 	 * Disable UDP - IP fragments aren't currently being handled
2981 	 * and so we end up with a mix of 2-tuple and 4-tuple
2982 	 * traffic.
2983 	 */
2984 	mrqc = IXGBE_MRQC_RSSEN
2985 	    | IXGBE_MRQC_RSS_FIELD_IPV4
2986 	    | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
2987 	    | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
2988 	    | IXGBE_MRQC_RSS_FIELD_IPV6_EX
2989 	    | IXGBE_MRQC_RSS_FIELD_IPV6
2990 	    | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
2991 	    ;
2992 	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2993 }
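/*
 * The redirection table loop above packs four 8-bit queue indices into
 * each 32-bit RETA/ERETA register: every iteration shifts the previous
 * bytes down and places the new queue_id in the top byte, and every
 * fourth iteration ((i & 3) == 3) flushes the assembled word.  With two
 * queues and index_mult == 1 (the non-82598 case), for example, the
 * first register written would be 0x01000100, mapping hash values
 * 0..3 to queues 0, 1, 0, 1.
 */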
2994
2995 /*********************************************************************
2996  *
2997  * Free all receive rings.
2998  *
2999  **********************************************************************/
3000 void
3001 ixgbe_free_receive_structures(struct ix_softc *sc)
3002 {
3003 	struct rx_ring	*rxr;
3004 	int		i;
3005 
3006 	for (i = 0, rxr = sc->rx_rings; i < sc->num_queues; i++, rxr++)
3007 		if_rxr_init(&rxr->rx_ring, 0, 0);
3008 
3009 	for (i = 0, rxr = sc->rx_rings; i < sc->num_queues; i++, rxr++)
3010 		ixgbe_free_receive_buffers(rxr);
3011 }
3012
3013 /*********************************************************************
3014  *
3015  * Free receive ring data structures
3016  *
3017  **********************************************************************/
3018 void
3019 ixgbe_free_receive_buffers(struct rx_ring *rxr)
3020 {
3021 	struct ix_softc		*sc;
3022 	struct ixgbe_rx_buf	*rxbuf;
3023 	int			i;
3024 
3025 	sc = rxr->sc;
3026 	if (rxr->rx_buffers != NULL) {
3027 		for (i = 0; i < sc->num_rx_desc; i++) {
3028 			rxbuf = &rxr->rx_buffers[i];
3029 			if (rxbuf->buf != NULL) {
3030 				bus_dmamap_sync(rxr->rxdma.dma_tag, rxbuf->map,
3031 				    0, rxbuf->map->dm_mapsize,
3032 				    BUS_DMASYNC_POSTREAD);
3033 				bus_dmamap_unload(rxr->rxdma.dma_tag,
3034 				    rxbuf->map);
3035 				m_freem(rxbuf->buf);
3036 				rxbuf->buf = NULL;
3037 			}
3038 			bus_dmamap_destroy(rxr->rxdma.dma_tag, rxbuf->map);
3039 			rxbuf->map = NULL;
3040 		}
3041 		free(rxr->rx_buffers, M_DEVBUF,
3042 		    sc->num_rx_desc * sizeof(struct ixgbe_rx_buf));
3043 		rxr->rx_buffers = NULL;
3044 	}
3045 }
3046
3047 /*********************************************************************
3048  *
3049  * This routine executes in interrupt context. It replenishes
3050  * the mbufs in the descriptor and sends data which has been
3051  * dma'ed into host memory to upper layer.
3052  *
3053  *********************************************************************/
3054 int
3055 ixgbe_rxeof(struct rx_ring *rxr)
3056 {
3057 	struct ix_softc		*sc = rxr->sc;
3058 	struct ifnet		*ifp = &sc->arpcom.ac_if;
3059 	struct mbuf_list	 ml = MBUF_LIST_INITIALIZER();
3060 	struct mbuf		*mp, *sendmp;
3061 	uint8_t			 eop = 0;
3062 	uint16_t		 len, vtag;
3063 	uint32_t		 staterr = 0, ptype;
3064 	struct ixgbe_rx_buf	*rxbuf, *nxbuf;
3065 	union ixgbe_adv_rx_desc	*rxdesc;
3066 	size_t			 dsize = sizeof(union ixgbe_adv_rx_desc);
3067 	int			 i, nextp;
3068 
3069 	if (!ISSET(ifp->if_flags, IFF_RUNNING))
3070 		return FALSE;
3071 
3072 	i = rxr->next_to_check;
3073 	while (if_rxr_inuse(&rxr->rx_ring) > 0) {
3074 		uint32_t hash;
3075 		uint16_t hashtype;
3076 
3077 		bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
3078 		    dsize * i, dsize, BUS_DMASYNC_POSTREAD);
3079 
3080 		rxdesc = &rxr->rx_base[i];
3081 		staterr = letoh32(rxdesc->wb.upper.status_error);
3082 		if (!ISSET(staterr, IXGBE_RXD_STAT_DD)) {
3083 			bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
3084 			    dsize * i, dsize,
3085 			    BUS_DMASYNC_PREREAD);
3086 			break;
3087 		}
3088 
3089 		/* Zero out the receive descriptors status */
3090 		rxdesc->wb.upper.status_error = 0;
3091 		rxbuf = &rxr->rx_buffers[i];
3092 
3093 		/* pull the mbuf off the ring */
3094 		bus_dmamap_sync(rxr->rxdma.dma_tag, rxbuf->map, 0,
3095 		    rxbuf->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
3096 		bus_dmamap_unload(rxr->rxdma.dma_tag, rxbuf->map);
3097 
3098 		mp = rxbuf->buf;
3099 		len = letoh16(rxdesc->wb.upper.length);
3100 		ptype = letoh32(rxdesc->wb.lower.lo_dword.data) &
3101 		    IXGBE_RXDADV_PKTTYPE_MASK;
3102 		vtag = letoh16(rxdesc->wb.upper.vlan);
3103 		eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0);
3104 		hash = lemtoh32(&rxdesc->wb.lower.hi_dword.rss);
3105 		hashtype =
3106 		    lemtoh16(&rxdesc->wb.lower.lo_dword.hs_rss.pkt_info) &
3107 		    IXGBE_RXDADV_RSSTYPE_MASK;
3108 
3109 		if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) {
3110 			if (rxbuf->fmp) {
3111 				m_freem(rxbuf->fmp);
3112 				rxbuf->fmp = NULL;
3113 			}
3114 
3115 			m_freem(mp);
3116 			rxbuf->buf = NULL;
3117 			goto next_desc;
3118 		}
3119 
3120 		if (mp == NULL) {
3121 			panic("%s: ixgbe_rxeof: NULL mbuf in slot %d "
3122 			    "(nrx %d, filled %d)", sc->dev.dv_xname,
3123 			    i, if_rxr_inuse(&rxr->rx_ring),
3124 			    rxr->last_desc_filled);
3125 		}
3126 
3127 		/* Currently no HW RSC support of 82599 */
3128 		if (!eop) {
3129 			/*
3130 			 * Figure out the next descriptor of this frame.
3131 			 */
3132 			nextp = i + 1;
3133 			if (nextp == sc->num_rx_desc)
3134 				nextp = 0;
3135 			nxbuf = &rxr->rx_buffers[nextp];
3136 			/* prefetch(nxbuf); */
3137 		}
3138 
3139 		/*
3140 		 * Rather than using the fmp/lmp global pointers
3141 		 * we now keep the head of a packet chain in the
3142 		 * buffer struct and pass this along from one
3143 		 * descriptor to the next, until we get EOP.
3144 		 */
3145 		mp->m_len = len;
3146 		/*
3147 		 * See if there is a stored head
3148 		 * that determines what we are
3149 		 */
3150 		sendmp = rxbuf->fmp;
3151 		rxbuf->buf = rxbuf->fmp = NULL;
3152 
3153 		if (sendmp != NULL) /* secondary frag */
3154 			sendmp->m_pkthdr.len += mp->m_len;
3155 		else {
3156 			/* first desc of a non-ps chain */
3157 			sendmp = mp;
3158 			sendmp->m_pkthdr.len = mp->m_len;
3159 #if NVLAN > 0
3160 			if (staterr & IXGBE_RXD_STAT_VP) {
3161 				sendmp->m_pkthdr.ether_vtag = vtag;
3162 				sendmp->m_flags |= M_VLANTAG;
3163 			}
3164 #endif
3165 		}
3166 
3167 		/* Pass the head pointer on */
3168 		if (eop == 0) {
3169 			nxbuf->fmp = sendmp;
3170 			sendmp = NULL;
3171 			mp->m_next = nxbuf->buf;
3172 		} else { /* Sending this frame? */
3173 			ixgbe_rx_checksum(staterr, sendmp, ptype);
3174 
3175 			if (hashtype != IXGBE_RXDADV_RSSTYPE_NONE) {
3176 				sendmp->m_pkthdr.ph_flowid = hash;
3177 				SET(sendmp->m_pkthdr.csum_flags, M_FLOWID);
3178 			}
3179 
3180 			ml_enqueue(&ml, sendmp);
3181 		}
3182 next_desc:
3183 		if_rxr_put(&rxr->rx_ring, 1);
3184 		bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
3185 		    dsize * i, dsize,
3186 		    BUS_DMASYNC_PREREAD);
3187 
3188 		/* Advance our pointers to the next descriptor. */
3189 		if (++i == sc->num_rx_desc)
3190 			i = 0;
3191 	}
3192 	rxr->next_to_check = i;
3193 
3194 	if (ifiq_input(rxr->ifiq, &ml))
3195 		if_rxr_livelocked(&rxr->rx_ring);
3196 
3197 	if (!(staterr & IXGBE_RXD_STAT_DD))
3198 		return FALSE;
3199 
3200 	return TRUE;
3201 }
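/*
 * Multi-descriptor frames are stitched together through the rx buffer
 * structs rather than per-ring head/tail pointers: the head mbuf of a
 * partial frame is stored in the *next* slot's fmp field, so when that
 * slot completes the chain is picked up again and the packet header
 * length keeps accumulating, until the descriptor with EOP set finally
 * hands the whole chain to the stack via ml_enqueue()/ifiq_input().
 */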
3202
3203 /*********************************************************************
3204  *
3205  * Verify that the hardware indicated that the checksum is valid.
3206  * Inform the stack about the status of the checksum so that the
3207  * stack doesn't spend time verifying it again.
3208  *
3209  *********************************************************************/
3210 void
3211 ixgbe_rx_checksum(uint32_t staterr, struct mbuf *mp, uint32_t ptype)
3212 {
3213 	uint16_t status = (uint16_t)staterr;
3214 	uint8_t  errors = (uint8_t)(staterr >> 24);
3215 
3216 	if (status & IXGBE_RXD_STAT_IPCS) {
3217 		if (!(errors & IXGBE_RXD_ERR_IPE)) {
3218 			/* IP Checksum Good */
3219 			mp->m_pkthdr.csum_flags = M_IPV4_CSUM_IN_OK;
3220 		} else
3221 			mp->m_pkthdr.csum_flags = 0;
3222 	}
3223 	if (status & IXGBE_RXD_STAT_L4CS) {
3224 		if (!(errors & IXGBE_RXD_ERR_TCPE))
3225 			mp->m_pkthdr.csum_flags |=
3226 			    M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK;
3227 	}
3228 }
3229
3230 void
3231 ixgbe_setup_vlan_hw_support(struct ix_softc *sc)
3232 {
3233 	uint32_t	ctrl;
3234 	int		i;
3235 
3236 	/*
3237 	 * A soft reset zeroes out the VFTA, so
3238 	 * we need to repopulate it now.
3239 	 */
3240 	for (i = 0; i < IXGBE_VFTA_SIZE; i++) {
3241 		if (sc->shadow_vfta[i] != 0)
3242 			IXGBE_WRITE_REG(&sc->hw, IXGBE_VFTA(i),
3243 			    sc->shadow_vfta[i]);
3244 	}
3245 
3246 	ctrl = IXGBE_READ_REG(&sc->hw, IXGBE_VLNCTRL);
3247 #if 0
3248 	/* Enable the Filter Table if enabled */
3249 	if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
3250 		ctrl &= ~IXGBE_VLNCTRL_CFIEN;
3251 		ctrl |= IXGBE_VLNCTRL_VFE;
3252 	}
3253 #endif
3254 	if (sc->hw.mac.type == ixgbe_mac_82598EB)
3255 		ctrl |= IXGBE_VLNCTRL_VME;
3256 	IXGBE_WRITE_REG(&sc->hw, IXGBE_VLNCTRL, ctrl);
3257 
3258 	/* On 82599 the VLAN enable is per-queue in RXDCTL */
3259 	if (sc->hw.mac.type != ixgbe_mac_82598EB) {
3260 		for (i = 0; i < sc->num_queues; i++) {
3261 			ctrl = IXGBE_READ_REG(&sc->hw, IXGBE_RXDCTL(i));
3262 			ctrl |= IXGBE_RXDCTL_VME;
3263 			IXGBE_WRITE_REG(&sc->hw, IXGBE_RXDCTL(i), ctrl);
3264 		}
3265 	}
3266 }
3267
3268 void
3269 ixgbe_enable_intr(struct ix_softc *sc)
3270 {
3271 	struct ixgbe_hw	*hw = &sc->hw;
3272 	uint32_t	mask, fwsm;
3273 
3274 	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
3275 	/* Enable Fan Failure detection */
3276 	if (hw->device_id == IXGBE_DEV_ID_82598AT)
3277 		mask |= IXGBE_EIMS_GPI_SDP1;
3278 
3279 	switch (sc->hw.mac.type) {
3280 	case ixgbe_mac_82599EB:
3281 		mask |= IXGBE_EIMS_ECC;
3282 		/* Temperature sensor on some adapters */
3283 		mask |= IXGBE_EIMS_GPI_SDP0;
3284 		/* SFP+ (RX_LOS_N & MOD_ABS_N) */
3285 		mask |= IXGBE_EIMS_GPI_SDP1;
3286 		mask |= IXGBE_EIMS_GPI_SDP2;
3287 		break;
3288 	case ixgbe_mac_X540:
3289 		mask |= IXGBE_EIMS_ECC;
3290 		/* Detect if Thermal Sensor is enabled */
3291 		fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
3292 		if (fwsm & IXGBE_FWSM_TS_ENABLED)
3293 			mask |= IXGBE_EIMS_TS;
3294 		break;
3295 	case ixgbe_mac_X550:
3296 	case ixgbe_mac_X550EM_x:
3297 	case ixgbe_mac_X550EM_a:
3298 		mask |= IXGBE_EIMS_ECC;
3299 		/* MAC thermal sensor is automatically enabled */
3300 		mask |= IXGBE_EIMS_TS;
3301 		/* Some devices use SDP0 for important information */
3302 		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
3303 		    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
3304 			mask |= IXGBE_EIMS_GPI_SDP0_X540;
3305 	default:
3306 		break;
3307 	}
3308 
3309 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
3310 
3311 	/* With MSI-X we use auto clear */
3312 	if (sc->sc_intrmap) {
3313 		mask = IXGBE_EIMS_ENABLE_MASK;
3314 		/* Don't autoclear Link */
3315 		mask &= ~IXGBE_EIMS_OTHER;
3316 		mask &= ~IXGBE_EIMS_LSC;
3317 		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
3318 	}
3319 
3320 	IXGBE_WRITE_FLUSH(hw);
3321 }
3322
3323 void
3324 ixgbe_disable_intr(struct ix_softc *sc)
3325 {
3326 	if (sc->sc_intrmap)
3327 		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIAC, 0);
3328 	if (sc->hw.mac.type == ixgbe_mac_82598EB) {
3329 		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC, ~0);
3330 	} else {
3331 		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC, 0xFFFF0000);
3332 		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC_EX(0), ~0);
3333 		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC_EX(1), ~0);
3334 	}
3335 	IXGBE_WRITE_FLUSH(&sc->hw);
3336 }
3337
3338 uint16_t
3339 ixgbe_read_pci_cfg(struct ixgbe_hw *hw, uint32_t reg)
3340 {
3341 	struct pci_attach_args	*pa;
3342 	uint32_t value;
3343 	int high = 0;
3344 
3345 	if (reg & 0x2) {
3346 		high = 1;
3347 		reg &= ~0x2;
3348 	}
3349 	pa = &((struct ixgbe_osdep *)hw->back)->os_pa;
3350 	value = pci_conf_read(pa->pa_pc, pa->pa_tag, reg);
3351 
3352 	if (high)
3353 		value >>= 16;
3354 
3355 	return (value & 0xffff);
3356 }
3357 
3358 void
3359 ixgbe_write_pci_cfg(struct ixgbe_hw *hw, uint32_t reg, uint16_t value)
3360 {
3361 	struct pci_attach_args	*pa;
3362 	uint32_t rv;
3363 	int high = 0;
3364 
3365 	/* Need to do read/mask/write... because 16 vs 32 bit!!! */
3366 	if (reg & 0x2) {
3367 		high = 1;
3368 		reg &= ~0x2;
3369 	}
3370 	pa = &((struct ixgbe_osdep *)hw->back)->os_pa;
3371 	rv = pci_conf_read(pa->pa_pc, pa->pa_tag, reg);
3372 	if (!high)
3373 		rv = (rv & 0xffff0000) | value;
3374 	else
3375 		rv = (rv & 0xffff) | ((uint32_t)value << 16);
3376 	pci_conf_write(pa->pa_pc, pa->pa_tag, reg, rv);
3377 }
3378
3379 /*
3380  * Setup the correct IVAR register for a particular MSIX interrupt
3381  * (yes this is all very magic and confusing :)
3382  *  - entry is the register array entry
3383  *  - vector is the MSIX vector for this queue
3384  *  - type is RX/TX/MISC
3385  */
3386 void
3387 ixgbe_set_ivar(struct ix_softc *sc, uint8_t entry, uint8_t vector, int8_t type)
3388 {
3389 	struct ixgbe_hw *hw = &sc->hw;
3390 	uint32_t ivar, index;
3391 
3392 	vector |= IXGBE_IVAR_ALLOC_VAL;
3393 
3394 	switch (hw->mac.type) {
3395 
3396 	case ixgbe_mac_82598EB:
3397 		if (type == -1)
3398 			entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
3399 		else
3400 			entry += (type * 64);
3401 		index = (entry >> 2) & 0x1F;
3402 		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3403 		ivar &= ~(0xFF << (8 * (entry & 0x3)));
3404 		ivar |= (vector << (8 * (entry & 0x3)));
3405 		IXGBE_WRITE_REG(&sc->hw, IXGBE_IVAR(index), ivar);
3406 		break;
3407 
3408 	case ixgbe_mac_82599EB:
3409 	case ixgbe_mac_X540:
3410 	case ixgbe_mac_X550:
3411 	case ixgbe_mac_X550EM_x:
3412 	case ixgbe_mac_X550EM_a:
3413 		if (type == -1) { /* MISC IVAR */
3414 			index = (entry & 1) * 8;
3415 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3416 			ivar &= ~(0xFF << index);
3417 			ivar |= (vector << index);
3418 			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
3419 		} else { /* RX/TX IVARS */
3420 			index = (16 * (entry & 1)) + (8 * type);
3421 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
3422 			ivar &= ~(0xFF << index);
3423 			ivar |= (vector << index);
3424 			IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
3425 		}
3426 
3427 	default:
3428 		break;
3429 	}
3430 }
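/*
 * Worked example of the IVAR packing above for the 82599/X5xx family:
 * each 32-bit IVAR register holds four 8-bit vector entries covering
 * two queues (even queue in the low half, odd queue in the high half),
 * with the RX entry at bit offset 0/16 and the TX entry at 8/24.  So
 * for queue 2, TX (entry = 2, type = 1), the code updates IVAR(1) at
 * bit offset (16 * 0) + (8 * 1) = 8, writing the MSI-X vector number
 * with IXGBE_IVAR_ALLOC_VAL set to mark the entry valid.
 */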
3431
3432 void
3433 ixgbe_configure_ivars(struct ix_softc *sc)
3434 {
3435 	struct ix_queue	*que = sc->queues;
3436 	uint32_t newitr;
3437 	int i;
3438 
3439 	newitr = (4000000 / IXGBE_INTS_PER_SEC) & 0x0FF8;
3440 
3441 	for (i = 0; i < sc->num_queues; i++, que++) {
3442 		/* First the RX queue entry */
3443 		ixgbe_set_ivar(sc, i, que->msix, 0);
3444 		/* ... and the TX */
3445 		ixgbe_set_ivar(sc, i, que->msix, 1);
3446 		/* Set an Initial EITR value */
3447 		IXGBE_WRITE_REG(&sc->hw,
3448 		    IXGBE_EITR(que->msix), newitr);
3449 	}
3450 
3451 	/* For the Link interrupt */
3452 	ixgbe_set_ivar(sc, 1, sc->linkvec, -1);
3453 }
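/*
 * The EITR seed above works out to (4000000 / 8000) & 0x0FF8 =
 * 500 & 0x0FF8 = 496, with IXGBE_INTS_PER_SEC expanding to 8000 in
 * this build; the low three bits are masked off, presumably because
 * the hardware's ITR interval field is aligned to bit 3 so only
 * multiples of 8 are representable.
 */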
3454
3455 /*
3456  * SFP module interrupts handler
3457  */
3458 void
3459 ixgbe_handle_mod(struct ix_softc *sc)
3460 {
3461 	struct ixgbe_hw *hw = &sc->hw;
3462 	uint32_t err;
3463 
3464 	err = hw->phy.ops.identify_sfp(hw);
3465 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3466 		printf("%s: Unsupported SFP+ module type was detected!\n",
3467 		    sc->dev.dv_xname);
3468 		return;
3469 	}
3470 	err = hw->mac.ops.setup_sfp(hw);
3471 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3472 		printf("%s: Setup failure - unsupported SFP+ module type!\n",
3473 		    sc->dev.dv_xname);
3474 		return;
3475 	}
3476 
3477 	ixgbe_handle_msf(sc);
3478 }
3479
3480
3481 /*
3482  * MSF (multispeed fiber) interrupts handler
3483  */
3484 void
3485 ixgbe_handle_msf(struct ix_softc *sc)
3486 {
3487 	struct ixgbe_hw *hw = &sc->hw;
3488 	uint32_t autoneg;
3489 	bool negotiate;
3490 
3491 	autoneg = hw->phy.autoneg_advertised;
3492 	if ((!autoneg) && (hw->mac.ops.get_link_capabilities)) {
3493 		if (hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate))
3494 			return;
3495 	}
3496 	if (hw->mac.ops.setup_link)
3497 		hw->mac.ops.setup_link(hw, autoneg, TRUE);
3498 
3499 	ifmedia_delete_instance(&sc->media, IFM_INST_ANY);
3500 	ixgbe_add_media_types(sc);
3501 	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
3502 }
3503
3504 /*
3505  * External PHY interrupts handler
3506  */
3507 void
3508 ixgbe_handle_phy(struct ix_softc *sc)
3509 {
3510 	struct ixgbe_hw *hw = &sc->hw;
3511 	int error;
3512 
3513 	error = hw->phy.ops.handle_lasi(hw);
3514 	if (error == IXGBE_ERR_OVERTEMP)
3515 		printf("%s: CRITICAL: EXTERNAL PHY OVER TEMP!! "
3516 		    " PHY will downshift to lower power state!\n",
3517 		    sc->dev.dv_xname);
3518 	else if (error)
3519 		printf("%s: Error handling LASI interrupt: %d\n",
3520 		    sc->dev.dv_xname, error);
3521 
3522 }
3523
3524 #if NKSTAT > 0
3525 enum ix_counter_idx {
3526 	ix_counter_crcerrs,
3527 	ix_counter_lxontxc,
3528 	ix_counter_lxonrxc,
3529 	ix_counter_lxofftxc,
3530 	ix_counter_lxoffrxc,
3531 	ix_counter_prc64,
3532 	ix_counter_prc127,
3533 	ix_counter_prc255,
3534 	ix_counter_prc511,
3535 	ix_counter_prc1023,
3536 	ix_counter_prc1522,
3537 	ix_counter_gptc,
3538 	ix_counter_gorc,
3539 	ix_counter_gotc,
3540 	ix_counter_ruc,
3541 	ix_counter_rfc,
3542 	ix_counter_roc,
3543 	ix_counter_rjc,
3544 	ix_counter_tor,
3545 	ix_counter_tpr,
3546 	ix_counter_tpt,
3547 	ix_counter_gprc,
3548 	ix_counter_bprc,
3549 	ix_counter_mprc,
3550 	ix_counter_ptc64,
3551 	ix_counter_ptc127,
3552 	ix_counter_ptc255,
3553 	ix_counter_ptc511,
3554 	ix_counter_ptc1023,
3555 	ix_counter_ptc1522,
3556 	ix_counter_mptc,
3557 	ix_counter_bptc,
3558 
3559 	ix_counter_num,
3560 };
3561 
3562 CTASSERT(KSTAT_KV_U_PACKETS <= 0xff);
3563 CTASSERT(KSTAT_KV_U_BYTES <= 0xff);
3564
3565 struct ix_counter {
3566 	char		name[KSTAT_KV_NAMELEN];
3567 	uint32_t	reg;
3568 	uint8_t		width;
3569 	uint8_t		unit;
3570 };
3571 
3572 static const struct ix_counter ix_counters[ix_counter_num] = {
3573 	[ix_counter_crcerrs] = { "crc errs", IXGBE_CRCERRS, 32,
3574 	    KSTAT_KV_U_PACKETS },
3575 	[ix_counter_lxontxc] = { "tx link xon", IXGBE_LXONTXC, 32,
3576 	    KSTAT_KV_U_PACKETS },
3577 	[ix_counter_lxonrxc] = { "rx link xon", 0, 32,
3578 	    KSTAT_KV_U_PACKETS },
3579 	[ix_counter_lxofftxc] = { "tx link xoff", IXGBE_LXOFFTXC, 32,
3580 	    KSTAT_KV_U_PACKETS },
3581 	[ix_counter_lxoffrxc] = { "rx link xoff", 0, 32,
3582 	    KSTAT_KV_U_PACKETS },
3583 	[ix_counter_prc64] = { "rx 64B", IXGBE_PRC64, 32,
3584 	    KSTAT_KV_U_PACKETS },
3585 	[ix_counter_prc127] = { "rx 65-127B", IXGBE_PRC127, 32,
3586 	    KSTAT_KV_U_PACKETS },
3587 	[ix_counter_prc255] = { "rx 128-255B", IXGBE_PRC255, 32,
3588 	    KSTAT_KV_U_PACKETS },
3589 	[ix_counter_prc511] = { "rx 256-511B", IXGBE_PRC511, 32,
3590 	    KSTAT_KV_U_PACKETS },
3591 	[ix_counter_prc1023] = { "rx 512-1023B", IXGBE_PRC1023, 32,
3592 	    KSTAT_KV_U_PACKETS },
3593 	[ix_counter_prc1522] = { "rx 1024-maxB", IXGBE_PRC1522, 32,
3594 	    KSTAT_KV_U_PACKETS },
3595 	[ix_counter_gptc] = { "tx good", IXGBE_GPTC, 32,
3596 	    KSTAT_KV_U_PACKETS },
3597 	[ix_counter_gorc] = { "rx good", IXGBE_GORCL, 36,
3598 	    KSTAT_KV_U_BYTES },
3599 	[ix_counter_gotc] = { "tx good", IXGBE_GOTCL, 36,
3600 	    KSTAT_KV_U_BYTES },
3601 	[ix_counter_ruc] = { "rx undersize", IXGBE_RUC, 32,
3602 	    KSTAT_KV_U_PACKETS },
3603 	[ix_counter_rfc] = { "rx fragment", IXGBE_RFC, 32,
3604 	    KSTAT_KV_U_PACKETS },
3605 	[ix_counter_roc] = { "rx oversize", IXGBE_ROC, 32,
3606 	    KSTAT_KV_U_PACKETS },
3607 	[ix_counter_rjc] = { "rx jabber", IXGBE_RJC, 32,
3608 	    KSTAT_KV_U_PACKETS },
3609 	[ix_counter_tor] = { "rx total", IXGBE_TORL, 36,
3610 	    KSTAT_KV_U_BYTES },
3611 	[ix_counter_tpr] = { "rx total", IXGBE_TPR, 32,
3612 	    KSTAT_KV_U_PACKETS },
3613 	[ix_counter_tpt] = { "tx total", IXGBE_TPT, 32,
3614 	    KSTAT_KV_U_PACKETS },
3615 	[ix_counter_gprc] = { "rx good", IXGBE_GPRC, 32,
3616 	    KSTAT_KV_U_PACKETS },
3617 	[ix_counter_bprc] = { "rx bcast", IXGBE_BPRC, 32,
3618 	    KSTAT_KV_U_PACKETS },
3619 	[ix_counter_mprc] = { "rx mcast", IXGBE_MPRC, 32,
3620 	    KSTAT_KV_U_PACKETS },
3621 	[ix_counter_ptc64] = { "tx 64B", IXGBE_PTC64, 32,
3622 	    KSTAT_KV_U_PACKETS },
3623 	[ix_counter_ptc127] = { "tx 65-127B", IXGBE_PTC127, 32,
3624 	    KSTAT_KV_U_PACKETS },
3625 	[ix_counter_ptc255] = { "tx 128-255B", IXGBE_PTC255, 32,
3626 	    KSTAT_KV_U_PACKETS },
3627 	[ix_counter_ptc511] = { "tx 256-511B", IXGBE_PTC511, 32,
3628 	    KSTAT_KV_U_PACKETS },
3629 	[ix_counter_ptc1023] = { "tx 512-1023B", IXGBE_PTC1023, 32,
3630 	    KSTAT_KV_U_PACKETS },
3631 	[ix_counter_ptc1522] = { "tx 1024-maxB", IXGBE_PTC1522, 32,
3632 	    KSTAT_KV_U_PACKETS },
3633 	[ix_counter_mptc] = { "tx mcast", IXGBE_MPTC, 32,
3634 	    KSTAT_KV_U_PACKETS },
3635 	[ix_counter_bptc] = { "tx bcast", IXGBE_BPTC, 32,
3636 	    KSTAT_KV_U_PACKETS },
3637 };
3638
3639 struct ix_rxq_kstats {
3640 	struct kstat_kv	qprc;
3641 	struct kstat_kv	qbrc;
3642 	struct kstat_kv	qprdc;
3643 };
3644 
3645 static const struct ix_rxq_kstats ix_rxq_kstats_tpl = {
3646 	KSTAT_KV_UNIT_INITIALIZER("packets",
3647 	    KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS),
3648 	KSTAT_KV_UNIT_INITIALIZER("bytes",
3649 	    KSTAT_KV_T_COUNTER64, KSTAT_KV_U_BYTES),
3650 	KSTAT_KV_UNIT_INITIALIZER("qdrops",
3651 	    KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS),
3652 };
3653 
3654 struct ix_txq_kstats {
3655 	struct kstat_kv	qptc;
3656 	struct kstat_kv	qbtc;
3657 };
3658 
3659 static const struct ix_txq_kstats ix_txq_kstats_tpl = {
3660 	KSTAT_KV_UNIT_INITIALIZER("packets",
3661 	    KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS),
3662 	KSTAT_KV_UNIT_INITIALIZER("bytes",
3663 	    KSTAT_KV_T_COUNTER64, KSTAT_KV_U_BYTES),
3664 };
3665
3666 static int	ix_kstats_read(struct kstat *ks);
3667 static int	ix_rxq_kstats_read(struct kstat *ks);
3668 static int	ix_txq_kstats_read(struct kstat *ks);
3669 
3670 static void
3671 ix_kstats(struct ix_softc *sc)
3672 {
3673 	struct kstat *ks;
3674 	struct kstat_kv *kvs;
3675 	unsigned int i;
3676 
3677 	mtx_init(&sc->sc_kstat_mtx, IPL_SOFTCLOCK);
3678 	timeout_set(&sc->sc_kstat_tmo, ix_kstats_tick, sc);
3679 
3680 	ks = kstat_create(sc->dev.dv_xname, 0, "ix-stats", 0,
3681 	    KSTAT_T_KV1, 0);
3682 	if (ks == NULL)
3683 		return;
3684 
3685 	kvs = mallocarray(nitems(ix_counters), sizeof(*kvs),
3686 	    M_DEVBUF, M_WAITOK|M_ZERO);
3687 
3688 	for (i = 0; i < nitems(ix_counters); i++) {
3689 		const struct ix_counter *ixc = &ix_counters[i];
3690 
3691 		kstat_kv_unit_init(&kvs[i], ixc->name,
3692 		    KSTAT_KV_T_COUNTER64, ixc->unit);
3693 	}
3694 
3695 	kstat_set_mutex(ks, &sc->sc_kstat_mtx);
3696 	ks->ks_softc = sc;
3697 	ks->ks_data = kvs;
3698 	ks->ks_datalen = nitems(ix_counters) * sizeof(*kvs);
3699 	ks->ks_read = ix_kstats_read;
3700 
3701 	sc->sc_kstat = ks;
3702 	kstat_install(ks);
3703 }
3704
3705 static void
3706 ix_rxq_kstats(struct ix_softc *sc, struct rx_ring *rxr)
3707 {
3708 	struct ix_rxq_kstats *stats;
3709 	struct kstat *ks;
3710 
3711 	ks = kstat_create(sc->dev.dv_xname, 0, "ix-rxq", rxr->me,
3712 	    KSTAT_T_KV1, 0);
3713 	if (ks == NULL)
3714 		return;
3715 
3716 	stats = malloc(sizeof(*stats), M_DEVBUF, M_WAITOK|M_ZERO);
3717 	*stats = ix_rxq_kstats_tpl;
3718 
3719 	kstat_set_mutex(ks, &sc->sc_kstat_mtx);
3720 	ks->ks_softc = rxr;
3721 	ks->ks_data = stats;
3722 	ks->ks_datalen = sizeof(*stats);
3723 	ks->ks_read = ix_rxq_kstats_read;
3724 
3725 	rxr->kstat = ks;
3726 	kstat_install(ks);
3727 }
3728 
3729 static void
3730 ix_txq_kstats(struct ix_softc *sc, struct tx_ring *txr)
3731 {
3732 	struct ix_txq_kstats *stats;
3733 	struct kstat *ks;
3734 
3735 	ks = kstat_create(sc->dev.dv_xname, 0, "ix-txq", txr->me,
3736 	    KSTAT_T_KV1, 0);
3737 	if (ks == NULL)
3738 		return;
3739 
3740 	stats = malloc(sizeof(*stats), M_DEVBUF, M_WAITOK|M_ZERO);
3741 	*stats = ix_txq_kstats_tpl;
3742 
3743 	kstat_set_mutex(ks, &sc->sc_kstat_mtx);
3744 	ks->ks_softc = txr;
3745 	ks->ks_data = stats;
3746 	ks->ks_datalen = sizeof(*stats);
3747 	ks->ks_read = ix_txq_kstats_read;
3748 
3749 	txr->kstat = ks;
3750 	kstat_install(ks);
3751 }
3752
3753 /**********************************************************************
3754  *
3755  * Update the board statistics counters.
3756  *
3757  **********************************************************************/
3758 
3759 static void
3760 ix_kstats_tick(void *arg)
3761 {
3762 	struct ix_softc *sc = arg;
3763 	int i;
3764 
3765 	timeout_add_sec(&sc->sc_kstat_tmo, 1);
3766 
3767 	mtx_enter(&sc->sc_kstat_mtx);
3768 	ix_kstats_read(sc->sc_kstat);
3769 	for (i = 0; i < sc->num_queues; i++) {
3770 		ix_rxq_kstats_read(sc->rx_rings[i].kstat);
3771 		ix_txq_kstats_read(sc->tx_rings[i].kstat);
3772 	}
3773 	mtx_leave(&sc->sc_kstat_mtx);
3774 }
3775
3776 static uint64_t
3777 ix_read36(struct ixgbe_hw *hw, bus_size_t loreg, bus_size_t hireg)
3778 {
3779 	uint64_t lo, hi;
3780 
3781 	lo = IXGBE_READ_REG(hw, loreg);
3782 	hi = IXGBE_READ_REG(hw, hireg);
3783 
3784 	return (((hi & 0xf) << 32) | lo);
3785 }
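/*
 * Several of the octet counters in ix_counters are 36 bits wide and are
 * split across a pair of registers: the low 32 bits live in the first
 * register and only the bottom 4 bits of the second hold the upper
 * part, which is why ix_read36() masks the high register with 0xf
 * before shifting it up by 32.
 */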
3786
3787 static int
3788 ix_kstats_read(struct kstat *ks)
3789 {
3790 	struct ix_softc *sc = ks->ks_softc;
3791 	struct kstat_kv *kvs = ks->ks_data;
3792 	struct ixgbe_hw	*hw = &sc->hw;
3793 	unsigned int i;
3794 
3795 	for (i = 0; i < nitems(ix_counters); i++) {
3796 		const struct ix_counter *ixc = &ix_counters[i];
3797 		uint32_t reg = ixc->reg;
3798 		uint64_t v;
3799 
3800 		if (reg == 0)
3801 			continue;
3802 
3803 		if (ixc->width > 32) {
3804 			if (sc->hw.mac.type == ixgbe_mac_82598EB)
3805 				v = IXGBE_READ_REG(hw, reg + 4);
3806 			else
3807 				v = ix_read36(hw, reg, reg + 4);
3808 		} else
3809 			v = IXGBE_READ_REG(hw, reg);
3810 
3811 		kstat_kv_u64(&kvs[i]) += v;
3812 	}
3813 
3814 	/* handle the exceptions */
3815 	if (sc->hw.mac.type == ixgbe_mac_82598EB) {
3816 		kstat_kv_u64(&kvs[ix_counter_lxonrxc]) +=
3817 		    IXGBE_READ_REG(hw, IXGBE_LXONRXC);
3818 		kstat_kv_u64(&kvs[ix_counter_lxoffrxc]) +=
3819 		    IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
3820 	} else {
3821 		kstat_kv_u64(&kvs[ix_counter_lxonrxc]) +=
3822 		    IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
3823 		kstat_kv_u64(&kvs[ix_counter_lxoffrxc]) +=
3824 		    IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
3825 	}
3826 
3827 	getnanouptime(&ks->ks_updated);
3828 
3829 	return (0);
3830 }
3831
3832 int
3833 ix_rxq_kstats_read(struct kstat *ks)
3834 {
3835 	struct ix_rxq_kstats *stats = ks->ks_data;
3836 	struct rx_ring *rxr = ks->ks_softc;
3837 	struct ix_softc *sc = rxr->sc;
3838 	struct ixgbe_hw	*hw = &sc->hw;
3839 	uint32_t i = rxr->me;
3840 
3841 	kstat_kv_u64(&stats->qprc) += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
3842 	if (sc->hw.mac.type == ixgbe_mac_82598EB) {
3843 		kstat_kv_u64(&stats->qprdc) +=
3844 		    IXGBE_READ_REG(hw, IXGBE_RNBC(i));
3845 		kstat_kv_u64(&stats->qbrc) +=
3846 		    IXGBE_READ_REG(hw, IXGBE_QBRC(i));
3847 	} else {
3848 		kstat_kv_u64(&stats->qprdc) +=
3849 		    IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
3850 		kstat_kv_u64(&stats->qbrc) +=
3851 		    ix_read36(hw, IXGBE_QBRC_L(i), IXGBE_QBRC_H(i));
3852 	}
3853 
3854 	getnanouptime(&ks->ks_updated);
3855 
3856 	return (0);
3857 }
3858
3859 int
3860 ix_txq_kstats_read(struct kstat *ks)
3861 {
3862 	struct ix_txq_kstats *stats = ks->ks_data;
3863 	struct tx_ring *txr = ks->ks_softc;
3864 	struct ix_softc *sc = txr->sc;
3865 	struct ixgbe_hw	*hw = &sc->hw;
3866 	uint32_t i = txr->me;
3867 
3868 	kstat_kv_u64(&stats->qptc) += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
3869 	if (sc->hw.mac.type == ixgbe_mac_82598EB) {
3870 		kstat_kv_u64(&stats->qbtc) +=
3871 		    IXGBE_READ_REG(hw, IXGBE_QBTC(i));
3872 	} else {
3873 		kstat_kv_u64(&stats->qbtc) +=
3874 		    ix_read36(hw, IXGBE_QBTC_L(i), IXGBE_QBTC_H(i));
3875 	}
3876 
3877 	getnanouptime(&ks->ks_updated);
3878 
3879 	return (0);
3880 }
3881 #endif /* NKSTAT > 0 */
3882
3883 void
3884 ixgbe_map_queue_statistics(struct ix_softc *sc)
3885 {
3886 	int i;
3887 	uint32_t r;
3888 
3889 	for (i = 0; i < 32; i++) {
3890 		/*
3891 		 * Queues 0-15 are mapped 1:1
3892 		 * Queue 0 -> Counter 0
3893 		 * Queue 1 -> Counter 1
3894 		 * Queue 2 -> Counter 2....
3895 		 * Queues 16-127 are mapped to Counter 0
3896 		 */
3897 		if (i < 4) {
3898 			r = (i * 4 + 0);
3899 			r |= (i * 4 + 1) << 8;
3900 			r |= (i * 4 + 2) << 16;
3901 			r |= (i * 4 + 3) << 24;
3902 		} else
3903 			r = 0;
3904 
3905 		IXGBE_WRITE_REG(&sc->hw, IXGBE_RQSMR(i), r);
3906 		IXGBE_WRITE_REG(&sc->hw, IXGBE_TQSM(i), r);
3907 	}
3908 }
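/*
 * Each RQSMR/TQSM register above maps four consecutive queues to
 * statistics counters, one byte per queue.  For the first four
 * registers (i < 4, i.e. queues 0-15) the mapping is 1:1 - for i = 1
 * the value written is 0x07060504, assigning queues 4-7 to counters
 * 4-7 - and every remaining queue is folded onto counter 0.
 */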