Bug Summary

File: dev/pci/if_ngbe.c
Warning: line 2697, column 25
Dereference of null pointer

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -cc1 -triple amd64-unknown-openbsd7.4 -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name if_ngbe.c -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -ffp-contract=on -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -target-feature +retpoline-external-thunk -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/llvm16/lib/clang/16 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/legacy-dpm -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc -I 
/usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu13 -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/inc -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc/pmfw_if -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D SUSPEND -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant -fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fcf-protection=branch -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /home/ben/Projects/scan/2024-01-11-110808-61670-1 -x c /usr/src/sys/dev/pci/if_ngbe.c
1/* $OpenBSD: if_ngbe.c,v 1.2 2023/11/10 15:51:20 bluhm Exp $ */
2
3/*
4 * Copyright (c) 2015-2017 Beijing WangXun Technology Co., Ltd.
5 * Copyright (c) 2023 Kevin Lo <kevlo@openbsd.org>
6 *
7 * Permission to use, copy, modify, and distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
10 *
11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 */
19
20#include "bpfilter.h"
21#include "vlan.h"
22
23#include <sys/param.h>
24#include <sys/systm.h>
25#include <sys/sockio.h>
26#include <sys/mbuf.h>
27#include <sys/malloc.h>
28#include <sys/kernel.h>
29#include <sys/socket.h>
30#include <sys/device.h>
31#include <sys/endian.h>
32#include <sys/intrmap.h>
33
34#include <net/if.h>
35#include <net/if_media.h>
36#include <net/toeplitz.h>
37
38#include <netinet/in.h>
39#include <netinet/if_ether.h>
40#include <netinet/ip.h>
41#include <netinet/ip6.h>
42
43#if NBPFILTER1 > 0
44#include <net/bpf.h>
45#endif
46
47#include <machine/bus.h>
48#include <machine/intr.h>
49
50#include <dev/mii/mii.h>
51
52#include <dev/pci/pcivar.h>
53#include <dev/pci/pcireg.h>
54#include <dev/pci/pcidevs.h>
55
56#include <dev/pci/if_ngbereg.h>
57
58const struct pci_matchid ngbe_devices[] = {
59 { PCI_VENDOR_WANGXUN0x8088, PCI_PRODUCT_WANGXUN_WX1860A20x0101 },
60 { PCI_VENDOR_WANGXUN0x8088, PCI_PRODUCT_WANGXUN_WX1860AL10x010b }
61};
62
63int ngbe_match(struct device *, void *, void *);
64void ngbe_attach(struct device *, struct device *, void *);
65int ngbe_detach(struct device *, int);
66void ngbe_init(void *);
67int ngbe_ioctl(struct ifnet *, u_long, caddr_t);
68int ngbe_media_change(struct ifnet *);
69void ngbe_media_status(struct ifnet *, struct ifmediareq *);
70int ngbe_rxfill(struct rx_ring *);
71int ngbe_rxrinfo(struct ngbe_softc *, struct if_rxrinfo *);
72void ngbe_start(struct ifqueue *);
73void ngbe_stop(struct ngbe_softc *);
74void ngbe_update_link_status(struct ngbe_softc *);
75void ngbe_watchdog(struct ifnet *);
76int ngbe_allocate_pci_resources(struct ngbe_softc *);
77void ngbe_free_pci_resources(struct ngbe_softc *);
78int ngbe_allocate_msix(struct ngbe_softc *);
79void ngbe_setup_interface(struct ngbe_softc *);
80int ngbe_setup_msix(struct ngbe_softc *);
81int ngbe_dma_malloc(struct ngbe_softc *, bus_size_t,
82 struct ngbe_dma_alloc *);
83void ngbe_dma_free(struct ngbe_softc *,
84 struct ngbe_dma_alloc *);
85int ngbe_allocate_isb(struct ngbe_softc *);
86void ngbe_free_isb(struct ngbe_softc *);
87int ngbe_allocate_queues(struct ngbe_softc *);
88void ngbe_free_receive_structures(struct ngbe_softc *);
89void ngbe_free_receive_buffers(struct rx_ring *);
90void ngbe_free_transmit_structures(struct ngbe_softc *);
91void ngbe_free_transmit_buffers(struct tx_ring *);
92int ngbe_allocate_receive_buffers(struct rx_ring *);
93int ngbe_allocate_transmit_buffers(struct tx_ring *);
94int ngbe_setup_receive_ring(struct rx_ring *);
95int ngbe_setup_transmit_ring(struct tx_ring *);
96int ngbe_setup_receive_structures(struct ngbe_softc *);
97int ngbe_setup_transmit_structures(struct ngbe_softc *);
98uint8_t * ngbe_addr_list_itr(struct ngbe_hw *, uint8_t **,
99 uint32_t *);
100void ngbe_iff(struct ngbe_softc *);
101int ngbe_initialize_receive_unit(struct ngbe_softc *);
102void ngbe_initialize_rss_mapping(struct ngbe_softc *);
103int ngbe_initialize_transmit_unit(struct ngbe_softc *);
104int ngbe_intr_link(void *);
105int ngbe_intr_queue(void *);
106void ngbe_init_eeprom_params(struct ngbe_hw *);
107int ngbe_init_hw(struct ngbe_softc *);
108void ngbe_init_ops(struct ngbe_hw *);
109void ngbe_init_rx_addrs(struct ngbe_softc *);
110void ngbe_init_shared_code(struct ngbe_softc *);
111void ngbe_init_thermal_sensor_thresh(struct ngbe_hw *);
112void ngbe_init_uta_tables(struct ngbe_hw *);
113void ngbe_fc_autoneg(struct ngbe_softc *);
114int ngbe_fc_autoneg_copper(struct ngbe_softc *);
115int ngbe_fc_enable(struct ngbe_softc *);
116int ngbe_fmgr_cmd_op(struct ngbe_hw *, uint32_t, uint32_t);
117uint32_t ngbe_flash_read_dword(struct ngbe_hw *, uint32_t);
118uint8_t ngbe_calculate_checksum(uint8_t *, uint32_t);
119int ngbe_check_flash_load(struct ngbe_softc *, uint32_t);
120int ngbe_check_internal_phy_id(struct ngbe_softc *);
121int ngbe_check_mac_link(struct ngbe_hw *, uint32_t *, int *,
122 int);
123int ngbe_check_mng_access(struct ngbe_hw *);
124int ngbe_check_reset_blocked(struct ngbe_softc *);
125void ngbe_clear_hw_cntrs(struct ngbe_hw *);
126void ngbe_clear_vfta(struct ngbe_hw *);
127void ngbe_configure_ivars(struct ngbe_softc *);
128void ngbe_configure_pb(struct ngbe_softc *);
129void ngbe_disable_intr(struct ngbe_softc *);
130int ngbe_disable_pcie_master(struct ngbe_softc *);
131void ngbe_disable_queue(struct ngbe_softc *, uint32_t);
132void ngbe_disable_rx(struct ngbe_hw *);
133void ngbe_disable_sec_rx_path(struct ngbe_hw *);
134int ngbe_eepromcheck_cap(struct ngbe_softc *, uint16_t,
135 uint32_t *);
136void ngbe_enable_intr(struct ngbe_softc *);
137void ngbe_enable_queue(struct ngbe_softc *, uint32_t);
138void ngbe_enable_rx(struct ngbe_hw *);
139void ngbe_enable_rx_dma(struct ngbe_hw *, uint32_t);
140void ngbe_enable_sec_rx_path(struct ngbe_hw *);
141int ngbe_encap(struct tx_ring *, struct mbuf *);
142int ngbe_get_buf(struct rx_ring *, int);
143void ngbe_get_bus_info(struct ngbe_softc *);
144void ngbe_get_copper_link_capabilities(struct ngbe_hw *,
145 uint32_t *, int *);
146int ngbe_get_eeprom_semaphore(struct ngbe_softc *);
147void ngbe_get_hw_control(struct ngbe_hw *);
148void ngbe_release_hw_control(struct ngbe_softc *);
149void ngbe_get_mac_addr(struct ngbe_hw *, uint8_t *);
150enum ngbe_media_type ngbe_get_media_type(struct ngbe_hw *);
151void ngbe_gphy_dis_eee(struct ngbe_hw *);
152void ngbe_gphy_efuse_calibration(struct ngbe_softc *);
153void ngbe_gphy_wait_mdio_access_on(struct ngbe_hw *);
154void ngbe_handle_phy_event(struct ngbe_softc *);
155int ngbe_host_interface_command(struct ngbe_softc *,
156 uint32_t *, uint32_t, uint32_t, int);
157int ngbe_hpbthresh(struct ngbe_softc *);
158int ngbe_lpbthresh(struct ngbe_softc *);
159int ngbe_mng_present(struct ngbe_hw *);
160int ngbe_mta_vector(struct ngbe_hw *, uint8_t *);
161int ngbe_negotiate_fc(struct ngbe_softc *, uint32_t,
162 uint32_t, uint32_t, uint32_t, uint32_t, uint32_t);
163int ngbe_non_sfp_link_config(struct ngbe_softc *);
164void ngbe_pbthresh_setup(struct ngbe_softc *);
165void ngbe_phy_check_event(struct ngbe_softc *);
166int ngbe_phy_check_overtemp(struct ngbe_hw *);
167void ngbe_phy_get_advertised_pause(struct ngbe_hw *,
168 uint8_t *);
169void ngbe_phy_get_lp_advertised_pause(struct ngbe_hw *,
170 uint8_t *);
171int ngbe_phy_identify(struct ngbe_softc *);
172int ngbe_phy_init(struct ngbe_softc *);
173void ngbe_phy_led_ctrl(struct ngbe_softc *);
174int ngbe_phy_led_oem_chk(struct ngbe_softc *, uint32_t *);
175int ngbe_phy_read_reg(struct ngbe_hw *, uint32_t, uint32_t,
176 uint16_t *);
177int ngbe_phy_write_reg(struct ngbe_hw *, uint32_t, uint32_t,
178 uint16_t);
179int ngbe_phy_reset(struct ngbe_softc *);
180int ngbe_phy_set_pause_advertisement(struct ngbe_hw *,
181 uint16_t);
182int ngbe_phy_setup(struct ngbe_softc *);
183int ngbe_phy_setup_link(struct ngbe_softc *, uint32_t, int);
184uint16_t ngbe_read_pci_cfg_word(struct ngbe_softc *, uint32_t);
185void ngbe_release_eeprom_semaphore(struct ngbe_hw *);
186int ngbe_acquire_swfw_sync(struct ngbe_softc *, uint32_t);
187void ngbe_release_swfw_sync(struct ngbe_softc *, uint32_t);
188void ngbe_reset(struct ngbe_softc *);
189int ngbe_reset_hw(struct ngbe_softc *);
190void ngbe_reset_misc(struct ngbe_hw *);
191int ngbe_set_fw_drv_ver(struct ngbe_softc *, uint8_t,
192 uint8_t, uint8_t, uint8_t);
193void ngbe_set_ivar(struct ngbe_softc *, uint16_t, uint16_t,
194 int8_t);
195void ngbe_set_lan_id_multi_port_pcie(struct ngbe_hw *);
196void ngbe_set_mta(struct ngbe_hw *, uint8_t *);
197void ngbe_set_pci_config_data(struct ngbe_hw *, uint16_t);
198int ngbe_set_rar(struct ngbe_softc *, uint32_t, uint8_t *,
199 uint64_t, uint32_t);
200void ngbe_set_rx_drop_en(struct ngbe_softc *);
201void ngbe_set_rxpba(struct ngbe_hw *, int, uint32_t, int);
202int ngbe_setup_copper_link(struct ngbe_softc *, uint32_t,
203 int);
204int ngbe_setup_fc(struct ngbe_softc *);
205void ngbe_setup_gpie(struct ngbe_hw *);
206void ngbe_setup_isb(struct ngbe_softc *);
207void ngbe_setup_psrtype(struct ngbe_hw *);
208void ngbe_setup_vlan_hw_support(struct ngbe_softc *);
209int ngbe_start_hw(struct ngbe_softc *);
210int ngbe_stop_adapter(struct ngbe_softc *);
211void ngbe_rx_checksum(uint32_t, struct mbuf *);
212void ngbe_rxeof(struct rx_ring *);
213void ngbe_rxrefill(void *);
214int ngbe_tx_ctx_setup(struct tx_ring *, struct mbuf *,
215 uint32_t *, uint32_t *);
216void ngbe_txeof(struct tx_ring *);
217void ngbe_update_mc_addr_list(struct ngbe_hw *, uint8_t *,
218 uint32_t, ngbe_mc_addr_itr, int);
219int ngbe_validate_mac_addr(uint8_t *);
220
221struct cfdriver ngbe_cd = {
222 NULL((void *)0), "ngbe", DV_IFNET
223};
224
225const struct cfattach ngbe_ca = {
226 sizeof(struct ngbe_softc), ngbe_match, ngbe_attach, ngbe_detach
227};
228
229int
230ngbe_match(struct device *parent, void *match, void *aux)
231{
232 return pci_matchbyid((struct pci_attach_args *)aux, ngbe_devices,
233 nitems(ngbe_devices)(sizeof((ngbe_devices)) / sizeof((ngbe_devices)[0])));
234}
235
236void
237ngbe_attach(struct device *parent, struct device *self, void *aux)
238{
239 struct pci_attach_args *pa = (struct pci_attach_args *)aux;
240 struct ngbe_softc *sc = (struct ngbe_softc *)self;
241 struct ngbe_hw *hw = &sc->hw;
242 uint32_t eeprom_cksum_devcap, devcap, led_conf;
243 int error;
244
245 sc->osdep.os_sc = sc;
246 sc->osdep.os_pa = *pa;
247
248 /* Setup PCI resources. */
249 if (ngbe_allocate_pci_resources(sc))
250 goto fail1;
251
252 sc->num_tx_desc = NGBE_DEFAULT_TXD512;
253 sc->num_rx_desc = NGBE_DEFAULT_RXD512;
254
255 /* Allocate Tx/Rx queues. */
256 if (ngbe_allocate_queues(sc))
257 goto fail1;
258
259 /* Allocate multicast array memory. */
260 sc->mta = mallocarray(ETHER_ADDR_LEN6, NGBE_SP_RAR_ENTRIES32, M_DEVBUF2,
261 M_NOWAIT0x0002);
262 if (sc->mta == NULL((void *)0)) {
263 printf(": can not allocate multicast setup array\n");
264 goto fail1;
265 }
266
267 /* Allocate interrupt status resources. */
268 if (ngbe_allocate_isb(sc))
269 goto fail2;
270
271 hw->mac.autoneg = 1;
272 hw->phy.autoneg_advertised = NGBE_LINK_SPEED_AUTONEG(1 | 2 | 8);
273 hw->phy.force_speed = NGBE_LINK_SPEED_UNKNOWN0;
274
275 /* Initialize the shared code. */
276 ngbe_init_shared_code(sc);
277
278 sc->hw.mac.ops.set_lan_id(&sc->hw);
279
280 /* Check if flash load is done after hw power up. */
281 error = ngbe_check_flash_load(sc, NGBE_SPI_ILDR_STATUS_PERST0x00000001);
282 if (error)
283 goto fail3;
284 error = ngbe_check_flash_load(sc, NGBE_SPI_ILDR_STATUS_PWRRST0x00000002);
285 if (error)
286 goto fail3;
287
288 hw->phy.reset_if_overtemp = 1;
289 error = sc->hw.mac.ops.reset_hw(sc);
290 hw->phy.reset_if_overtemp = 0;
291 if (error) {
292 printf(": HW reset failed\n");
293 goto fail3;
294 }
295
296 eeprom_cksum_devcap = devcap = 0;
297 if (hw->bus.lan_id == 0) {
298 NGBE_WRITE_REG(hw, NGBE_CALSUM_CAP_STATUS, 0)((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x10224
), (0)))
;
299 NGBE_WRITE_REG(hw, NGBE_EEPROM_VERSION_STORE_REG, 0)((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x1022c
), (0)))
;
300 } else
301 eeprom_cksum_devcap = NGBE_READ_REG(hw, NGBE_CALSUM_CAP_STATUS)((((struct ngbe_osdep *)(hw)->back)->os_memt)->read_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x10224
)))
;
302
303 hw->eeprom.ops.init_params(hw);
304 hw->mac.ops.release_swfw_sync(sc, NGBE_MNG_SWFW_SYNC_SW_MB0x0004);
305 if (hw->bus.lan_id == 0 || eeprom_cksum_devcap == 0) {
306 /* Make sure the EEPROM is good */
307 if (hw->eeprom.ops.eeprom_chksum_cap_st(sc, NGBE_CALSUM_COMMAND0xe9,
308 &devcap)) {
309 printf(": eeprom checksum is not valid\n");
310 goto fail3;
311 }
312 }
313
314 led_conf = 0;
315 if (hw->eeprom.ops.phy_led_oem_chk(sc, &led_conf))
316 sc->led_conf = -1;
317 else
318 sc->led_conf = led_conf;
319
320 memcpy(sc->sc_ac.ac_enaddr, sc->hw.mac.addr, ETHER_ADDR_LEN)__builtin_memcpy((sc->sc_ac.ac_enaddr), (sc->hw.mac.addr
), (6))
;
321
322 error = ngbe_allocate_msix(sc);
323 if (error)
324 goto fail3;
325
326 ngbe_setup_interface(sc);
327
328 /* Reset the hardware with the new settings */
329 error = hw->mac.ops.start_hw(sc);
330 if (error) {
331 printf(": HW init failed\n");
332 goto fail3;
333 }
334
335 /* Pick up the PCI bus settings for reporting later */
336 hw->mac.ops.get_bus_info(sc);
337
338 hw->mac.ops.set_fw_drv_ver(sc, 0xff, 0xff, 0xff, 0xff);
339
340 printf(", address %s\n", ether_sprintf(sc->hw.mac.addr));
341 return;
342
343fail3:
344 ngbe_free_isb(sc);
345fail2:
346 ngbe_free_transmit_structures(sc);
347 ngbe_free_receive_structures(sc);
348 free(sc->mta, M_DEVBUF2, ETHER_ADDR_LEN6 * NGBE_SP_RAR_ENTRIES32);
349fail1:
350 ngbe_free_pci_resources(sc);
351}
352
353int
354ngbe_detach(struct device *self, int flags)
355{
356 struct ngbe_softc *sc = (struct ngbe_softc *)self;
357 struct ifnet *ifp = &sc->sc_ac.ac_if;
358
359 ngbe_stop(sc);
360 ngbe_release_hw_control(sc);
361
362 ether_ifdetach(ifp);
363 if_detach(ifp);
364
365 ngbe_free_pci_resources(sc);
366
367 ngbe_free_transmit_structures(sc);
368 ngbe_free_receive_structures(sc);
369 ngbe_free_isb(sc);
370 free(sc->mta, M_DEVBUF2, ETHER_ADDR_LEN6 * NGBE_SP_RAR_ENTRIES32);
371
372 return 0;
373}
374
375static inline uint32_t
376NGBE_READ_REG_MASK(struct ngbe_hw *hw, uint32_t reg, uint32_t mask)
377{
378 uint32_t val;
379
380 val = NGBE_READ_REG(hw, reg)((((struct ngbe_osdep *)(hw)->back)->os_memt)->read_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (reg)))
;
381 if (val == NGBE_FAILED_READ_REG0xffffffff)
382 return val;
383 return val & mask;
384}
385
386static inline void
387NGBE_WRITE_REG_MASK(struct ngbe_hw *hw, uint32_t reg, uint32_t mask,
388 uint32_t field)
389{
390 uint32_t val;
391
392 val = NGBE_READ_REG(hw, reg)((((struct ngbe_osdep *)(hw)->back)->os_memt)->read_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (reg)))
;
393 if (val == NGBE_FAILED_READ_REG0xffffffff)
394 return;
395 val = ((val & ~mask) | (field & mask));
396 NGBE_WRITE_REG(hw, reg, val)((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (reg), (
val)))
;
397}
398
399static inline uint32_t
400ngbe_misc_isb(struct ngbe_softc *sc, enum ngbe_isb_idx idx)
401{
402 return htole32(sc->isb_base[idx])((__uint32_t)(sc->isb_base[idx]));
403}
404
405void
406ngbe_init(void *arg)
407{
408 struct ngbe_softc *sc = (struct ngbe_softc *)arg;
409 struct ngbe_hw *hw = &sc->hw;
410 struct ifnet *ifp = &sc->sc_ac.ac_if;
411 int i, s;
412
413 s = splnet()splraise(0x4);
414
415 ngbe_stop(sc);
416
417 ngbe_setup_isb(sc);
418
419 /* Setup the receive address. */
420 hw->mac.ops.set_rar(sc, 0, hw->mac.addr, 0, NGBE_PSR_MAC_SWC_AD_H_AV0x80000000);
421
422 /* Get the latest mac address, user can use a LAA. */
423 bcopy(sc->sc_ac.ac_enaddr, sc->hw.mac.addr, ETHER_ADDR_LEN6);
424
425 hw->mac.ops.set_rar(sc, 0, hw->mac.addr, 0, 1);
426
427 ngbe_configure_pb(sc);
428
429 /* Program promiscuous mode and multicast filters. */
430 ngbe_iff(sc);
431
432 ngbe_setup_vlan_hw_support(sc);
433
434 /* Prepare transmit descriptors and buffers. */
435 if (ngbe_setup_transmit_structures(sc)) {
436 printf("%s: could not setup transmit structures\n",
437 DEVNAME(sc)((sc)->sc_dev.dv_xname));
438 ngbe_stop(sc);
439 splx(s)spllower(s);
440 return;
441 }
442 if (ngbe_initialize_transmit_unit(sc)) {
443 ngbe_stop(sc);
444 splx(s)spllower(s);
445 return;
446 }
447
448 /* Prepare receive descriptors and buffers. */
449 if (ngbe_setup_receive_structures(sc)) {
450 printf("%s: could not setup receive structures\n",
451 DEVNAME(sc)((sc)->sc_dev.dv_xname));
452 ngbe_stop(sc);
453 splx(s)spllower(s);
454 return;
455 }
456 if (ngbe_initialize_receive_unit(sc)) {
457 ngbe_stop(sc);
458 splx(s)spllower(s);
459 return;
460 }
461
462 ngbe_get_hw_control(hw);
463 ngbe_setup_gpie(hw);
464 ngbe_configure_ivars(sc);
465
466 if (ngbe_non_sfp_link_config(sc)) {
467 ngbe_stop(sc);
468 splx(s)spllower(s);
469 return;
470 }
471
472 /* Select GMII */
473 NGBE_WRITE_REG(hw, NGBE_MAC_TX_CFG,((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x11000
), ((((((struct ngbe_osdep *)(hw)->back)->os_memt)->
read_4((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x11000
))) & ~0x60000000) | 0x60000000)))
474 (NGBE_READ_REG(hw, NGBE_MAC_TX_CFG) & ~NGBE_MAC_TX_CFG_SPEED_MASK) |((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x11000
), ((((((struct ngbe_osdep *)(hw)->back)->os_memt)->
read_4((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x11000
))) & ~0x60000000) | 0x60000000)))
475 NGBE_MAC_TX_CFG_SPEED_1G)((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x11000
), ((((((struct ngbe_osdep *)(hw)->back)->os_memt)->
read_4((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x11000
))) & ~0x60000000) | 0x60000000)))
;
476
477 /* Clear any pending interrupts, may auto mask */
478 NGBE_READ_REG(hw, NGBE_PX_IC)((((struct ngbe_osdep *)(hw)->back)->os_memt)->read_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x00120
)))
;
479 NGBE_READ_REG(hw, NGBE_PX_MISC_IC)((((struct ngbe_osdep *)(hw)->back)->os_memt)->read_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x00100
)))
;
480 ngbe_enable_intr(sc);
481
482 switch (hw->bus.lan_id) {
483 case 0:
484 NGBE_WRITE_REG_MASK(hw, NGBE_MIS_PRB_CTL0x10010,
485 NGBE_MIS_PRB_CTL_LAN0_UP0x8, NGBE_MIS_PRB_CTL_LAN0_UP0x8);
486 break;
487 case 1:
488 NGBE_WRITE_REG_MASK(hw, NGBE_MIS_PRB_CTL0x10010,
489 NGBE_MIS_PRB_CTL_LAN1_UP0x4, NGBE_MIS_PRB_CTL_LAN1_UP0x4);
490 break;
491 case 2:
492 NGBE_WRITE_REG_MASK(hw, NGBE_MIS_PRB_CTL0x10010,
493 NGBE_MIS_PRB_CTL_LAN2_UP0x2, NGBE_MIS_PRB_CTL_LAN2_UP0x2);
494 break;
495 case 3:
496 NGBE_WRITE_REG_MASK(hw, NGBE_MIS_PRB_CTL0x10010,
497 NGBE_MIS_PRB_CTL_LAN3_UP0x1, NGBE_MIS_PRB_CTL_LAN3_UP0x1);
498 break;
499 }
500
501 NGBE_WRITE_REG_MASK(hw, NGBE_CFG_PORT_CTL0x14400, NGBE_CFG_PORT_CTL_PFRSTD0x00004000,
502 NGBE_CFG_PORT_CTL_PFRSTD0x00004000);
503
504 /* Now inform the stack we're ready */
505 ifp->if_flags |= IFF_RUNNING0x40;
506 for (i = 0; i < sc->sc_nqueues; i++)
507 ifq_clr_oactive(ifp->if_ifqs[i]);
508 splx(s)spllower(s);
509}
510
511int
512ngbe_ioctl(struct ifnet * ifp, u_long cmd, caddr_t data)
513{
514 struct ngbe_softc *sc = ifp->if_softc;
515 struct ifreq *ifr = (struct ifreq *)data;
516 int s, error = 0;
517
518 s = splnet()splraise(0x4);
519
520 switch (cmd) {
521 case SIOCSIFADDR((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((12)))
:
522 ifp->if_flags |= IFF_UP0x1;
523 if (!(ifp->if_flags & IFF_RUNNING0x40))
524 ngbe_init(sc);
525 break;
526 case SIOCSIFFLAGS((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((16)))
:
527 if (ifp->if_flags & IFF_UP0x1) {
528 if (ifp->if_flags & IFF_RUNNING0x40)
529 error = ENETRESET52;
530 else
531 ngbe_init(sc);
532 } else {
533 if (ifp->if_flags & IFF_RUNNING0x40)
534 ngbe_stop(sc);
535 }
536 break;
537 case SIOCSIFMEDIA(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((55)))
:
538 case SIOCGIFMEDIA(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifmediareq) & 0x1fff) << 16) | ((('i')) <<
8) | ((56)))
:
539 error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
540 break;
541 case SIOCGIFRXR((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((170)))
:
542 error = ngbe_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_dataifr_ifru.ifru_data);
543 break;
544 default:
545 error = ether_ioctl(ifp, &sc->sc_ac, cmd, data);
546 }
547
548 if (error == ENETRESET52) {
549 if (ifp->if_flags & IFF_RUNNING0x40) {
550 ngbe_disable_intr(sc);
551 ngbe_iff(sc);
552 ngbe_enable_intr(sc);
553 }
554 error = 0;
555 }
556
557 splx(s)spllower(s);
558 return error;
559}
560
561int
562ngbe_media_change(struct ifnet *ifp)
563{
564 struct ngbe_softc *sc = ifp->if_softc;
565 struct ngbe_hw *hw = &sc->hw;
566 struct ifmedia *ifm = &sc->sc_media;
567 uint32_t advertised = 0;
568
569 if (IFM_TYPE(ifm->ifm_media)((ifm->ifm_media) & 0x000000000000ff00ULL) != IFM_ETHER0x0000000000000100ULL)
570 return EINVAL22;
571
572 switch (IFM_SUBTYPE(ifm->ifm_media)((ifm->ifm_media) & 0x00000000000000ffULL)) {
573 case IFM_AUTO0ULL:
574 case IFM_1000_T16:
575 advertised |= NGBE_LINK_SPEED_AUTONEG(1 | 2 | 8);
576 break;
577 case IFM_100_TX6:
578 advertised |= NGBE_LINK_SPEED_100_FULL1;
579 break;
580 case IFM_10_T3:
581 advertised |= NGBE_LINK_SPEED_10_FULL8;
582 break;
583 default:
584 return EINVAL22;
585 }
586
587 hw->mac.autotry_restart = true1;
588 hw->mac.ops.setup_link(sc, advertised, 1);
589
590 return 0;
591}
592
593void
594ngbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
595{
596 struct ngbe_softc *sc = ifp->if_softc;
597
598 ifmr->ifm_status = IFM_AVALID0x0000000000000001ULL;
599 ifmr->ifm_active = IFM_ETHER0x0000000000000100ULL;
600
601 ngbe_update_link_status(sc);
602
603 if (!LINK_STATE_IS_UP(ifp->if_link_state)((ifp->if_data.ifi_link_state) >= 4 || (ifp->if_data
.ifi_link_state) == 0)
)
604 return;
605
606 ifmr->ifm_status |= IFM_ACTIVE0x0000000000000002ULL;
607
608 switch (sc->link_speed) {
609 case NGBE_LINK_SPEED_1GB_FULL2:
610 ifmr->ifm_active |= IFM_1000_T16 | IFM_FDX0x0000010000000000ULL;
611 break;
612 case NGBE_LINK_SPEED_100_FULL1:
613 ifmr->ifm_active |= IFM_100_TX6 | IFM_FDX0x0000010000000000ULL;
614 break;
615 case NGBE_LINK_SPEED_10_FULL8:
616 ifmr->ifm_active |= IFM_10_T3 | IFM_FDX0x0000010000000000ULL;
617 break;
618 }
619
620 switch (sc->hw.fc.current_mode) {
621 case ngbe_fc_tx_pause:
622 ifmr->ifm_active |= IFM_FLOW0x0000040000000000ULL | IFM_ETH_TXPAUSE0x0000000000040000ULL;
623 break;
624 case ngbe_fc_rx_pause:
625 ifmr->ifm_active |= IFM_FLOW0x0000040000000000ULL | IFM_ETH_RXPAUSE0x0000000000020000ULL;
626 break;
627 case ngbe_fc_full:
628 ifmr->ifm_active |= IFM_FLOW0x0000040000000000ULL | IFM_ETH_RXPAUSE0x0000000000020000ULL |
629 IFM_ETH_TXPAUSE0x0000000000040000ULL;
630 break;
631 default:
632 ifmr->ifm_active &= ~(IFM_FLOW0x0000040000000000ULL | IFM_ETH_RXPAUSE0x0000000000020000ULL |
633 IFM_ETH_TXPAUSE0x0000000000040000ULL);
634 break;
635 }
636}
637
638int
639ngbe_rxfill(struct rx_ring *rxr)
640{
641 struct ngbe_softc *sc = rxr->sc;
642 int i, post = 0;
643 u_int slots;
644
645 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, 0,(*(rxr->rxdma.dma_tag)->_dmamap_sync)((rxr->rxdma.dma_tag
), (rxr->rxdma.dma_map), (0), (rxr->rxdma.dma_map->dm_mapsize
), (0x08))
646 rxr->rxdma.dma_map->dm_mapsize, BUS_DMASYNC_POSTWRITE)(*(rxr->rxdma.dma_tag)->_dmamap_sync)((rxr->rxdma.dma_tag
), (rxr->rxdma.dma_map), (0), (rxr->rxdma.dma_map->dm_mapsize
), (0x08))
;
647
648 i = rxr->last_desc_filled;
649 for (slots = if_rxr_get(&rxr->rx_ring, sc->num_rx_desc); slots > 0;
650 slots--) {
651 if (++i == sc->num_rx_desc)
652 i = 0;
653
654 if (ngbe_get_buf(rxr, i) != 0)
655 break;
656
657 rxr->last_desc_filled = i;
658 post = 1;
659 }
660
661 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, 0,(*(rxr->rxdma.dma_tag)->_dmamap_sync)((rxr->rxdma.dma_tag
), (rxr->rxdma.dma_map), (0), (rxr->rxdma.dma_map->dm_mapsize
), (0x04))
662 rxr->rxdma.dma_map->dm_mapsize, BUS_DMASYNC_PREWRITE)(*(rxr->rxdma.dma_tag)->_dmamap_sync)((rxr->rxdma.dma_tag
), (rxr->rxdma.dma_map), (0), (rxr->rxdma.dma_map->dm_mapsize
), (0x04))
;
663
664 if_rxr_put(&rxr->rx_ring, slots)do { (&rxr->rx_ring)->rxr_alive -= (slots); } while
(0)
;
665
666 return post;
667}
668
669int
670ngbe_rxrinfo(struct ngbe_softc *sc, struct if_rxrinfo *ifri)
671{
672 struct if_rxring_info *ifr;
673 struct rx_ring *rxr;
674 int error, i, n = 0;
675
676 if ((ifr = mallocarray(sc->sc_nqueues, sizeof(*ifr), M_DEVBUF2,
677 M_WAITOK0x0001 | M_CANFAIL0x0004 | M_ZERO0x0008)) == NULL((void *)0))
678 return ENOMEM12;
679
680 for (i = 0; i < sc->sc_nqueues; i++) {
681 rxr = &sc->rx_rings[i];
682 ifr[n].ifr_size = MCLBYTES(1 << 11);
683 snprintf(ifr[n].ifr_name, sizeof(ifr[n].ifr_name), "%d", i);
684 ifr[n].ifr_info = rxr->rx_ring;
685 n++;
686 }
687
688 error = if_rxr_info_ioctl(ifri, sc->sc_nqueues, ifr);
689 free(ifr, M_DEVBUF2, sc->sc_nqueues * sizeof(*ifr));
690
691 return error;
692}
693
694void
695ngbe_start(struct ifqueue *ifq)
696{
697 struct ifnet *ifp = ifq->ifq_if;
698 struct ngbe_softc *sc = ifp->if_softc;
699 struct tx_ring *txr = ifq->ifq_softc_ifq_ptr._ifq_softc;
700 struct mbuf *m;
701 unsigned int prod, free, used;
702 int post = 0;
703
704 if (!sc->link_up)
1
Assuming field 'link_up' is not equal to 0
2
Taking false branch
705 return;
706
707 prod = txr->next_avail_desc;
708 free = txr->next_to_clean;
709 if (free <= prod)
3
Assuming 'free' is > 'prod'
4
Taking false branch
710 free += sc->num_tx_desc;
711 free -= prod;
712
713 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, 0,(*(txr->txdma.dma_tag)->_dmamap_sync)((txr->txdma.dma_tag
), (txr->txdma.dma_map), (0), (txr->txdma.dma_map->dm_mapsize
), (0x08))
714 txr->txdma.dma_map->dm_mapsize, BUS_DMASYNC_POSTWRITE)(*(txr->txdma.dma_tag)->_dmamap_sync)((txr->txdma.dma_tag
), (txr->txdma.dma_map), (0), (txr->txdma.dma_map->dm_mapsize
), (0x08))
;
715
716 for (;;) {
5
Loop condition is true. Entering loop body
717 if (free <= NGBE_MAX_SCATTER32 + 2) {
6
Assuming the condition is false
7
Taking false branch
718 ifq_set_oactive(ifq);
719 break;
720 }
721
722 m = ifq_dequeue(ifq);
723 if (m == NULL((void *)0))
8
Assuming 'm' is not equal to NULL
9
Taking false branch
724 break;
725
726 used = ngbe_encap(txr, m);
10
Calling 'ngbe_encap'
727 if (used == 0) {
728 m_freem(m);
729 continue;
730 }
731
732 free -= used;
733
734#if NBPFILTER1 > 0
735 if (ifp->if_bpf)
736 bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT(1 << 1));
737#endif
738
739 /* Set timeout in case hardware has problems transmitting */
740 txr->watchdog_timer = NGBE_TX_TIMEOUT5;
741 ifp->if_timer = NGBE_TX_TIMEOUT5;
742
743 post = 1;
744 }
745
746 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, 0,(*(txr->txdma.dma_tag)->_dmamap_sync)((txr->txdma.dma_tag
), (txr->txdma.dma_map), (0), (txr->txdma.dma_map->dm_mapsize
), (0x04))
747 txr->txdma.dma_map->dm_mapsize, BUS_DMASYNC_PREWRITE)(*(txr->txdma.dma_tag)->_dmamap_sync)((txr->txdma.dma_tag
), (txr->txdma.dma_map), (0), (txr->txdma.dma_map->dm_mapsize
), (0x04))
;
748
749 if (post)
750 NGBE_WRITE_REG(&sc->hw, NGBE_PX_TR_WP(txr->me),((((struct ngbe_osdep *)(&sc->hw)->back)->os_memt
)->write_4((((struct ngbe_osdep *)(&sc->hw)->back
)->os_memh), ((0x03008 + ((txr->me) * 0x40))), (txr->
next_avail_desc)))
751 txr->next_avail_desc)((((struct ngbe_osdep *)(&sc->hw)->back)->os_memt
)->write_4((((struct ngbe_osdep *)(&sc->hw)->back
)->os_memh), ((0x03008 + ((txr->me) * 0x40))), (txr->
next_avail_desc)))
;
752}
753
754void
755ngbe_stop(struct ngbe_softc *sc)
756{
757 struct ifnet *ifp = &sc->sc_ac.ac_if;
758 struct ngbe_hw *hw = &sc->hw;
759 uint32_t rxdctl;
760 int i, wait_loop = NGBE_MAX_RX_DESC_POLL10;
761
762 /* Tell the stack that the interface is no longer active. */
763 ifp->if_flags &= ~IFF_RUNNING0x40;
764 ifp->if_timer = 0;
765
766 ngbe_disable_pcie_master(sc);
767 /* Disable receives */
768 hw->mac.ops.disable_rx(hw);
769
770 for (i = 0; i < sc->sc_nqueues; i++) {
771 NGBE_WRITE_REG_MASK(hw, NGBE_PX_RR_CFG(i)(0x01010 + ((i) * 0x40)),
772 NGBE_PX_RR_CFG_RR_EN0x00000001, 0);
773 do {
774 DELAY(10)(*delay_func)(10);
775 rxdctl = NGBE_READ_REG(hw, NGBE_PX_RR_CFG(i))((((struct ngbe_osdep *)(hw)->back)->os_memt)->read_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), ((0x01010
+ ((i) * 0x40)))))
;
776 } while (--wait_loop && (rxdctl & NGBE_PX_RR_CFG_RR_EN0x00000001));
777 if (!wait_loop) {
778 printf("%s: Rx queue %d not cleared within "
779 "the polling period\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), i);
780 return;
781 }
782 }
783
784 ngbe_disable_intr(sc);
785
786 switch (hw->bus.lan_id) {
787 case 0:
788 NGBE_WRITE_REG_MASK(hw, NGBE_MIS_PRB_CTL0x10010,
789 NGBE_MIS_PRB_CTL_LAN0_UP0x8, 0);
790 break;
791 case 1:
792 NGBE_WRITE_REG_MASK(hw, NGBE_MIS_PRB_CTL0x10010,
793 NGBE_MIS_PRB_CTL_LAN1_UP0x4, 0);
794 break;
795 case 2:
796 NGBE_WRITE_REG_MASK(hw, NGBE_MIS_PRB_CTL0x10010,
797 NGBE_MIS_PRB_CTL_LAN2_UP0x2, 0);
798 break;
799 case 3:
800 NGBE_WRITE_REG_MASK(hw, NGBE_MIS_PRB_CTL0x10010,
801 NGBE_MIS_PRB_CTL_LAN3_UP0x1, 0);
802 break;
803 }
804
805 NGBE_WRITE_REG_MASK(hw, NGBE_MAC_TX_CFG0x11000, NGBE_MAC_TX_CFG_TE0x00000001, 0);
806 for (i = 0; i < sc->sc_nqueues; i++)
807 NGBE_WRITE_REG(hw, NGBE_PX_TR_CFG(i), NGBE_PX_TR_CFG_SWFLSH)((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), ((0x03010
+ ((i) * 0x40))), (0x04000000)))
;
808 NGBE_WRITE_REG_MASK(hw, NGBE_TDM_CTL0x18000, NGBE_TDM_CTL_TE0x1, 0);
809
810 ngbe_reset(sc);
811
812 hw->mac.ops.set_rar(sc, 0, hw->mac.addr, 0, NGBE_PSR_MAC_SWC_AD_H_AV0x80000000);
813
814 intr_barrier(sc->tag);
815 for (i = 0; i < sc->sc_nqueues; i++) {
816 struct ifqueue *ifq = ifp->if_ifqs[i];
817 ifq_barrier(ifq);
818 ifq_clr_oactive(ifq);
819
820 if (sc->queues[i].tag != NULL((void *)0))
821 intr_barrier(sc->queues[i].tag);
822 timeout_del(&sc->rx_rings[i].rx_refill);
823 }
824
825 ngbe_free_transmit_structures(sc);
826 ngbe_free_receive_structures(sc);
827
828 ngbe_update_link_status(sc);
829}
830
831void
832ngbe_update_link_status(struct ngbe_softc *sc)
833{
834 struct ifnet *ifp = &sc->sc_ac.ac_if;
835 struct ngbe_hw *hw = &sc->hw;
836 uint32_t reg, speed = 0;
837 int link_state = LINK_STATE_DOWN2;
838
839 hw->mac.ops.check_link(hw, &sc->link_speed, &sc->link_up, 0);
840
841 ifp->if_baudrateif_data.ifi_baudrate = 0;
842 if (sc->link_up) {
843 link_state = LINK_STATE_FULL_DUPLEX6;
844
845 switch (sc->link_speed) {
846 case NGBE_LINK_SPEED_UNKNOWN0:
847 ifp->if_baudrateif_data.ifi_baudrate = 0;
848 break;
849 case NGBE_LINK_SPEED_1GB_FULL2:
850 ifp->if_baudrateif_data.ifi_baudrate = IF_Gbps(1)((((((1) * 1000ULL) * 1000ULL) * 1000ULL)));
851 speed = 2;
852 break;
853 case NGBE_LINK_SPEED_100_FULL1:
854 ifp->if_baudrateif_data.ifi_baudrate = IF_Mbps(100)((((100) * 1000ULL) * 1000ULL));
855 speed = 1;
856 break;
857 case NGBE_LINK_SPEED_10_FULL8:
858 ifp->if_baudrateif_data.ifi_baudrate = IF_Mbps(10)((((10) * 1000ULL) * 1000ULL));
859 break;
860 }
861 NGBE_WRITE_REG_MASK(hw, NGBE_CFG_LAN_SPEED0x14440, 0x3, speed);
862
863 /* Update any flow control changes */
864 hw->mac.ops.fc_enable(sc);
865
866 ngbe_set_rx_drop_en(sc);
867
868 if (sc->link_speed & (NGBE_LINK_SPEED_1GB_FULL2 |
869 NGBE_LINK_SPEED_100_FULL1 | NGBE_LINK_SPEED_10_FULL8)) {
870 NGBE_WRITE_REG(hw, NGBE_MAC_TX_CFG,((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x11000
), ((((((struct ngbe_osdep *)(hw)->back)->os_memt)->
read_4((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x11000
))) & ~0x60000000) | 0x00000001 | 0x60000000)))
871 (NGBE_READ_REG(hw, NGBE_MAC_TX_CFG) &((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x11000
), ((((((struct ngbe_osdep *)(hw)->back)->os_memt)->
read_4((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x11000
))) & ~0x60000000) | 0x00000001 | 0x60000000)))
872 ~NGBE_MAC_TX_CFG_SPEED_MASK) | NGBE_MAC_TX_CFG_TE |((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x11000
), ((((((struct ngbe_osdep *)(hw)->back)->os_memt)->
read_4((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x11000
))) & ~0x60000000) | 0x00000001 | 0x60000000)))
873 NGBE_MAC_TX_CFG_SPEED_1G)((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x11000
), ((((((struct ngbe_osdep *)(hw)->back)->os_memt)->
read_4((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x11000
))) & ~0x60000000) | 0x00000001 | 0x60000000)))
;
874 }
875
876 reg = NGBE_READ_REG(hw, NGBE_MAC_RX_CFG)((((struct ngbe_osdep *)(hw)->back)->os_memt)->read_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x11004
)))
;
877 NGBE_WRITE_REG(hw, NGBE_MAC_RX_CFG, reg)((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x11004
), (reg)))
;
878 NGBE_WRITE_REG(hw, NGBE_MAC_PKT_FLT, NGBE_MAC_PKT_FLT_PR)((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x11008
), (0x00000001)))
;
879 reg = NGBE_READ_REG(hw, NGBE_MAC_WDG_TIMEOUT)((((struct ngbe_osdep *)(hw)->back)->os_memt)->read_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x1100c
)))
;
880 NGBE_WRITE_REG(hw, NGBE_MAC_WDG_TIMEOUT, reg)((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x1100c
), (reg)))
;
881 }
882
883 if (ifp->if_link_stateif_data.ifi_link_state != link_state) {
884 ifp->if_link_stateif_data.ifi_link_state = link_state;
885 if_link_state_change(ifp);
886 }
887}
888
889void
890ngbe_watchdog(struct ifnet *ifp)
891{
892 struct ngbe_softc *sc = ifp->if_softc;
893 struct tx_ring *txr = sc->tx_rings;
894 int i, tx_hang = 0;
895
896 /*
897 * The timer is set to 5 every time ixgbe_start() queues a packet.
898 * Anytime all descriptors are clean the timer is set to 0.
899 */
900 for (i = 0; i < sc->sc_nqueues; i++, txr++) {
901 if (txr->watchdog_timer == 0 || --txr->watchdog_timer)
902 continue;
903 else {
904 tx_hang = 1;
905 break;
906 }
907 }
908 if (!tx_hang)
909 return;
910
911 printf("%s: watchdog timeout\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
912 ifp->if_oerrorsif_data.ifi_oerrors++;
913
914 ifp->if_flags &= ~IFF_RUNNING0x40;
915 ngbe_init(sc);
916}
917
918int
919ngbe_allocate_pci_resources(struct ngbe_softc *sc)
920{
921 struct ngbe_osdep *os = &sc->osdep;
922 struct pci_attach_args *pa = &os->os_pa;
923 pcireg_t memtype;
924
925 memtype = PCI_MAPREG_TYPE_MEM0x00000000 | PCI_MAPREG_MEM_TYPE_64BIT0x00000004;
926 if (pci_mapreg_map(pa, NGBE_PCIREG0x10, memtype, 0, &os->os_memt,
927 &os->os_memh, &os->os_membase, &os->os_memsize, 0)) {
928 printf(": unable to map registers\n");
929 return ENXIO6;
930 }
931 sc->hw.back = os;
932
933 if (ngbe_setup_msix(sc))
934 return EINVAL22;
935
936 return 0;
937}
938
939void
940ngbe_free_pci_resources(struct ngbe_softc *sc)
941{
942 struct ngbe_osdep *os = &sc->osdep;
943 struct pci_attach_args *pa = &os->os_pa;
944
945 if (sc->tag)
946 pci_intr_disestablish(pa->pa_pc, sc->tag);
947 sc->tag = NULL((void *)0);
948 if (os->os_membase)
949 bus_space_unmap(os->os_memt, os->os_memh, os->os_memsize);
950 os->os_membase = 0;
951}
952
953int
954ngbe_allocate_msix(struct ngbe_softc *sc)
955{
956 struct ngbe_osdep *os = &sc->osdep;
957 struct pci_attach_args *pa = &os->os_pa;
958 struct ngbe_queue *nq;
959 pci_intr_handle_t ih;
960 int i, error = 0;
961
962 for (i = 0, nq = sc->queues; i < sc->sc_nqueues; i++, nq++) {
963 if (pci_intr_map_msix(pa, i, &ih)) {
964 printf(": unable to map msi-x vector %d", i);
965 error = ENXIO6;
966 goto fail;
967 }
968
969 nq->tag = pci_intr_establish_cpu(pa->pa_pc, ih,
970 IPL_NET0x4 | IPL_MPSAFE0x100, intrmap_cpu(sc->sc_intrmap, i),
971 ngbe_intr_queue, nq, nq->name);
972 if (nq->tag == NULL((void *)0)) {
973 printf(": unable to establish interrupt %d\n", i);
974 error = ENXIO6;
975 goto fail;
976 }
977
978 nq->msix = i;
979 }
980
981 /* Now the link status/control last MSI-X vector */
982 if (pci_intr_map_msix(pa, i, &ih)) {
983 printf(": unable to map link vector\n");
984 error = ENXIO6;
985 goto fail;
986 }
987
988 sc->tag = pci_intr_establish(pa->pa_pc, ih, IPL_NET0x4 | IPL_MPSAFE0x100,
989 ngbe_intr_link, sc, sc->sc_dev.dv_xname);
990 if (sc->tag == NULL((void *)0)) {
991 printf(": unable to establish link interrupt\n");
992 error = ENXIO6;
993 goto fail;
994 }
995
996 sc->linkvec = i;
997 printf(", %s, %d queue%s", pci_intr_string(pa->pa_pc, ih), i,
998 (i > 1) ? "s" : "");
999
1000 return 0;
1001fail:
1002 for (nq = sc->queues; i > 0; i--, nq++) {
1003 if (nq->tag == NULL((void *)0))
1004 continue;
1005 pci_intr_disestablish(pa->pa_pc, nq->tag);
1006 nq->tag = NULL((void *)0);
1007 }
1008
1009 return error;
1010}
1011
1012void
1013ngbe_setup_interface(struct ngbe_softc *sc)
1014{
1015 struct ifnet *ifp = &sc->sc_ac.ac_if;
1016 int i;
1017
1018 strlcpy(ifp->if_xname, DEVNAME(sc)((sc)->sc_dev.dv_xname), IFNAMSIZ16);
1019 ifp->if_softc = sc;
1020 ifp->if_flags = IFF_BROADCAST0x2 | IFF_SIMPLEX0x800 | IFF_MULTICAST0x8000;
1021 ifp->if_xflags = IFXF_MPSAFE0x1;
1022 ifp->if_ioctl = ngbe_ioctl;
1023 ifp->if_qstart = ngbe_start;
1024 ifp->if_watchdog = ngbe_watchdog;
1025 ifp->if_hardmtu = NGBE_MAX_JUMBO_FRAME_SIZE9432 - ETHER_HDR_LEN((6 * 2) + 2) -
1026 ETHER_CRC_LEN4;
1027 ifq_init_maxlen(&ifp->if_snd, sc->num_tx_desc - 1);
1028
1029 ifp->if_capabilitiesif_data.ifi_capabilities = IFCAP_VLAN_MTU0x00000010;
1030
1031#if NVLAN1 > 0
1032 ifp->if_capabilitiesif_data.ifi_capabilities |= IFCAP_VLAN_HWTAGGING0x00000020;
1033#endif
1034
1035 /* Initialize ifmedia structures. */
1036 ifmedia_init(&sc->sc_media, IFM_IMASK0xff00000000000000ULL, ngbe_media_change,
1037 ngbe_media_status);
1038 ifmedia_add(&sc->sc_media, IFM_ETHER0x0000000000000100ULL | IFM_10_T3 | IFM_FDX0x0000010000000000ULL, 0, NULL((void *)0));
1039 ifmedia_add(&sc->sc_media, IFM_ETHER0x0000000000000100ULL | IFM_100_TX6 | IFM_FDX0x0000010000000000ULL, 0, NULL((void *)0));
1040 ifmedia_add(&sc->sc_media, IFM_ETHER0x0000000000000100ULL | IFM_1000_T16 | IFM_FDX0x0000010000000000ULL, 0, NULL((void *)0));
1041
1042 ifmedia_add(&sc->sc_media, IFM_ETHER0x0000000000000100ULL | IFM_AUTO0ULL, 0, NULL((void *)0));
1043 ifmedia_set(&sc->sc_media, IFM_ETHER0x0000000000000100ULL | IFM_AUTO0ULL);
1044 sc->sc_media.ifm_media = sc->sc_media.ifm_cur->ifm_media;
1045
1046 if_attach(ifp);
1047 ether_ifattach(ifp);
1048
1049 if_attach_queues(ifp, sc->sc_nqueues);
1050 if_attach_iqueues(ifp, sc->sc_nqueues);
1051 for (i = 0; i < sc->sc_nqueues; i++) {
1052 struct ifqueue *ifq = ifp->if_ifqs[i];
1053 struct ifiqueue *ifiq = ifp->if_iqs[i];
1054 struct tx_ring *txr = &sc->tx_rings[i];
1055 struct rx_ring *rxr = &sc->rx_rings[i];
1056
1057 ifq->ifq_softc_ifq_ptr._ifq_softc = txr;
1058 txr->ifq = ifq;
1059
1060 ifiq->ifiq_softc_ifiq_ptr._ifiq_softc = rxr;
1061 rxr->ifiq = ifiq;
1062 }
1063}
1064
1065int
1066ngbe_setup_msix(struct ngbe_softc *sc)
1067{
1068 struct ngbe_osdep *os = &sc->osdep;
1069 struct pci_attach_args *pa = &os->os_pa;
1070 int nmsix;
1071
1072 nmsix = pci_intr_msix_count(pa);
1073 if (nmsix <= 1) {
1074 printf(": not enough msi-x vectors\n");
1075 return EINVAL22;
1076 }
1077
1078 /* Give one vector to events. */
1079 nmsix--;
1080
1081 sc->sc_intrmap = intrmap_create(&sc->sc_dev, nmsix, NGBE_MAX_VECTORS8,
1082 INTRMAP_POWEROF2(1 << 0));
1083 sc->sc_nqueues = intrmap_count(sc->sc_intrmap);
1084
1085 return 0;
1086}
1087
1088int
1089ngbe_dma_malloc(struct ngbe_softc *sc, bus_size_t size,
1090 struct ngbe_dma_alloc *dma)
1091{
1092 struct ngbe_osdep *os = &sc->osdep;
1093
1094 dma->dma_tag = os->os_pa.pa_dmat;
1095
1096 if (bus_dmamap_create(dma->dma_tag, size, 1, size, 0, BUS_DMA_NOWAIT,(*(dma->dma_tag)->_dmamap_create)((dma->dma_tag), (size
), (1), (size), (0), (0x0001), (&dma->dma_map))
1097 &dma->dma_map)(*(dma->dma_tag)->_dmamap_create)((dma->dma_tag), (size
), (1), (size), (0), (0x0001), (&dma->dma_map))
)
1098 return 1;
1099 if (bus_dmamem_alloc(dma->dma_tag, size, PAGE_SIZE, 0, &dma->dma_seg,(*(dma->dma_tag)->_dmamem_alloc)((dma->dma_tag), (size
), ((1 << 12)), (0), (&dma->dma_seg), (1), (&
dma->dma_nseg), (0x0001))
1100 1, &dma->dma_nseg, BUS_DMA_NOWAIT)(*(dma->dma_tag)->_dmamem_alloc)((dma->dma_tag), (size
), ((1 << 12)), (0), (&dma->dma_seg), (1), (&
dma->dma_nseg), (0x0001))
)
1101 goto destroy;
1102 if (bus_dmamem_map(dma->dma_tag, &dma->dma_seg, dma->dma_nseg, size,(*(dma->dma_tag)->_dmamem_map)((dma->dma_tag), (&
dma->dma_seg), (dma->dma_nseg), (size), (&dma->dma_vaddr
), (0x0001 | 0x0004))
1103 &dma->dma_vaddr, BUS_DMA_NOWAIT | BUS_DMA_COHERENT)(*(dma->dma_tag)->_dmamem_map)((dma->dma_tag), (&
dma->dma_seg), (dma->dma_nseg), (size), (&dma->dma_vaddr
), (0x0001 | 0x0004))
)
1104 goto free;
1105 if (bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr, size,(*(dma->dma_tag)->_dmamap_load)((dma->dma_tag), (dma
->dma_map), (dma->dma_vaddr), (size), (((void *)0)), (0x0001
))
1106 NULL, BUS_DMA_NOWAIT)(*(dma->dma_tag)->_dmamap_load)((dma->dma_tag), (dma
->dma_map), (dma->dma_vaddr), (size), (((void *)0)), (0x0001
))
)
1107 goto unmap;
1108
1109 dma->dma_size = size;
1110
1111 return 0;
1112unmap:
1113 bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, size)(*(dma->dma_tag)->_dmamem_unmap)((dma->dma_tag), (dma
->dma_vaddr), (size))
;
1114free:
1115 bus_dmamem_free(dma->dma_tag, &dma->dma_seg, dma->dma_nseg)(*(dma->dma_tag)->_dmamem_free)((dma->dma_tag), (&
dma->dma_seg), (dma->dma_nseg))
;
1116destroy:
1117 bus_dmamap_destroy(dma->dma_tag, dma->dma_map)(*(dma->dma_tag)->_dmamap_destroy)((dma->dma_tag), (
dma->dma_map))
;
1118 dma->dma_map = NULL((void *)0);
1119 dma->dma_tag = NULL((void *)0);
1120 return 1;
1121}
1122
1123void
1124ngbe_dma_free(struct ngbe_softc *sc, struct ngbe_dma_alloc *dma)
1125{
1126 if (dma->dma_tag == NULL((void *)0))
1127 return;
1128
1129 if (dma->dma_map != NULL((void *)0)) {
1130 bus_dmamap_sync(dma->dma_tag, dma->dma_map, 0,(*(dma->dma_tag)->_dmamap_sync)((dma->dma_tag), (dma
->dma_map), (0), (dma->dma_map->dm_mapsize), (0x02 |
0x08))
1131 dma->dma_map->dm_mapsize,(*(dma->dma_tag)->_dmamap_sync)((dma->dma_tag), (dma
->dma_map), (0), (dma->dma_map->dm_mapsize), (0x02 |
0x08))
1132 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE)(*(dma->dma_tag)->_dmamap_sync)((dma->dma_tag), (dma
->dma_map), (0), (dma->dma_map->dm_mapsize), (0x02 |
0x08))
;
1133 bus_dmamap_unload(dma->dma_tag, dma->dma_map)(*(dma->dma_tag)->_dmamap_unload)((dma->dma_tag), (dma
->dma_map))
;
1134 bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, dma->dma_size)(*(dma->dma_tag)->_dmamem_unmap)((dma->dma_tag), (dma
->dma_vaddr), (dma->dma_size))
;
1135 bus_dmamem_free(dma->dma_tag, &dma->dma_seg, dma->dma_nseg)(*(dma->dma_tag)->_dmamem_free)((dma->dma_tag), (&
dma->dma_seg), (dma->dma_nseg))
;
1136 bus_dmamap_destroy(dma->dma_tag, dma->dma_map)(*(dma->dma_tag)->_dmamap_destroy)((dma->dma_tag), (
dma->dma_map))
;
1137 dma->dma_map = NULL((void *)0);
1138 }
1139}
1140
1141int
1142ngbe_allocate_isb(struct ngbe_softc *sc)
1143{
1144 int isize;
1145
1146 isize = sizeof(uint32_t) * NGBE_ISB_MAX;
1147 if (ngbe_dma_malloc(sc, isize, &sc->isbdma)) {
1148 printf("%s: unable to allocate interrupt status resources\n",
1149 DEVNAME(sc)((sc)->sc_dev.dv_xname));
1150 return ENOMEM12;
1151 }
1152 sc->isb_base = (uint32_t *)sc->isbdma.dma_vaddr;
1153 bzero((void *)sc->isb_base, isize)__builtin_bzero(((void *)sc->isb_base), (isize));
1154
1155 return 0;
1156}
1157
1158void
1159ngbe_free_isb(struct ngbe_softc *sc)
1160{
1161 ngbe_dma_free(sc, &sc->isbdma);
1162}
1163
1164int
1165ngbe_allocate_queues(struct ngbe_softc *sc)
1166{
1167 struct ngbe_queue *nq;
1168 struct tx_ring *txr;
1169 struct rx_ring *rxr;
1170 int i, rsize, rxconf, tsize, txconf;
1171
1172 /* Allocate the top level queue structs. */
1173 sc->queues = mallocarray(sc->sc_nqueues, sizeof(struct ngbe_queue),
1174 M_DEVBUF2, M_NOWAIT0x0002 | M_ZERO0x0008);
1175 if (sc->queues == NULL((void *)0)) {
1176 printf("%s: unable to allocate queue\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
1177 goto fail;
1178 }
1179
1180 /* Allocate the Tx ring. */
1181 sc->tx_rings = mallocarray(sc->sc_nqueues, sizeof(struct tx_ring),
1182 M_DEVBUF2, M_NOWAIT0x0002 | M_ZERO0x0008);
1183 if (sc->tx_rings == NULL((void *)0)) {
1184 printf("%s: unable to allocate Tx ring\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
1185 goto fail;
1186 }
1187
1188 /* Allocate the Rx ring. */
1189 sc->rx_rings = mallocarray(sc->sc_nqueues, sizeof(struct rx_ring),
1190 M_DEVBUF2, M_NOWAIT0x0002 | M_ZERO0x0008);
1191 if (sc->rx_rings == NULL((void *)0)) {
1192 printf("%s: unable to allocate Rx ring\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
1193 goto rx_fail;
1194 }
1195
1196 txconf = rxconf = 0;
1197
1198 /* Set up the Tx queues. */
1199 tsize = roundup2(sc->num_tx_desc * sizeof(union ngbe_tx_desc),(((sc->num_tx_desc * sizeof(union ngbe_tx_desc)) + ((1 <<
12)) - 1) & ~(((1 << 12)) - 1))
1200 PAGE_SIZE)(((sc->num_tx_desc * sizeof(union ngbe_tx_desc)) + ((1 <<
12)) - 1) & ~(((1 << 12)) - 1))
;
1201 for (i = 0; i < sc->sc_nqueues; i++, txconf++) {
1202 txr = &sc->tx_rings[i];
1203 txr->sc = sc;
1204 txr->me = i;
1205
1206 if (ngbe_dma_malloc(sc, tsize, &txr->txdma)) {
1207 printf("%s: unable to allocate Tx descriptor\n",
1208 DEVNAME(sc)((sc)->sc_dev.dv_xname));
1209 goto err_tx_desc;
1210 }
1211 txr->tx_base = (union ngbe_tx_desc *)txr->txdma.dma_vaddr;
1212 bzero((void *)txr->tx_base, tsize)__builtin_bzero(((void *)txr->tx_base), (tsize));
1213 }
1214
1215 /* Set up the Rx queues. */
1216 rsize = roundup2(sc->num_rx_desc * sizeof(union ngbe_rx_desc),(((sc->num_rx_desc * sizeof(union ngbe_rx_desc)) + ((1 <<
12)) - 1) & ~(((1 << 12)) - 1))
1217 PAGE_SIZE)(((sc->num_rx_desc * sizeof(union ngbe_rx_desc)) + ((1 <<
12)) - 1) & ~(((1 << 12)) - 1))
;
1218 for (i = 0; i < sc->sc_nqueues; i++, rxconf++) {
1219 rxr = &sc->rx_rings[i];
1220 rxr->sc = sc;
1221 rxr->me = i;
1222 timeout_set(&rxr->rx_refill, ngbe_rxrefill, rxr);
1223
1224 if (ngbe_dma_malloc(sc, rsize, &rxr->rxdma)) {
1225 printf("%s: unable to allocate Rx descriptor\n",
1226 DEVNAME(sc)((sc)->sc_dev.dv_xname));
1227 goto err_rx_desc;
1228 }
1229 rxr->rx_base = (union ngbe_rx_desc *)rxr->rxdma.dma_vaddr;
1230 bzero((void *)rxr->rx_base, rsize)__builtin_bzero(((void *)rxr->rx_base), (rsize));
1231 }
1232
1233 /* Set up the queue holding structs. */
1234 for (i = 0; i < sc->sc_nqueues; i++) {
1235 nq = &sc->queues[i];
1236 nq->sc = sc;
1237 nq->txr = &sc->tx_rings[i];
1238 nq->rxr = &sc->rx_rings[i];
1239 snprintf(nq->name, sizeof(nq->name), "%s:%d", DEVNAME(sc)((sc)->sc_dev.dv_xname), i);
1240 }
1241
1242 return 0;
1243
1244err_rx_desc:
1245 for (rxr = sc->rx_rings; rxconf > 0; rxr++, rxconf--)
1246 ngbe_dma_free(sc, &rxr->rxdma);
1247err_tx_desc:
1248 for (txr = sc->tx_rings; txconf > 0; txr++, txconf--)
1249 ngbe_dma_free(sc, &txr->txdma);
1250 free(sc->rx_rings, M_DEVBUF2, sc->sc_nqueues * sizeof(struct rx_ring));
1251 sc->rx_rings = NULL((void *)0);
1252rx_fail:
1253 free(sc->tx_rings, M_DEVBUF2, sc->sc_nqueues * sizeof(struct tx_ring));
1254 sc->tx_rings = NULL((void *)0);
1255fail:
1256 return ENOMEM12;
1257}
1258
1259void
1260ngbe_free_receive_structures(struct ngbe_softc *sc)
1261{
1262 struct rx_ring *rxr;
1263 int i;
1264
1265 for (i = 0, rxr = sc->rx_rings; i < sc->sc_nqueues; i++, rxr++)
1266 if_rxr_init(&rxr->rx_ring, 0, 0);
1267
1268 for (i = 0, rxr = sc->rx_rings; i < sc->sc_nqueues; i++, rxr++)
1269 ngbe_free_receive_buffers(rxr);
1270}
1271
1272void
1273ngbe_free_receive_buffers(struct rx_ring *rxr)
1274{
1275 struct ngbe_softc *sc;
1276 struct ngbe_rx_buf *rxbuf;
1277 int i;
1278
1279 sc = rxr->sc;
1280 if (rxr->rx_buffers != NULL((void *)0)) {
1281 for (i = 0; i < sc->num_rx_desc; i++) {
1282 rxbuf = &rxr->rx_buffers[i];
1283 if (rxbuf->buf != NULL((void *)0)) {
1284 bus_dmamap_sync(rxr->rxdma.dma_tag, rxbuf->map,(*(rxr->rxdma.dma_tag)->_dmamap_sync)((rxr->rxdma.dma_tag
), (rxbuf->map), (0), (rxbuf->map->dm_mapsize), (0x02
))
1285 0, rxbuf->map->dm_mapsize,(*(rxr->rxdma.dma_tag)->_dmamap_sync)((rxr->rxdma.dma_tag
), (rxbuf->map), (0), (rxbuf->map->dm_mapsize), (0x02
))
1286 BUS_DMASYNC_POSTREAD)(*(rxr->rxdma.dma_tag)->_dmamap_sync)((rxr->rxdma.dma_tag
), (rxbuf->map), (0), (rxbuf->map->dm_mapsize), (0x02
))
;
1287 bus_dmamap_unload(rxr->rxdma.dma_tag,(*(rxr->rxdma.dma_tag)->_dmamap_unload)((rxr->rxdma.
dma_tag), (rxbuf->map))
1288 rxbuf->map)(*(rxr->rxdma.dma_tag)->_dmamap_unload)((rxr->rxdma.
dma_tag), (rxbuf->map))
;
1289 m_freem(rxbuf->buf);
1290 rxbuf->buf = NULL((void *)0);
1291 }
1292 bus_dmamap_destroy(rxr->rxdma.dma_tag, rxbuf->map)(*(rxr->rxdma.dma_tag)->_dmamap_destroy)((rxr->rxdma
.dma_tag), (rxbuf->map))
;
1293 rxbuf->map = NULL((void *)0);
1294 }
1295 free(rxr->rx_buffers, M_DEVBUF2,
1296 sc->num_rx_desc * sizeof(struct ngbe_rx_buf));
1297 rxr->rx_buffers = NULL((void *)0);
1298 }
1299}
1300
1301void
1302ngbe_free_transmit_structures(struct ngbe_softc *sc)
1303{
1304 struct tx_ring *txr = sc->tx_rings;
1305 int i;
1306
1307 for (i = 0; i < sc->sc_nqueues; i++, txr++)
1308 ngbe_free_transmit_buffers(txr);
1309}
1310
1311void
1312ngbe_free_transmit_buffers(struct tx_ring *txr)
1313{
1314 struct ngbe_softc *sc = txr->sc;
1315 struct ngbe_tx_buf *tx_buffer;
1316 int i;
1317
1318 if (txr->tx_buffers == NULL((void *)0))
1319 return;
1320
1321 tx_buffer = txr->tx_buffers;
1322 for (i = 0; i < sc->num_tx_desc; i++, tx_buffer++) {
1323 if (tx_buffer->map != NULL((void *)0) && tx_buffer->map->dm_nsegs > 0) {
1324 bus_dmamap_sync(txr->txdma.dma_tag, tx_buffer->map,(*(txr->txdma.dma_tag)->_dmamap_sync)((txr->txdma.dma_tag
), (tx_buffer->map), (0), (tx_buffer->map->dm_mapsize
), (0x08))
1325 0, tx_buffer->map->dm_mapsize,(*(txr->txdma.dma_tag)->_dmamap_sync)((txr->txdma.dma_tag
), (tx_buffer->map), (0), (tx_buffer->map->dm_mapsize
), (0x08))
1326 BUS_DMASYNC_POSTWRITE)(*(txr->txdma.dma_tag)->_dmamap_sync)((txr->txdma.dma_tag
), (tx_buffer->map), (0), (tx_buffer->map->dm_mapsize
), (0x08))
;
1327 bus_dmamap_unload(txr->txdma.dma_tag, tx_buffer->map)(*(txr->txdma.dma_tag)->_dmamap_unload)((txr->txdma.
dma_tag), (tx_buffer->map))
;
1328 }
1329 if (tx_buffer->m_head != NULL((void *)0)) {
1330 m_freem(tx_buffer->m_head);
1331 tx_buffer->m_head = NULL((void *)0);
1332 }
1333 if (tx_buffer->map != NULL((void *)0)) {
1334 bus_dmamap_destroy(txr->txdma.dma_tag, tx_buffer->map)(*(txr->txdma.dma_tag)->_dmamap_destroy)((txr->txdma
.dma_tag), (tx_buffer->map))
;
1335 tx_buffer->map = NULL((void *)0);
1336 }
1337 }
1338
1339 if (txr->tx_buffers != NULL((void *)0))
1340 free(txr->tx_buffers, M_DEVBUF2,
1341 sc->num_tx_desc * sizeof(struct ngbe_tx_buf));
1342 txr->tx_buffers = NULL((void *)0);
1343 txr->txtag = NULL((void *)0);
1344}
1345
1346int
1347ngbe_allocate_receive_buffers(struct rx_ring *rxr)
1348{
1349 struct ngbe_softc *sc = rxr->sc;
1350 struct ngbe_rx_buf *rxbuf;
1351 int i, error;
1352
1353 rxr->rx_buffers = mallocarray(sc->num_rx_desc,
1354 sizeof(struct ngbe_rx_buf), M_DEVBUF2, M_NOWAIT0x0002 | M_ZERO0x0008);
1355 if (rxr->rx_buffers == NULL((void *)0)) {
1356 printf("%s: unable to allocate rx_buffer memory\n",
1357 DEVNAME(sc)((sc)->sc_dev.dv_xname));
1358 error = ENOMEM12;
1359 goto fail;
1360 }
1361
1362 rxbuf = rxr->rx_buffers;
1363 for (i = 0; i < sc->num_rx_desc; i++, rxbuf++) {
1364 error = bus_dmamap_create(rxr->rxdma.dma_tag,(*(rxr->rxdma.dma_tag)->_dmamap_create)((rxr->rxdma.
dma_tag), (9432), (1), (9432), (0), (0x0001), (&rxbuf->
map))
1365 NGBE_MAX_JUMBO_FRAME_SIZE, 1, NGBE_MAX_JUMBO_FRAME_SIZE, 0,(*(rxr->rxdma.dma_tag)->_dmamap_create)((rxr->rxdma.
dma_tag), (9432), (1), (9432), (0), (0x0001), (&rxbuf->
map))
1366 BUS_DMA_NOWAIT, &rxbuf->map)(*(rxr->rxdma.dma_tag)->_dmamap_create)((rxr->rxdma.
dma_tag), (9432), (1), (9432), (0), (0x0001), (&rxbuf->
map))
;
1367 if (error) {
1368 printf("%s: unable to create RX DMA map\n",
1369 DEVNAME(sc)((sc)->sc_dev.dv_xname));
1370 goto fail;
1371 }
1372 }
1373 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, 0,(*(rxr->rxdma.dma_tag)->_dmamap_sync)((rxr->rxdma.dma_tag
), (rxr->rxdma.dma_map), (0), (rxr->rxdma.dma_map->dm_mapsize
), (0x01 | 0x04))
1374 rxr->rxdma.dma_map->dm_mapsize,(*(rxr->rxdma.dma_tag)->_dmamap_sync)((rxr->rxdma.dma_tag
), (rxr->rxdma.dma_map), (0), (rxr->rxdma.dma_map->dm_mapsize
), (0x01 | 0x04))
1375 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE)(*(rxr->rxdma.dma_tag)->_dmamap_sync)((rxr->rxdma.dma_tag
), (rxr->rxdma.dma_map), (0), (rxr->rxdma.dma_map->dm_mapsize
), (0x01 | 0x04))
;
1376
1377 return 0;
1378fail:
1379 return error;
1380}
1381
1382int
1383ngbe_allocate_transmit_buffers(struct tx_ring *txr)
1384{
1385 struct ngbe_softc *sc = txr->sc;
1386 struct ngbe_tx_buf *txbuf;
1387 int error, i;
1388
1389 txr->tx_buffers = mallocarray(sc->num_tx_desc,
1390 sizeof(struct ngbe_tx_buf), M_DEVBUF2, M_NOWAIT0x0002 | M_ZERO0x0008);
1391 if (txr->tx_buffers == NULL((void *)0)) {
1392 printf("%s: unable to allocate tx_buffer memory\n",
1393 DEVNAME(sc)((sc)->sc_dev.dv_xname));
1394 error = ENOMEM12;
1395 goto fail;
1396 }
1397 txr->txtag = txr->txdma.dma_tag;
1398
1399 /* Create the descriptor buffer dma maps. */
1400 for (i = 0; i < sc->num_tx_desc; i++) {
1401 txbuf = &txr->tx_buffers[i];
1402 error = bus_dmamap_create(txr->txdma.dma_tag, NGBE_TSO_SIZE,(*(txr->txdma.dma_tag)->_dmamap_create)((txr->txdma.
dma_tag), (32767), (32), ((1 << 12)), (0), (0x0001), (&
txbuf->map))
1403 NGBE_MAX_SCATTER, PAGE_SIZE, 0, BUS_DMA_NOWAIT,(*(txr->txdma.dma_tag)->_dmamap_create)((txr->txdma.
dma_tag), (32767), (32), ((1 << 12)), (0), (0x0001), (&
txbuf->map))
1404 &txbuf->map)(*(txr->txdma.dma_tag)->_dmamap_create)((txr->txdma.
dma_tag), (32767), (32), ((1 << 12)), (0), (0x0001), (&
txbuf->map))
;
1405 if (error != 0) {
1406 printf("%s: unable to create TX DMA map\n",
1407 DEVNAME(sc)((sc)->sc_dev.dv_xname));
1408 goto fail;
1409 }
1410 }
1411
1412 return 0;
1413fail:
1414 return error;
1415}
1416
1417int
1418ngbe_setup_receive_ring(struct rx_ring *rxr)
1419{
1420 struct ngbe_softc *sc = rxr->sc;
1421 struct ifnet *ifp = &sc->sc_ac.ac_if;
1422 int rsize;
1423
1424 rsize = roundup2(sc->num_rx_desc * sizeof(union ngbe_rx_desc),(((sc->num_rx_desc * sizeof(union ngbe_rx_desc)) + ((1 <<
12)) - 1) & ~(((1 << 12)) - 1))
1425 PAGE_SIZE)(((sc->num_rx_desc * sizeof(union ngbe_rx_desc)) + ((1 <<
12)) - 1) & ~(((1 << 12)) - 1))
;
1426
1427 /* Clear the ring contents. */
1428 bzero((void *)rxr->rx_base, rsize)__builtin_bzero(((void *)rxr->rx_base), (rsize));
1429
1430 if (ngbe_allocate_receive_buffers(rxr))
1431 return ENOMEM12;
1432
1433 /* Setup our descriptor indices. */
1434 rxr->next_to_check = 0;
1435 rxr->last_desc_filled = sc->num_rx_desc - 1;
1436
1437 if_rxr_init(&rxr->rx_ring, 2 * ((ifp->if_hardmtu / MCLBYTES(1 << 11)) + 1),
1438 sc->num_rx_desc - 1);
1439
1440 ngbe_rxfill(rxr);
1441 if (if_rxr_inuse(&rxr->rx_ring)((&rxr->rx_ring)->rxr_alive) == 0) {
1442 printf("%s: unable to fill any rx descriptors\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
1443 return ENOBUFS55;
1444 }
1445
1446 return 0;
1447}
1448
1449int
1450ngbe_setup_transmit_ring(struct tx_ring *txr)
1451{
1452 struct ngbe_softc *sc = txr->sc;
1453
1454 /* Now allocate transmit buffers for the ring. */
1455 if (ngbe_allocate_transmit_buffers(txr))
1456 return ENOMEM12;
1457
1458 /* Clear the old ring contents */
1459 bzero((void *)txr->tx_base,__builtin_bzero(((void *)txr->tx_base), ((sizeof(union ngbe_tx_desc
)) * sc->num_tx_desc))
1460 (sizeof(union ngbe_tx_desc)) * sc->num_tx_desc)__builtin_bzero(((void *)txr->tx_base), ((sizeof(union ngbe_tx_desc
)) * sc->num_tx_desc))
;
1461
1462 /* Reset indices. */
1463 txr->next_avail_desc = 0;
1464 txr->next_to_clean = 0;
1465
1466 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, 0,(*(txr->txdma.dma_tag)->_dmamap_sync)((txr->txdma.dma_tag
), (txr->txdma.dma_map), (0), (txr->txdma.dma_map->dm_mapsize
), (0x01 | 0x04))
1467 txr->txdma.dma_map->dm_mapsize,(*(txr->txdma.dma_tag)->_dmamap_sync)((txr->txdma.dma_tag
), (txr->txdma.dma_map), (0), (txr->txdma.dma_map->dm_mapsize
), (0x01 | 0x04))
1468 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE)(*(txr->txdma.dma_tag)->_dmamap_sync)((txr->txdma.dma_tag
), (txr->txdma.dma_map), (0), (txr->txdma.dma_map->dm_mapsize
), (0x01 | 0x04))
;
1469
1470 return 0;
1471}
1472
1473int
1474ngbe_setup_receive_structures(struct ngbe_softc *sc)
1475{
1476 struct rx_ring *rxr = sc->rx_rings;
1477 int i;
1478
1479 for (i = 0; i < sc->sc_nqueues; i++, rxr++) {
1480 if (ngbe_setup_receive_ring(rxr))
1481 goto fail;
1482 }
1483
1484 return 0;
1485fail:
1486 ngbe_free_receive_structures(sc);
1487 return ENOBUFS55;
1488}
1489
1490int
1491ngbe_setup_transmit_structures(struct ngbe_softc *sc)
1492{
1493 struct tx_ring *txr = sc->tx_rings;
1494 int i;
1495
1496 for (i = 0; i < sc->sc_nqueues; i++, txr++) {
1497 if (ngbe_setup_transmit_ring(txr))
1498 goto fail;
1499 }
1500
1501 return 0;
1502fail:
1503 ngbe_free_transmit_structures(sc);
1504 return ENOBUFS55;
1505}
1506
1507uint8_t *
1508ngbe_addr_list_itr(struct ngbe_hw *hw, uint8_t **mc_addr_ptr, uint32_t *vmdq)
1509{
1510 uint8_t *addr = *mc_addr_ptr;
1511 uint8_t *newptr;
1512 *vmdq = 0;
1513
1514 newptr = addr + ETHER_ADDR_LEN6;
1515 *mc_addr_ptr = newptr;
1516 return addr;
1517}
1518
1519void
1520ngbe_iff(struct ngbe_softc *sc)
1521{
1522 struct ngbe_hw *hw = &sc->hw;
1523 struct ifnet *ifp = &sc->sc_ac.ac_if;
1524 struct arpcom *ac = &sc->sc_ac;
1525 struct ether_multi *enm;
1526 struct ether_multistep step;
1527 uint32_t fctrl, vlanctrl;
1528 uint8_t *mta, *update_ptr;
1529 int mcnt = 0;
1530
1531 mta = sc->mta;
1532 bzero(mta, sizeof(uint8_t) * ETHER_ADDR_LEN * NGBE_SP_RAR_ENTRIES)__builtin_bzero((mta), (sizeof(uint8_t) * 6 * 32));
1533
1534 fctrl = NGBE_READ_REG_MASK(hw, NGBE_PSR_CTL0x15000,
1535 ~(NGBE_PSR_CTL_UPE0x00000200 | NGBE_PSR_CTL_MPE0x00000100));
1536 vlanctrl = NGBE_READ_REG_MASK(hw, NGBE_PSR_VLAN_CTL0x15088,
1537 ~(NGBE_PSR_VLAN_CTL_VFE0x40000000 | NGBE_PSR_VLAN_CTL_CFIEN0x20000000));
1538 ifp->if_flags &= ~IFF_ALLMULTI0x200;
1539
1540 /* Set all bits that we expect to always be set */
1541 fctrl |= NGBE_PSR_CTL_BAM0x00000400 | NGBE_PSR_CTL_MFE0x00000080;
1542 vlanctrl |= NGBE_PSR_VLAN_CTL_VFE0x40000000;
1543
1544 hw->addr_ctrl.user_set_promisc = 0;
1545 if (ifp->if_flags & IFF_PROMISC0x100 || ac->ac_multirangecnt > 0 ||
1546 ac->ac_multicnt > NGBE_SP_RAR_ENTRIES32) {
1547 ifp->if_flags |= IFF_ALLMULTI0x200;
1548 fctrl |= NGBE_PSR_CTL_MPE0x00000100;
1549 if (ifp->if_flags & IFF_PROMISC0x100) {
1550 fctrl |= NGBE_PSR_CTL_UPE0x00000200;
1551 vlanctrl &= ~NGBE_PSR_VLAN_CTL_VFE0x40000000;
1552 }
1553 } else {
1554 ETHER_FIRST_MULTI(step, ac, enm)do { (step).e_enm = ((&(ac)->ac_multiaddrs)->lh_first
); do { if ((((enm)) = ((step)).e_enm) != ((void *)0)) ((step
)).e_enm = ((((enm)))->enm_list.le_next); } while ( 0); } while
( 0)
;
1555 while (enm != NULL((void *)0)) {
1556 bcopy(enm->enm_addrlo, &mta[mcnt * ETHER_ADDR_LEN6],
1557 ETHER_ADDR_LEN6);
1558 mcnt++;
1559
1560 ETHER_NEXT_MULTI(step, enm)do { if (((enm) = (step).e_enm) != ((void *)0)) (step).e_enm =
(((enm))->enm_list.le_next); } while ( 0)
;
1561 }
1562
1563 update_ptr = mta;
1564 hw->mac.ops.update_mc_addr_list(hw, update_ptr, mcnt,
1565 ngbe_addr_list_itr, 1);
1566 }
1567
1568 NGBE_WRITE_REG(hw, NGBE_PSR_VLAN_CTL, vlanctrl)((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x15088
), (vlanctrl)))
;
1569 NGBE_WRITE_REG(hw, NGBE_PSR_CTL, fctrl)((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x15000
), (fctrl)))
;
1570}
1571
1572int
1573ngbe_initialize_receive_unit(struct ngbe_softc *sc)
1574{
1575 struct ngbe_hw *hw = &sc->hw;
1576 struct rx_ring *rxr = sc->rx_rings;
1577 uint32_t bufsz, mhadd, rxctrl, rxdctl, srrctl;
1578 int i, wait_loop = NGBE_MAX_RX_DESC_POLL10;
1579 int error = 0;
1580
1581 /* Disable receives while setting up the descriptors */
1582 hw->mac.ops.disable_rx(hw);
1583
1584 ngbe_setup_psrtype(hw);
1585
1586 /* Enable hw crc stripping */
1587 NGBE_WRITE_REG_MASK(hw, NGBE_RSEC_CTL0x17000, NGBE_RSEC_CTL_CRC_STRIP0x00000004,
1588 NGBE_RSEC_CTL_CRC_STRIP0x00000004);
1589
1590 if (sc->sc_nqueues > 1) {
1591 NGBE_WRITE_REG_MASK(hw, NGBE_PSR_CTL0x15000, NGBE_PSR_CTL_PCSD0x00002000,
1592 NGBE_PSR_CTL_PCSD0x00002000);
1593 ngbe_initialize_rss_mapping(sc);
1594 }
1595
1596 mhadd = NGBE_READ_REG(hw, NGBE_PSR_MAX_SZ)((((struct ngbe_osdep *)(hw)->back)->os_memt)->read_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x15020
)))
;
1597 if (mhadd != NGBE_MAX_JUMBO_FRAME_SIZE9432)
1598 NGBE_WRITE_REG(hw, NGBE_PSR_MAX_SZ, NGBE_MAX_JUMBO_FRAME_SIZE)((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x15020
), (9432)))
;
1599
1600 bufsz = MCLBYTES(1 << 11) >> NGBE_PX_RR_CFG_BSIZEPKT_SHIFT2;
1601
1602 for (i = 0; i < sc->sc_nqueues; i++, rxr++) {
1603 uint64_t rdba = rxr->rxdma.dma_map->dm_segs[0].ds_addr;
1604
1605 /* Disable queue to avoid issues while updating state */
1606 NGBE_WRITE_REG_MASK(hw, NGBE_PX_RR_CFG(i)(0x01010 + ((i) * 0x40)),
1607 NGBE_PX_RR_CFG_RR_EN0x00000001, 0);
1608
1609 /* Hardware may take up to 100us to actually disable Rx queue */
1610 do {
1611 DELAY(10)(*delay_func)(10);
1612 rxdctl = NGBE_READ_REG(hw, NGBE_PX_RR_CFG(i))((((struct ngbe_osdep *)(hw)->back)->os_memt)->read_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), ((0x01010
+ ((i) * 0x40)))))
;
1613 } while (--wait_loop && (rxdctl & NGBE_PX_RR_CFG_RR_EN0x00000001));
1614 if (!wait_loop) {
1615 printf("%s: Rx queue %d not cleared within "
1616 "the polling period\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), i);
1617 error = ETIMEDOUT60;
1618 goto out;
1619 }
1620
1621 NGBE_WRITE_REG(hw, NGBE_PX_RR_BAL(i),((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), ((0x01000
+ ((i) * 0x40))), ((rdba & 0x00000000ffffffffULL))))
1622 (rdba & 0x00000000ffffffffULL))((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), ((0x01000
+ ((i) * 0x40))), ((rdba & 0x00000000ffffffffULL))))
;
1623 NGBE_WRITE_REG(hw, NGBE_PX_RR_BAH(i), (rdba >> 32))((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), ((0x01004
+ ((i) * 0x40))), ((rdba >> 32))))
;
1624
1625 rxdctl = NGBE_READ_REG(hw, NGBE_PX_RR_CFG(i))((((struct ngbe_osdep *)(hw)->back)->os_memt)->read_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), ((0x01010
+ ((i) * 0x40)))))
;
1626 rxdctl |=
1627 (sc->num_rx_desc / 128) << NGBE_PX_RR_CFG_RR_SIZE_SHIFT1;
1628 rxdctl |= 0x1 << NGBE_PX_RR_CFG_RR_THER_SHIFT16;
1629 NGBE_WRITE_REG(hw, NGBE_PX_RR_CFG(i), rxdctl)((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), ((0x01010
+ ((i) * 0x40))), (rxdctl)))
;
1630
1631 /* Reset head and tail pointers */
1632 NGBE_WRITE_REG(hw, NGBE_PX_RR_RP(i), 0)((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), ((0x0100c
+ ((i) * 0x40))), (0)))
;
1633 NGBE_WRITE_REG(hw, NGBE_PX_RR_WP(i), 0)((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), ((0x01008
+ ((i) * 0x40))), (0)))
;
1634
1635 /* Set up the SRRCTL register */
1636 srrctl = NGBE_READ_REG_MASK(hw, NGBE_PX_RR_CFG(i)(0x01010 + ((i) * 0x40)),
1637 ~(NGBE_PX_RR_CFG_RR_HDR_SZ0x0000f000 | NGBE_PX_RR_CFG_RR_BUF_SZ0x00000f00 |
1638 NGBE_PX_RR_CFG_SPLIT_MODE0x04000000));
1639 srrctl |= bufsz;
1640 NGBE_WRITE_REG(hw, NGBE_PX_RR_CFG(i), srrctl)((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), ((0x01010
+ ((i) * 0x40))), (srrctl)))
;
1641
1642 /* Enable receive descriptor ring */
1643 NGBE_WRITE_REG_MASK(hw, NGBE_PX_RR_CFG(i)(0x01010 + ((i) * 0x40)),
1644 NGBE_PX_RR_CFG_RR_EN0x00000001, NGBE_PX_RR_CFG_RR_EN0x00000001);
1645
1646 do {
1647 msec_delay(1)(*delay_func)(1000 * (1));
1648 rxdctl = NGBE_READ_REG(hw, NGBE_PX_RR_CFG(i))((((struct ngbe_osdep *)(hw)->back)->os_memt)->read_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), ((0x01010
+ ((i) * 0x40)))))
;
1649 } while (--wait_loop && !(rxdctl & NGBE_PX_RR_CFG_RR_EN0x00000001));
1650 if (!wait_loop) {
1651 printf("%s: Rx queue %d not set within "
1652 "the polling period\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), i);
1653 error = ETIMEDOUT60;
1654 goto out;
1655 }
1656 NGBE_WRITE_REG(hw, NGBE_PX_RR_WP(i), rxr->last_desc_filled)((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), ((0x01008
+ ((i) * 0x40))), (rxr->last_desc_filled)))
;
1657 }
1658
1659 /* Enable all receives */
1660 rxctrl = NGBE_READ_REG(hw, NGBE_RDB_PB_CTL)((((struct ngbe_osdep *)(hw)->back)->os_memt)->read_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x19000
)))
;
1661 rxctrl |= NGBE_RDB_PB_CTL_PBEN0x80000000;
1662 hw->mac.ops.enable_rx_dma(hw, rxctrl);
1663out:
1664 return error;
1665}
1666
1667void
1668ngbe_initialize_rss_mapping(struct ngbe_softc *sc)
1669{
1670 struct ngbe_hw *hw = &sc->hw;
1671 uint32_t reta = 0, rss_field, rss_key[10];
1672 int i, j, queue_id;
1673
1674 /* Set up the redirection table */
1675 for (i = 0, j = 0; i < 128; i++, j++) {
1676 if (j == sc->sc_nqueues)
1677 j = 0;
1678 queue_id = j;
1679 /*
1680 * The low 8 bits are for hash value (n+0);
1681 * The next 8 bits are for hash value (n+1), etc.
1682 */
1683 reta = reta >> 8;
1684 reta = reta | (((uint32_t)queue_id) << 24);
1685 if ((i & 3) == 3) {
1686 NGBE_WRITE_REG(hw, NGBE_RDB_RSSTBL(i >> 2), reta)((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), ((0x19400
+ ((i >> 2) * 4))), (reta)))
;
1687 reta = 0;
1688 }
1689 }
1690
1691 /* Set up random bits */
1692 stoeplitz_to_key(&rss_key, sizeof(rss_key));
1693
1694 /* Fill out hash function seeds */
1695 for (i = 0; i < 10; i++)
1696 NGBE_WRITE_REG(hw, NGBE_RDB_RSSRK(i), rss_key[i])((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), ((0x19480
+ ((i) * 4))), (rss_key[i])))
;
1697
1698 /* Perform hash on these packet types */
1699 rss_field = NGBE_RDB_RA_CTL_RSS_EN0x00000004 | NGBE_RDB_RA_CTL_RSS_IPV40x00020000 |
1700 NGBE_RDB_RA_CTL_RSS_IPV4_TCP0x00010000 | NGBE_RDB_RA_CTL_RSS_IPV60x00100000 |
1701 NGBE_RDB_RA_CTL_RSS_IPV6_TCP0x00200000;
1702
1703 NGBE_WRITE_REG(hw, NGBE_RDB_RA_CTL, rss_field)((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x194f4
), (rss_field)))
;
1704}
1705
1706int
1707ngbe_initialize_transmit_unit(struct ngbe_softc *sc)
1708{
1709 struct ngbe_hw *hw = &sc->hw;
1710 struct ifnet *ifp = &sc->sc_ac.ac_if;
1711 struct tx_ring *txr;
1712 uint64_t tdba;
1713 uint32_t txdctl;
1714 int i, wait_loop = NGBE_MAX_RX_DESC_POLL10;;
1715 int error = 0;
1716
1717 /* TDM_CTL.TE must be before Tx queues are enabled */
1718 NGBE_WRITE_REG_MASK(hw, NGBE_TDM_CTL0x18000, NGBE_TDM_CTL_TE0x1,
1719 NGBE_TDM_CTL_TE0x1);
1720
1721 /* Setup the base and length of the Tx descriptor ring. */
1722 for (i = 0; i < sc->sc_nqueues; i++) {
1723 txr = &sc->tx_rings[i];
1724 tdba = txr->txdma.dma_map->dm_segs[0].ds_addr;
1725
1726 /* Disable queue to avoid issues while updating state */
1727 NGBE_WRITE_REG(hw, NGBE_PX_TR_CFG(i), NGBE_PX_TR_CFG_SWFLSH)((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), ((0x03010
+ ((i) * 0x40))), (0x04000000)))
;
1728 NGBE_WRITE_FLUSH(hw)((((struct ngbe_osdep *)(hw)->back)->os_memt)->read_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x10000
)))
;
1729
1730 NGBE_WRITE_REG(hw, NGBE_PX_TR_BAL(i),((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), ((0x03000
+ ((i) * 0x40))), ((tdba & 0x00000000ffffffffULL))))
1731 (tdba & 0x00000000ffffffffULL))((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), ((0x03000
+ ((i) * 0x40))), ((tdba & 0x00000000ffffffffULL))))
;
1732 NGBE_WRITE_REG(hw, NGBE_PX_TR_BAH(i), (tdba >> 32))((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), ((0x03004
+ ((i) * 0x40))), ((tdba >> 32))))
;
1733
1734 /* Reset head and tail pointers */
1735 NGBE_WRITE_REG(hw, NGBE_PX_TR_RP(i), 0)((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), ((0x0300c
+ ((i) * 0x40))), (0)))
;
1736 NGBE_WRITE_REG(hw, NGBE_PX_TR_WP(i), 0)((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), ((0x03008
+ ((i) * 0x40))), (0)))
;
1737
1738 txr->watchdog_timer = 0;
1739
1740 txdctl = NGBE_PX_TR_CFG_ENABLE(1);
1741 txdctl |= 4 << NGBE_PX_TR_CFG_TR_SIZE_SHIFT1;
1742 txdctl |= 0x20 << NGBE_PX_TR_CFG_WTHRESH_SHIFT16;
1743
1744 /* Enable queue */
1745 NGBE_WRITE_REG(hw, NGBE_PX_TR_CFG(i), txdctl)((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), ((0x03010
+ ((i) * 0x40))), (txdctl)))
;
1746
1747 /* Poll to verify queue is enabled */
1748 do {
1749 msec_delay(1)(*delay_func)(1000 * (1));
1750 txdctl = NGBE_READ_REG(hw, NGBE_PX_TR_CFG(i))((((struct ngbe_osdep *)(hw)->back)->os_memt)->read_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), ((0x03010
+ ((i) * 0x40)))))
;
1751 } while (--wait_loop && !(txdctl & NGBE_PX_TR_CFG_ENABLE(1)));
1752 if (!wait_loop) {
1753 printf("%s: Tx queue %d not set within "
1754 "the polling period\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), i);
1755 error = ETIMEDOUT60;
1756 goto out;
1757 }
1758 }
1759
1760 ifp->if_timer = 0;
1761
1762 NGBE_WRITE_REG_MASK(hw, NGBE_TSEC_BUF_AE0x1d00c, 0x3ff, 0x10);
1763 NGBE_WRITE_REG_MASK(hw, NGBE_TSEC_CTL0x1d000, 0x2, 0);
1764 NGBE_WRITE_REG_MASK(hw, NGBE_TSEC_CTL0x1d000, 0x1, 1);
1765
1766 /* Enable mac transmitter */
1767 NGBE_WRITE_REG_MASK(hw, NGBE_MAC_TX_CFG0x11000, NGBE_MAC_TX_CFG_TE0x00000001,
1768 NGBE_MAC_TX_CFG_TE0x00000001);
1769out:
1770 return error;
1771}
1772
1773int
1774ngbe_intr_link(void *arg)
1775{
1776 struct ngbe_softc *sc = (struct ngbe_softc *)arg;
1777 uint32_t eicr;
1778
1779 eicr = ngbe_misc_isb(sc, NGBE_ISB_MISC);
1780 if (eicr & (NGBE_PX_MISC_IC_PHY0x00040000 | NGBE_PX_MISC_IC_GPIO0x04000000)) {
1781 KERNEL_LOCK()_kernel_lock();
1782 ngbe_handle_phy_event(sc);
1783 ngbe_update_link_status(sc);
1784 KERNEL_UNLOCK()_kernel_unlock();
1785 }
1786 ngbe_enable_queue(sc, sc->linkvec);
1787 return 1;
1788}
1789
1790int
1791ngbe_intr_queue(void *arg)
1792{
1793 struct ngbe_queue *nq = arg;
1794 struct ngbe_softc *sc = nq->sc;
1795 struct ifnet *ifp = &sc->sc_ac.ac_if;
1796 struct rx_ring *rxr = nq->rxr;
1797 struct tx_ring *txr = nq->txr;
1798
1799 if (ISSET(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) & (0x40))) {
1800 ngbe_rxeof(rxr);
1801 ngbe_txeof(txr);
1802 ngbe_rxrefill(rxr);
1803 }
1804
1805 ngbe_enable_queue(sc, nq->msix);
1806
1807 return 1;
1808}
1809
1810void
1811ngbe_init_eeprom_params(struct ngbe_hw *hw)
1812{
1813 struct ngbe_eeprom_info *eeprom = &hw->eeprom;
1814
1815 if (eeprom->type == ngbe_eeprom_uninitialized) {
1816 eeprom->type = ngbe_eeprom_none;
1817
1818 if (!(NGBE_READ_REG(hw, NGBE_SPI_STATUS)((((struct ngbe_osdep *)(hw)->back)->os_memt)->read_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x1010c
)))
&
1819 NGBE_SPI_STATUS_FLASH_BYPASS0x80000000))
1820 eeprom->type = ngbe_flash;
1821 }
1822
1823 eeprom->sw_region_offset = 0x80;
1824}
1825
1826int
1827ngbe_init_hw(struct ngbe_softc *sc)
1828{
1829 struct ngbe_hw *hw = &sc->hw;
1830 int status;
1831
1832 /* Reset the hardware */
1833 status = hw->mac.ops.reset_hw(sc);
1834
1835 if (!status)
1836 status = hw->mac.ops.start_hw(sc);
1837
1838 return status;
1839}
1840
1841void
1842ngbe_init_ops(struct ngbe_hw *hw)
1843{
1844 struct ngbe_mac_info *mac = &hw->mac;
1845 struct ngbe_phy_info *phy = &hw->phy;
1846 struct ngbe_eeprom_info *eeprom = &hw->eeprom;
1847
1848 phy->ops.reset = ngbe_phy_reset;
1849 phy->ops.read_reg = ngbe_phy_read_reg;
1850 phy->ops.write_reg = ngbe_phy_write_reg;
1851 phy->ops.setup_link = ngbe_phy_setup_link;
1852 phy->ops.phy_led_ctrl = ngbe_phy_led_ctrl;
1853 phy->ops.check_overtemp = ngbe_phy_check_overtemp;
1854 phy->ops.identify = ngbe_phy_identify;
1855 phy->ops.init = ngbe_phy_init;
1856 phy->ops.check_event = ngbe_phy_check_event;
1857 phy->ops.get_adv_pause = ngbe_phy_get_advertised_pause;
1858 phy->ops.get_lp_adv_pause = ngbe_phy_get_lp_advertised_pause;
1859 phy->ops.set_adv_pause = ngbe_phy_set_pause_advertisement;
1860 phy->ops.setup_once = ngbe_phy_setup;
1861
1862 /* MAC */
1863 mac->ops.init_hw = ngbe_init_hw;
1864 mac->ops.clear_hw_cntrs = ngbe_clear_hw_cntrs;
1865 mac->ops.get_mac_addr = ngbe_get_mac_addr;
1866 mac->ops.stop_adapter = ngbe_stop_adapter;
1867 mac->ops.get_bus_info = ngbe_get_bus_info;
1868 mac->ops.set_lan_id = ngbe_set_lan_id_multi_port_pcie;
1869 mac->ops.acquire_swfw_sync = ngbe_acquire_swfw_sync;
1870 mac->ops.release_swfw_sync = ngbe_release_swfw_sync;
1871 mac->ops.reset_hw = ngbe_reset_hw;
1872 mac->ops.get_media_type = ngbe_get_media_type;
1873 mac->ops.disable_sec_rx_path = ngbe_disable_sec_rx_path;
1874 mac->ops.enable_sec_rx_path = ngbe_enable_sec_rx_path;
1875 mac->ops.enable_rx_dma = ngbe_enable_rx_dma;
1876 mac->ops.start_hw = ngbe_start_hw;
1877
1878 /* RAR, Multicast, VLAN */
1879 mac->ops.set_rar = ngbe_set_rar;
1880 mac->ops.init_rx_addrs = ngbe_init_rx_addrs;
1881 mac->ops.update_mc_addr_list = ngbe_update_mc_addr_list;
1882 mac->ops.enable_rx = ngbe_enable_rx;
1883 mac->ops.disable_rx = ngbe_disable_rx;
1884 mac->ops.clear_vfta = ngbe_clear_vfta;
1885 mac->ops.init_uta_tables = ngbe_init_uta_tables;
1886
1887 /* Flow Control */
1888 mac->ops.fc_enable = ngbe_fc_enable;
1889 mac->ops.setup_fc = ngbe_setup_fc;
1890
1891 /* Link */
1892 mac->ops.check_link = ngbe_check_mac_link;
1893 mac->ops.setup_rxpba = ngbe_set_rxpba;
1894
1895 mac->mcft_size = NGBE_SP_MC_TBL_SIZE128;
1896 mac->vft_size = NGBE_SP_VFT_TBL_SIZE128;
1897 mac->num_rar_entries = NGBE_SP_RAR_ENTRIES32;
1898 mac->rx_pb_size = NGBE_SP_RX_PB_SIZE42;
1899 mac->max_rx_queues = NGBE_SP_MAX_RX_QUEUES8;
1900 mac->max_tx_queues = NGBE_SP_MAX_TX_QUEUES8;
1901
1902 /* EEPROM */
1903 eeprom->ops.init_params = ngbe_init_eeprom_params;
1904 eeprom->ops.eeprom_chksum_cap_st = ngbe_eepromcheck_cap;
1905 eeprom->ops.phy_led_oem_chk = ngbe_phy_led_oem_chk;
1906
1907 /* Manageability interface */
1908 mac->ops.set_fw_drv_ver = ngbe_set_fw_drv_ver;
1909 mac->ops.init_thermal_sensor_thresh = ngbe_init_thermal_sensor_thresh;
1910}
1911
1912void
1913ngbe_init_rx_addrs(struct ngbe_softc *sc)
1914{
1915 struct ngbe_hw *hw = &sc->hw;
1916 uint32_t rar_entries = hw->mac.num_rar_entries;
1917 uint32_t i, psrctl;
1918
1919 /*
1920 * If the current mac address is valid, assume it is a software
1921 * override to the permanent address.
1922 * Otherwise, use the permanent address from the eeprom.
1923 */
1924 if (ngbe_validate_mac_addr(hw->mac.addr)) {
1925 /* Get the MAC address from the RAR0 for later reference */
1926 hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
1927 }
1928 hw->addr_ctrl.overflow_promisc = 0;
1929 hw->addr_ctrl.rar_used_count = 1;
1930
1931 /* Zero out the other receive addresses. */
1932 for (i = 1; i < rar_entries; i++) {
1933 NGBE_WRITE_REG(hw, NGBE_PSR_MAC_SWC_IDX, i)((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x16210
), (i)))
;
1934 NGBE_WRITE_REG(hw, NGBE_PSR_MAC_SWC_AD_L, 0)((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x16200
), (0)))
;
1935 NGBE_WRITE_REG(hw, NGBE_PSR_MAC_SWC_AD_H, 0)((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x16204
), (0)))
;
1936 }
1937
1938 /* Clear the MTA */
1939 hw->addr_ctrl.mta_in_use = 0;
1940 psrctl = NGBE_READ_REG(hw, NGBE_PSR_CTL)((((struct ngbe_osdep *)(hw)->back)->os_memt)->read_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x15000
)))
;
1941 psrctl &= ~(NGBE_PSR_CTL_MO0x00000060 | NGBE_PSR_CTL_MFE0x00000080);
1942 psrctl |= hw->mac.mc_filter_type << NGBE_PSR_CTL_MO_SHIFT5;
1943 NGBE_WRITE_REG(hw, NGBE_PSR_CTL, psrctl)((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x15000
), (psrctl)))
;
1944
1945 for (i = 0; i < hw->mac.mcft_size; i++)
1946 NGBE_WRITE_REG(hw, NGBE_PSR_MC_TBL(i), 0)((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), ((0x15200
+ ((i) * 4))), (0)))
;
1947
1948 hw->mac.ops.init_uta_tables(hw);
1949}
1950
1951void
1952ngbe_init_shared_code(struct ngbe_softc *sc)
1953{
1954 struct ngbe_osdep *os = &sc->osdep;
1955 struct pci_attach_args *pa = &os->os_pa;
1956 struct ngbe_hw *hw = &sc->hw;
1957
1958 hw->subsystem_device_id = PCI_PRODUCT(pci_conf_read(pa->pa_pc,(((pci_conf_read(pa->pa_pc, pa->pa_tag, 0x2c)) >>
16) & 0xffff)
1959 pa->pa_tag, PCI_SUBSYS_ID_REG))(((pci_conf_read(pa->pa_pc, pa->pa_tag, 0x2c)) >>
16) & 0xffff)
;
1960
1961 hw->phy.type = ngbe_phy_internal;
1962
1963 NGBE_WRITE_REG(hw, NGBE_MDIO_CLAUSE_SELECT, 0xf)((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x11220
), (0xf)))
;
1964
1965 ngbe_init_ops(hw);
1966
1967 /* Default flow control settings. */
1968 hw->fc.requested_mode = ngbe_fc_full;
1969 hw->fc.current_mode = ngbe_fc_full;
1970
1971 hw->fc.pause_time = NGBE_DEFAULT_FCPAUSE0xffff;
1972 hw->fc.disable_fc_autoneg = 0;
1973}
1974
1975void
1976ngbe_init_thermal_sensor_thresh(struct ngbe_hw *hw)
1977{
1978 /* Only support thermal sensors attached to SP physical port 0 */
1979 if (hw->bus.lan_id)
1980 return;
1981
1982 NGBE_WRITE_REG(hw, NGBE_TS_INT_EN, NGBE_TS_INT_EN_DALARM_INT_EN |((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x10314
), (0x00000002 | 0x00000001)))
1983 NGBE_TS_INT_EN_ALARM_INT_EN)((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x10314
), (0x00000002 | 0x00000001)))
;
1984 NGBE_WRITE_REG(hw, NGBE_TS_EN, NGBE_TS_EN_ENA)((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x10304
), (0x00000001)))
;
1985
1986 NGBE_WRITE_REG(hw, NGBE_TS_ALARM_THRE, 0x344)((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x1030c
), (0x344)))
;
1987 NGBE_WRITE_REG(hw, NGBE_TS_DALARM_THRE, 0x330)((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x10310
), (0x330)))
;
1988}
1989
/*
 * Clear all 128 entries of the unicast hash table.
 */
void
ngbe_init_uta_tables(struct ngbe_hw *hw)
{
	int idx;

	for (idx = 0; idx < 128; idx++)
		NGBE_WRITE_REG(hw, NGBE_PSR_UC_TBL(idx), 0);
}
1998
1999void
2000ngbe_fc_autoneg(struct ngbe_softc *sc)
2001{
2002 struct ngbe_hw *hw = &sc->hw;
2003 uint32_t speed;
2004 int link_up;
2005 int error = EINVAL22;
2006
2007 /*
2008 * AN should have completed when the cable was plugged in.
2009 * Look for reasons to bail out. Bail out if:
2010 * - FC autoneg is disabled, or if
2011 * - link is not up.
2012 */
2013 if (hw->fc.disable_fc_autoneg) {
2014 printf("%s: flow control autoneg is disabled\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
2015 goto out;
2016 }
2017
2018 hw->mac.ops.check_link(hw, &speed, &link_up, 0);
2019 if (!link_up)
2020 goto out;
2021
2022 switch (hw->phy.media_type) {
2023 /* Autoneg flow control on fiber adapters */
2024 case ngbe_media_type_fiber:
2025 break;
2026
2027 /* Autoneg flow control on copper adapters */
2028 case ngbe_media_type_copper:
2029 error = ngbe_fc_autoneg_copper(sc);
2030 break;
2031 default:
2032 break;
2033 }
2034out:
2035 if (error) {
2036 hw->fc.fc_was_autonegged = 0;
2037 hw->fc.current_mode = hw->fc.requested_mode;
2038 } else
2039 hw->fc.fc_was_autonegged = 1;
2040}
2041
2042int
2043ngbe_fc_autoneg_copper(struct ngbe_softc *sc)
2044{
2045 struct ngbe_hw *hw = &sc->hw;
2046 uint8_t technology_ability_reg, lp_technology_ability_reg;
2047
2048 technology_ability_reg = lp_technology_ability_reg = 0;
2049 if (!((hw->subsystem_device_id & OEM_MASK0x00ff) == RGMII_FPGA0x0080)) {
2050 hw->phy.ops.get_adv_pause(hw, &technology_ability_reg);
2051 hw->phy.ops.get_lp_adv_pause(hw, &lp_technology_ability_reg);
2052 }
2053
2054 return ngbe_negotiate_fc(sc, (uint32_t)technology_ability_reg,
2055 (uint32_t)lp_technology_ability_reg, NGBE_TAF_SYM_PAUSE0x1,
2056 NGBE_TAF_ASM_PAUSE0x2, NGBE_TAF_SYM_PAUSE0x1, NGBE_TAF_ASM_PAUSE0x2);
2057}
2058
2059int
2060ngbe_fc_enable(struct ngbe_softc *sc)
2061{
2062 struct ngbe_hw *hw = &sc->hw;
2063 uint32_t mflcn, fccfg;
2064 uint32_t fcrtl, fcrth;
2065 uint32_t reg;
2066 int error = 0;
2067
2068 /* Validate the water mark configuration */
2069 if (!hw->fc.pause_time) {
2070 error = EINVAL22;
2071 goto out;
2072 }
2073
2074 /* Low water mark of zero causes XOFF floods */
2075 if ((hw->fc.current_mode & ngbe_fc_tx_pause) && hw->fc.high_water) {
2076 if (!hw->fc.low_water ||
2077 hw->fc.low_water >= hw->fc.high_water) {
2078 printf("%s: invalid water mark configuration\n",
2079 DEVNAME(sc)((sc)->sc_dev.dv_xname));
2080 error = EINVAL22;
2081 goto out;
2082 }
2083 }
2084
2085 /* Negotiate the fc mode to use */
2086 ngbe_fc_autoneg(sc);
2087
2088 /* Disable any previous flow control settings */
2089 mflcn = NGBE_READ_REG(hw, NGBE_MAC_RX_FLOW_CTRL)((((struct ngbe_osdep *)(hw)->back)->os_memt)->read_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x11090
)))
;
2090 mflcn &= ~NGBE_MAC_RX_FLOW_CTRL_RFE0x00000001;
2091
2092 fccfg = NGBE_READ_REG(hw, NGBE_RDB_RFCC)((((struct ngbe_osdep *)(hw)->back)->os_memt)->read_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x192a4
)))
;
2093 fccfg &= ~NGBE_RDB_RFCC_RFCE_802_3X0x00000008;
2094
2095 /*
2096 * The possible values of fc.current_mode are:
2097 * 0: Flow control is completely disabled
2098 * 1: Rx flow control is enabled (we can receive pause frames,
2099 * but not send pause frames).
2100 * 2: Tx flow control is enabled (we can send pause frames but
2101 * we do not support receiving pause frames).
2102 * 3: Both Rx and Tx flow control (symmetric) are enabled.
2103 * other: Invalid.
2104 */
2105 switch (hw->fc.current_mode) {
2106 case ngbe_fc_none:
2107 /*
2108 * Flow control is disabled by software override or autoneg.
2109 * The code below will actually disable it in the HW.
2110 */
2111 break;
2112 case ngbe_fc_rx_pause:
2113 /*
2114 * Rx Flow control is enabled and Tx Flow control is
2115 * disabled by software override. Since there really
2116 * isn't a way to advertise that we are capable of RX
2117 * Pause ONLY, we will advertise that we support both
2118 * symmetric and asymmetric Rx PAUSE. Later, we will
2119 * disable the adapter's ability to send PAUSE frames.
2120 */
2121 mflcn |= NGBE_MAC_RX_FLOW_CTRL_RFE0x00000001;
2122 break;
2123 case ngbe_fc_tx_pause:
2124 /*
2125 * Tx Flow control is enabled, and Rx Flow control is
2126 * disabled by software override.
2127 */
2128 fccfg |= NGBE_RDB_RFCC_RFCE_802_3X0x00000008;
2129 break;
2130 case ngbe_fc_full:
2131 /* Flow control (both Rx and Tx) is enabled by SW override. */
2132 mflcn |= NGBE_MAC_RX_FLOW_CTRL_RFE0x00000001;
2133 fccfg |= NGBE_RDB_RFCC_RFCE_802_3X0x00000008;
2134 break;
2135 default:
2136 printf("%s: flow control param set incorrectly\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
2137 error = EINVAL22;
2138 goto out;
2139 }
2140
2141 /* Set 802.3x based flow control settings. */
2142 NGBE_WRITE_REG(hw, NGBE_MAC_RX_FLOW_CTRL, mflcn)((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x11090
), (mflcn)))
;
2143 NGBE_WRITE_REG(hw, NGBE_RDB_RFCC, fccfg)((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x192a4
), (fccfg)))
;
2144
2145 /* Set up and enable Rx high/low water mark thresholds, enable XON. */
2146 if ((hw->fc.current_mode & ngbe_fc_tx_pause) && hw->fc.high_water) {
2147 /* 32Byte granularity */
2148 fcrtl = (hw->fc.low_water << 10) | NGBE_RDB_RFCL_XONE0x80000000;
2149 NGBE_WRITE_REG(hw, NGBE_RDB_RFCL, fcrtl)((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x19220
), (fcrtl)))
;
2150 fcrth = (hw->fc.high_water << 10) | NGBE_RDB_RFCH_XOFFE0x80000000;
2151 } else {
2152 NGBE_WRITE_REG(hw, NGBE_RDB_RFCL, 0)((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x19220
), (0)))
;
2153 /*
2154 * In order to prevent Tx hangs when the internal Tx
2155 * switch is enabled we must set the high water mark
2156 * to the Rx packet buffer size - 24KB. This allows
2157 * the Tx switch to function even under heavy Rx
2158 * workloads.
2159 */
2160 fcrth = NGBE_READ_REG(hw, NGBE_RDB_PB_SZ)((((struct ngbe_osdep *)(hw)->back)->os_memt)->read_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x19020
)))
- 24576;
2161 }
2162
2163 NGBE_WRITE_REG(hw, NGBE_RDB_RFCH, fcrth)((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x19260
), (fcrth)))
;
2164
2165 /* Configure pause time (2 TCs per register) */
2166 reg = hw->fc.pause_time * 0x00010000;
2167 NGBE_WRITE_REG(hw, NGBE_RDB_RFCV, reg)((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x19200
), (reg)))
;
2168
2169 /* Configure flow control refresh threshold value */
2170 NGBE_WRITE_REG(hw, NGBE_RDB_RFCRT, hw->fc.pause_time / 2)((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x192a0
), (hw->fc.pause_time / 2)))
;
2171out:
2172 return error;
2173}
2174
2175int
2176ngbe_fmgr_cmd_op(struct ngbe_hw *hw, uint32_t cmd, uint32_t cmd_addr)
2177{
2178 uint32_t val;
2179 int timeout = 0;
2180
2181 val = (cmd << SPI_CLK_CMD_OFFSET28) | cmd_addr |
2182 (SPI_CLK_DIV3 << SPI_CLK_DIV_OFFSET25);
2183 NGBE_WRITE_REG(hw, NGBE_SPI_CMD, val)((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x10104
), (val)))
;
2184 for (;;) {
2185 if (NGBE_READ_REG(hw, NGBE_SPI_STATUS)((((struct ngbe_osdep *)(hw)->back)->os_memt)->read_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x1010c
)))
& 0x1)
2186 break;
2187 if (timeout == SPI_TIME_OUT_VALUE10000)
2188 return ETIMEDOUT60;
2189
2190 timeout++;
2191 DELAY(10)(*delay_func)(10);
2192 }
2193
2194 return 0;
2195}
2196
2197uint32_t
2198ngbe_flash_read_dword(struct ngbe_hw *hw, uint32_t addr)
2199{
2200 int status = ngbe_fmgr_cmd_op(hw, SPI_CMD_READ_DWORD1, addr);
2201 if (status)
2202 return status;
2203
2204 return NGBE_READ_REG(hw, NGBE_SPI_DATA)((((struct ngbe_osdep *)(hw)->back)->os_memt)->read_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x10108
)))
;
2205}
2206
/*
 * Compute the two's-complement checksum of a byte buffer: the value
 * which, added to the sum of all bytes, yields zero (mod 256).
 * A NULL buffer yields 0.
 */
uint8_t
ngbe_calculate_checksum(uint8_t *buffer, uint32_t length)
{
	uint8_t total = 0;
	uint32_t idx;

	if (buffer == NULL)
		return 0;

	for (idx = 0; idx < length; idx++)
		total += buffer[idx];

	return (uint8_t)(0 - total);
}
2220
2221int
2222ngbe_check_flash_load(struct ngbe_softc *sc, uint32_t check_bit)
2223{
2224 struct ngbe_hw *hw = &sc->hw;
2225 uint32_t reg = 0;
2226 int i, error = 0;
2227
2228 /* if there's flash existing */
2229 if (!(NGBE_READ_REG(hw, NGBE_SPI_STATUS)((((struct ngbe_osdep *)(hw)->back)->os_memt)->read_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x1010c
)))
&
2230 NGBE_SPI_STATUS_FLASH_BYPASS0x80000000)) {
2231 /* wait hw load flash done */
2232 for (i = 0; i < NGBE_MAX_FLASH_LOAD_POLL_TIME10; i++) {
2233 reg = NGBE_READ_REG(hw, NGBE_SPI_ILDR_STATUS)((((struct ngbe_osdep *)(hw)->back)->os_memt)->read_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x10120
)))
;
2234 if (!(reg & check_bit))
2235 break;
2236 msec_delay(200)(*delay_func)(1000 * (200));
2237 }
2238 if (i == NGBE_MAX_FLASH_LOAD_POLL_TIME10) {
2239 error = ETIMEDOUT60;
2240 printf("%s: hardware loading flash failed\n",
2241 DEVNAME(sc)((sc)->sc_dev.dv_xname));
2242 }
2243 }
2244 return error;
2245}
2246
2247int
2248ngbe_check_internal_phy_id(struct ngbe_softc *sc)
2249{
2250 struct ngbe_hw *hw = &sc->hw;
2251 uint16_t phy_id, phy_id_high, phy_id_low;
2252
2253 ngbe_gphy_wait_mdio_access_on(hw);
2254
2255 ngbe_phy_read_reg(hw, NGBE_MDI_PHY_ID1_OFFSET2, 0, &phy_id_high);
2256 phy_id = phy_id_high << 6;
2257 ngbe_phy_read_reg(hw, NGBE_MDI_PHY_ID2_OFFSET3, 0, &phy_id_low);
2258 phy_id |= (phy_id_low & NGBE_MDI_PHY_ID_MASK0xfffffc00) >> 10;
2259
2260 if (NGBE_INTERNAL_PHY_ID0x000732 != phy_id) {
2261 printf("%s: internal phy id 0x%x not supported\n",
2262 DEVNAME(sc)((sc)->sc_dev.dv_xname), phy_id);
2263 return ENOTSUP91;
2264 } else
2265 hw->phy.id = (uint32_t)phy_id;
2266
2267 return 0;
2268}
2269
2270int
2271ngbe_check_mac_link(struct ngbe_hw *hw, uint32_t *speed, int *link_up,
2272 int link_up_wait_to_complete)
2273{
2274 uint32_t status = 0;
2275 uint16_t speed_sta, value = 0;
2276 int i;
2277
2278 if ((hw->subsystem_device_id & OEM_MASK0x00ff) == RGMII_FPGA0x0080) {
2279 *link_up = 1;
2280 *speed = NGBE_LINK_SPEED_1GB_FULL2;
2281 return status;
2282 }
2283
2284 if (link_up_wait_to_complete) {
2285 for (i = 0; i < NGBE_LINK_UP_TIME90; i++) {
2286 status = hw->phy.ops.read_reg(hw,
2287 NGBE_MDIO_AUTO_NEG_STATUS0x1a,
2288 NGBE_INTERNAL_PHY_PAGE_OFFSET0xa43, &value);
2289 if (!status && (value & 0x4)) {
2290 *link_up = 1;
2291 break;
2292 } else
2293 *link_up = 0;
2294 msec_delay(100)(*delay_func)(1000 * (100));
2295 }
2296 } else {
2297 status = hw->phy.ops.read_reg(hw, NGBE_MDIO_AUTO_NEG_STATUS0x1a,
2298 NGBE_INTERNAL_PHY_PAGE_OFFSET0xa43, &value);
2299 if (!status && (value & 0x4))
2300 *link_up = 1;
2301 else
2302 *link_up = 0;
2303 }
2304
2305 speed_sta = value & 0x38;
2306 if (*link_up) {
2307 if (speed_sta == 0x28)
2308 *speed = NGBE_LINK_SPEED_1GB_FULL2;
2309 else if (speed_sta == 0x18)
2310 *speed = NGBE_LINK_SPEED_100_FULL1;
2311 else if (speed_sta == 0x8)
2312 *speed = NGBE_LINK_SPEED_10_FULL8;
2313 } else
2314 *speed = NGBE_LINK_SPEED_UNKNOWN0;
2315
2316 return status;
2317}
2318
/*
 * Return 1 when the management firmware is present, 0 otherwise.
 */
int
ngbe_check_mng_access(struct ngbe_hw *hw)
{
	return ngbe_mng_present(hw) ? 1 : 0;
}
2326
2327int
2328ngbe_check_reset_blocked(struct ngbe_softc *sc)
2329{
2330 uint32_t mmngc;
2331
2332 mmngc = NGBE_READ_REG(&sc->hw, NGBE_MIS_ST)((((struct ngbe_osdep *)(&sc->hw)->back)->os_memt
)->read_4((((struct ngbe_osdep *)(&sc->hw)->back
)->os_memh), (0x10028)))
;
2333 if (mmngc & NGBE_MIS_ST_MNG_VETO0x00000100) {
2334 printf("%s: MNG_VETO bit detected\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
2335 return 1;
2336 }
2337
2338 return 0;
2339}
2340
/*
 * Reset the hardware statistics counters by reading each one;
 * the values are discarded (presumably the counters are
 * read-to-clear -- TODO confirm against the datasheet).
 */
void
ngbe_clear_hw_cntrs(struct ngbe_hw *hw)
{
	uint16_t i;

	NGBE_READ_REG(hw, NGBE_RX_CRC_ERROR_FRAMES_LOW);
	NGBE_READ_REG(hw, NGBE_RX_LEN_ERROR_FRAMES_LOW);
	NGBE_READ_REG(hw, NGBE_RDB_LXONTXC);
	NGBE_READ_REG(hw, NGBE_RDB_LXOFFTXC);
	NGBE_READ_REG(hw, NGBE_MAC_LXOFFRXC);

	/* Select each priority (via MMC_CONTROL) and read its counter. */
	for (i = 0; i < 8; i++) {
		NGBE_WRITE_REG_MASK(hw, NGBE_MMC_CONTROL, NGBE_MMC_CONTROL_UP,
		    i << 16);
		NGBE_READ_REG(hw, NGBE_MAC_PXOFFRXC);
	}

	NGBE_READ_REG(hw, NGBE_PX_GPRC);
	NGBE_READ_REG(hw, NGBE_PX_GPTC);
	NGBE_READ_REG(hw, NGBE_PX_GORC_MSB);
	NGBE_READ_REG(hw, NGBE_PX_GOTC_MSB);

	NGBE_READ_REG(hw, NGBE_RX_BC_FRAMES_GOOD_LOW);
	NGBE_READ_REG(hw, NGBE_RX_UNDERSIZE_FRAMES_GOOD);
	NGBE_READ_REG(hw, NGBE_RX_OVERSIZE_FRAMES_GOOD);
	NGBE_READ_REG(hw, NGBE_RX_FRAME_CNT_GOOD_BAD_LOW);
	NGBE_READ_REG(hw, NGBE_TX_FRAME_CNT_GOOD_BAD_LOW);
	NGBE_READ_REG(hw, NGBE_TX_MC_FRAMES_GOOD_LOW);
	NGBE_READ_REG(hw, NGBE_TX_BC_FRAMES_GOOD_LOW);
	NGBE_READ_REG(hw, NGBE_RDM_DRP_PKT);
}
2372
2373void
2374ngbe_clear_vfta(struct ngbe_hw *hw)
2375{
2376 uint32_t offset;
2377
2378 for (offset = 0; offset < hw->mac.vft_size; offset++) {
2379 NGBE_WRITE_REG(hw, NGBE_PSR_VLAN_TBL(offset), 0)((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), ((0x16000
+ ((offset) * 4))), (0)))
;
2380 /* Errata 5 */
2381 hw->mac.vft_shadow[offset] = 0;
2382 }
2383
2384 for (offset = 0; offset < NGBE_PSR_VLAN_SWC_ENTRIES32; offset++) {
2385 NGBE_WRITE_REG(hw, NGBE_PSR_VLAN_SWC_IDX, offset)((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x16230
), (offset)))
;
2386 NGBE_WRITE_REG(hw, NGBE_PSR_VLAN_SWC, 0)((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x16220
), (0)))
;
2387 NGBE_WRITE_REG(hw, NGBE_PSR_VLAN_SWC_VM_L, 0)((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x16224
), (0)))
;
2388 }
2389}
2390
/*
 * Program the MSI-X vector mapping (IVAR) for every Rx/Tx queue and
 * the link interrupt, along with the per-vector interrupt throttle
 * rate registers.
 */
void
ngbe_configure_ivars(struct ngbe_softc *sc)
{
	struct ngbe_queue *nq = sc->queues;
	uint32_t newitr;
	int i;

	/* Populate MSIX to EITR select */
	NGBE_WRITE_REG(&sc->hw, NGBE_PX_ITRSEL, 0);

	/* Throttle rate for NGBE_MAX_INTS_PER_SEC; keep WDIS set. */
	newitr = (4000000 / NGBE_MAX_INTS_PER_SEC) & NGBE_MAX_EITR;
	newitr |= NGBE_PX_ITR_CNT_WDIS;

	for (i = 0; i < sc->sc_nqueues; i++, nq++) {
		/* Rx queue entry */
		ngbe_set_ivar(sc, i, nq->msix, 0);
		/* Tx queue entry */
		ngbe_set_ivar(sc, i, nq->msix, 1);
		NGBE_WRITE_REG(&sc->hw, NGBE_PX_ITR(nq->msix), newitr);
	}

	/* For the Link interrupt */
	ngbe_set_ivar(sc, 0, sc->linkvec, -1);
	NGBE_WRITE_REG(&sc->hw, NGBE_PX_ITR(sc->linkvec), 1950);
}
2416
/*
 * Configure the Rx packet buffer (PBA_STRATEGY_EQUAL carve) and
 * derive the flow control water marks from it.
 */
void
ngbe_configure_pb(struct ngbe_softc *sc)
{
	struct ngbe_hw *hw = &sc->hw;

	hw->mac.ops.setup_rxpba(hw, 0, 0, PBA_STRATEGY_EQUAL);
	ngbe_pbthresh_setup(sc);
}
2425
2426void
2427ngbe_disable_intr(struct ngbe_softc *sc)
2428{
2429 struct ngbe_queue *nq;
2430 int i;
2431
2432 NGBE_WRITE_REG(&sc->hw, NGBE_PX_MISC_IEN, 0)((((struct ngbe_osdep *)(&sc->hw)->back)->os_memt
)->write_4((((struct ngbe_osdep *)(&sc->hw)->back
)->os_memh), (0x00108), (0)))
;
2433 for (i = 0, nq = sc->queues; i < sc->sc_nqueues; i++, nq++)
2434 ngbe_disable_queue(sc, nq->msix);
2435 NGBE_WRITE_FLUSH(&sc->hw)((((struct ngbe_osdep *)(&sc->hw)->back)->os_memt
)->read_4((((struct ngbe_osdep *)(&sc->hw)->back
)->os_memh), (0x10000)))
;
2436}
2437
2438int
2439ngbe_disable_pcie_master(struct ngbe_softc *sc)
2440{
2441 int i, error = 0;
2442
2443 /* Exit if master requests are blocked */
2444 if (!(NGBE_READ_REG(&sc->hw, NGBE_PX_TRANSACTION_PENDING)((((struct ngbe_osdep *)(&sc->hw)->back)->os_memt
)->read_4((((struct ngbe_osdep *)(&sc->hw)->back
)->os_memh), (0x00168)))
))
2445 goto out;
2446
2447 /* Poll for master request bit to clear */
2448 for (i = 0; i < NGBE_PCI_MASTER_DISABLE_TIMEOUT800; i++) {
2449 DELAY(100)(*delay_func)(100);
2450 if (!(NGBE_READ_REG(&sc->hw, NGBE_PX_TRANSACTION_PENDING)((((struct ngbe_osdep *)(&sc->hw)->back)->os_memt
)->read_4((((struct ngbe_osdep *)(&sc->hw)->back
)->os_memh), (0x00168)))
))
2451 goto out;
2452 }
2453 printf("%s: PCIe transaction pending bit did not clear\n",
2454 DEVNAME(sc)((sc)->sc_dev.dv_xname));
2455 error = ETIMEDOUT60;
2456out:
2457 return error;
2458}
2459
2460void
2461ngbe_disable_queue(struct ngbe_softc *sc, uint32_t vector)
2462{
2463 uint64_t queue = 1ULL << vector;
2464 uint32_t mask;
2465
2466 mask = (queue & 0xffffffff);
2467 if (mask)
2468 NGBE_WRITE_REG(&sc->hw, NGBE_PX_IMS, mask)((((struct ngbe_osdep *)(&sc->hw)->back)->os_memt
)->write_4((((struct ngbe_osdep *)(&sc->hw)->back
)->os_memh), (0x00140), (mask)))
;
2469}
2470
/*
 * Disable the Rx path: clear the packet-buffer enable and the MAC
 * receive enable.  The PSR SW_EN bit is cleared first and the fact
 * recorded in hw->mac.set_lben so ngbe_enable_rx() can restore it.
 */
void
ngbe_disable_rx(struct ngbe_hw *hw)
{
	uint32_t rxctrl, psrctrl;

	rxctrl = NGBE_READ_REG(hw, NGBE_RDB_PB_CTL);
	if (rxctrl & NGBE_RDB_PB_CTL_PBEN) {
		psrctrl = NGBE_READ_REG(hw, NGBE_PSR_CTL);
		if (psrctrl & NGBE_PSR_CTL_SW_EN) {
			psrctrl &= ~NGBE_PSR_CTL_SW_EN;
			NGBE_WRITE_REG(hw, NGBE_PSR_CTL, psrctrl);
			/* Tell ngbe_enable_rx() to set SW_EN again. */
			hw->mac.set_lben = 1;
		} else
			hw->mac.set_lben = 0;
		rxctrl &= ~NGBE_RDB_PB_CTL_PBEN;
		NGBE_WRITE_REG(hw, NGBE_RDB_PB_CTL, rxctrl);

		/* Finally stop the MAC receiver. */
		NGBE_WRITE_REG_MASK(hw, NGBE_MAC_RX_CFG, NGBE_MAC_RX_CFG_RE,
		    0);
	}
}
2492
2493void
2494ngbe_disable_sec_rx_path(struct ngbe_hw *hw)
2495{
2496 uint32_t secrxreg;
2497 int i;
2498
2499 NGBE_WRITE_REG_MASK(hw, NGBE_RSEC_CTL0x17000, NGBE_RSEC_CTL_RX_DIS0x00000002,
2500 NGBE_RSEC_CTL_RX_DIS0x00000002);
2501 for (i = 0; i < 40; i++) {
2502 secrxreg = NGBE_READ_REG(hw, NGBE_RSEC_ST)((((struct ngbe_osdep *)(hw)->back)->os_memt)->read_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x17004
)))
;
2503 if (secrxreg & NGBE_RSEC_ST_RSEC_RDY0x00000001)
2504 break;
2505 else
2506 DELAY(1000)(*delay_func)(1000);
2507 }
2508}
2509
/*
 * Ask the firmware (via the host interface mailbox) for the result
 * of its EEPROM checksum verification.  Returns 0 when the mailbox
 * reports NGBE_CHECKSUM_CAP_ST_PASS, an errno otherwise.
 *
 * NOTE(review): 'offset' and 'data' are unused here; the command's
 * address/length fields are fixed at 0 -- the signature presumably
 * matches an ops table; confirm before changing it.
 */
int
ngbe_eepromcheck_cap(struct ngbe_softc *sc, uint16_t offset, uint32_t *data)
{
	struct ngbe_hw *hw = &sc->hw;
	struct ngbe_hic_read_shadow_ram buffer;
	uint32_t tmp;
	int status;

	buffer.hdr.req.cmd = FW_EEPROM_CHECK_STATUS;
	buffer.hdr.req.buf_lenh = 0;
	buffer.hdr.req.buf_lenl = 0;
	buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;

	/* Convert offset from words to bytes */
	buffer.address = 0;
	/* one word */
	buffer.length = 0;

	status = ngbe_host_interface_command(sc, (uint32_t *)&buffer,
	    sizeof(buffer), NGBE_HI_COMMAND_TIMEOUT, 0);
	if (status)
		return status;

	/* Result word lives in mailbox slot 1. */
	if (ngbe_check_mng_access(hw)) {
		tmp = NGBE_READ_REG_ARRAY(hw, NGBE_MNG_MBOX, 1);
		if (tmp == NGBE_CHECKSUM_CAP_ST_PASS)
			status = 0;
		else
			status = EINVAL;
	} else
		status = EINVAL;

	return status;
}
2544
/*
 * Unmask interrupts: enable the miscellaneous causes (including
 * over-heat), set up the GPIO interrupt pins, then unmask every
 * queue vector and finally the link vector.
 */
void
ngbe_enable_intr(struct ngbe_softc *sc)
{
	struct ngbe_hw *hw = &sc->hw;
	struct ngbe_queue *nq;
	uint32_t mask;
	int i;

	/* Enable misc interrupt */
	mask = NGBE_PX_MISC_IEN_MASK;

	mask |= NGBE_PX_MISC_IEN_OVER_HEAT;
	/* GPIO: direction, enable both pins, edge type, polarity. */
	NGBE_WRITE_REG(hw, NGBE_GPIO_DDR, 0x1);
	NGBE_WRITE_REG(hw, NGBE_GPIO_INTEN, 0x3);
	NGBE_WRITE_REG(hw, NGBE_GPIO_INTTYPE_LEVEL, 0x0);

	NGBE_WRITE_REG(hw, NGBE_GPIO_POLARITY, 0x3);

	NGBE_WRITE_REG(hw, NGBE_PX_MISC_IEN, mask);

	/* Enable all queues */
	for (i = 0, nq = sc->queues; i < sc->sc_nqueues; i++, nq++)
		ngbe_enable_queue(sc, nq->msix);
	NGBE_WRITE_FLUSH(hw);

	ngbe_enable_queue(sc, sc->linkvec);
}
2572
2573void
2574ngbe_enable_queue(struct ngbe_softc *sc, uint32_t vector)
2575{
2576 uint64_t queue = 1ULL << vector;
2577 uint32_t mask;
2578
2579 mask = (queue & 0xffffffff);
2580 if (mask)
2581 NGBE_WRITE_REG(&sc->hw, NGBE_PX_IMC, mask)((((struct ngbe_osdep *)(&sc->hw)->back)->os_memt
)->write_4((((struct ngbe_osdep *)(&sc->hw)->back
)->os_memh), (0x00150), (mask)))
;
2582}
2583
/*
 * Re-enable the Rx path: MAC receiver, security block and packet
 * buffer.  Restores the PSR SW_EN bit when ngbe_disable_rx()
 * recorded (hw->mac.set_lben) that it had cleared it.
 */
void
ngbe_enable_rx(struct ngbe_hw *hw)
{
	uint32_t val;

	/* Enable mac receiver */
	NGBE_WRITE_REG_MASK(hw, NGBE_MAC_RX_CFG, NGBE_MAC_RX_CFG_RE,
	    NGBE_MAC_RX_CFG_RE);

	/* Clear the Rx-disable bit in the security block. */
	NGBE_WRITE_REG_MASK(hw, NGBE_RSEC_CTL, 0x2, 0);

	NGBE_WRITE_REG_MASK(hw, NGBE_RDB_PB_CTL, NGBE_RDB_PB_CTL_PBEN,
	    NGBE_RDB_PB_CTL_PBEN);

	if (hw->mac.set_lben) {
		val = NGBE_READ_REG(hw, NGBE_PSR_CTL);
		val |= NGBE_PSR_CTL_SW_EN;
		NGBE_WRITE_REG(hw, NGBE_PSR_CTL, val);
		hw->mac.set_lben = 0;
	}
}
2605
2606void
2607ngbe_enable_rx_dma(struct ngbe_hw *hw, uint32_t reg)
2608{
2609 /*
2610 * Workaround for emerald silicon errata when enabling the Rx datapath.
2611 * If traffic is incoming before we enable the Rx unit, it could hang
2612 * the Rx DMA unit. Therefore, make sure the security engine is
2613 * completely disabled prior to enabling the Rx unit.
2614 */
2615 hw->mac.ops.disable_sec_rx_path(hw);
2616
2617 if (reg & NGBE_RDB_PB_CTL_PBEN0x80000000)
2618 hw->mac.ops.enable_rx(hw);
2619 else
2620 hw->mac.ops.disable_rx(hw);
2621
2622 hw->mac.ops.enable_sec_rx_path(hw);
2623}
2624
/*
 * Re-enable the Rx security block (clears RX_DIS) after a prior
 * ngbe_disable_sec_rx_path(), then flush so the write posts.
 */
void
ngbe_enable_sec_rx_path(struct ngbe_hw *hw)
{
	NGBE_WRITE_REG_MASK(hw, NGBE_RSEC_CTL, NGBE_RSEC_CTL_RX_DIS, 0);
	NGBE_WRITE_FLUSH(hw);
}
2631
2632int
2633ngbe_encap(struct tx_ring *txr, struct mbuf *m)
2634{
2635 struct ngbe_softc *sc = txr->sc;
2636 uint32_t olinfo_status = 0, cmd_type_len;
2637 int i, j, ntxc;
2638 int first, last = 0;
2639 bus_dmamap_t map;
2640 struct ngbe_tx_buf *txbuf;
2641 union ngbe_tx_desc *txd = NULL((void *)0);
11
'txd' initialized to a null pointer value
2642
2643 /* Basic descriptor defines */
2644 cmd_type_len = NGBE_TXD_DTYP_DATA0x00000000 | NGBE_TXD_IFCS0x02000000;
2645
2646 /*
2647 * Important to capture the first descriptor
2648 * used because it will contain the index of
2649 * the one we tell the hardware to report back
2650 */
2651 first = txr->next_avail_desc;
2652 txbuf = &txr->tx_buffers[first];
2653 map = txbuf->map;
2654
2655 /*
2656 * Set the appropriate offload context
2657 * this will becomes the first descriptor.
2658 */
2659 ntxc = ngbe_tx_ctx_setup(txr, m, &cmd_type_len, &olinfo_status);
2660 if (ntxc == -1)
12
Taking false branch
2661 goto fail;
2662
2663 /*
2664 * Map the packet for DMA.
2665 */
2666 switch (bus_dmamap_load_mbuf(txr->txdma.dma_tag, map, m,(*(txr->txdma.dma_tag)->_dmamap_load_mbuf)((txr->txdma
.dma_tag), (map), (m), (0x0001))
13
Control jumps to 'case 0:' at line 2668
2667 BUS_DMA_NOWAIT)(*(txr->txdma.dma_tag)->_dmamap_load_mbuf)((txr->txdma
.dma_tag), (map), (m), (0x0001))
) {
2668 case 0:
2669 break;
14
Execution continues on line 2680
2670 case EFBIG27:
2671 if (m_defrag(m, M_NOWAIT0x0002) == 0 &&
2672 bus_dmamap_load_mbuf(txr->txdma.dma_tag, map, m,(*(txr->txdma.dma_tag)->_dmamap_load_mbuf)((txr->txdma
.dma_tag), (map), (m), (0x0001))
2673 BUS_DMA_NOWAIT)(*(txr->txdma.dma_tag)->_dmamap_load_mbuf)((txr->txdma
.dma_tag), (map), (m), (0x0001))
== 0)
2674 break;
2675 /* FALLTHROUGH */
2676 default:
2677 return 0;
2678 }
2679
2680 i = txr->next_avail_desc + ntxc;
2681 if (i >= sc->num_tx_desc)
15
Assuming 'i' is < field 'num_tx_desc'
16
Taking false branch
2682 i -= sc->num_tx_desc;
2683
2684 for (j = 0; j < map->dm_nsegs; j++) {
17
Assuming 'j' is >= field 'dm_nsegs'
18
Loop condition is false. Execution continues on line 2697
2685 txd = &txr->tx_base[i];
2686
2687 txd->read.buffer_addr = htole64(map->dm_segs[j].ds_addr)((__uint64_t)(map->dm_segs[j].ds_addr));
2688 txd->read.cmd_type_len =
2689 htole32(cmd_type_len | map->dm_segs[j].ds_len)((__uint32_t)(cmd_type_len | map->dm_segs[j].ds_len));
2690 txd->read.olinfo_status = htole32(olinfo_status)((__uint32_t)(olinfo_status));
2691 last = i;
2692
2693 if (++i == sc->num_tx_desc)
2694 i = 0;
2695 }
2696
2697 txd->read.cmd_type_len |= htole32(NGBE_TXD_EOP | NGBE_TXD_RS)((__uint32_t)(0x01000000 | 0x08000000));
19
Dereference of null pointer
2698
2699 bus_dmamap_sync(txr->txdma.dma_tag, map, 0, map->dm_mapsize,(*(txr->txdma.dma_tag)->_dmamap_sync)((txr->txdma.dma_tag
), (map), (0), (map->dm_mapsize), (0x04))
2700 BUS_DMASYNC_PREWRITE)(*(txr->txdma.dma_tag)->_dmamap_sync)((txr->txdma.dma_tag
), (map), (0), (map->dm_mapsize), (0x04))
;
2701
2702 /* Set the index of the descriptor that will be marked done */
2703 txbuf->m_head = m;
2704 txbuf->eop_index = last;
2705
2706 txr->next_avail_desc = i;
2707
2708 return ntxc + j;
2709
2710fail:
2711 bus_dmamap_unload(txr->txdma.dma_tag, txbuf->map)(*(txr->txdma.dma_tag)->_dmamap_unload)((txr->txdma.
dma_tag), (txbuf->map))
;
2712 return 0;
2713}
2714
/*
 * Populate Rx slot 'i' with a fresh mbuf cluster: allocate it, push
 * the payload to the cluster's tail so the IP header ends up
 * ETHER_ALIGN-aligned, DMA-load it and write the physical address
 * into the Rx descriptor.  Returns 0 or an errno.
 */
int
ngbe_get_buf(struct rx_ring *rxr, int i)
{
	struct ngbe_softc *sc = rxr->sc;
	struct ngbe_rx_buf *rxbuf;
	struct mbuf *m;
	union ngbe_rx_desc *rxdesc;
	int error;

	rxbuf = &rxr->rx_buffers[i];
	rxdesc = &rxr->rx_base[i];
	if (rxbuf->buf) {
		printf("%s: slot %d already has an mbuf\n", DEVNAME(sc), i);
		return ENOBUFS;
	}

	m = MCLGETL(NULL, M_DONTWAIT, MCLBYTES + ETHER_ALIGN);
	if (!m)
		return ENOBUFS;

	/* Use the trailing MCLBYTES+ETHER_ALIGN bytes of the cluster. */
	m->m_data += (m->m_ext.ext_size - (MCLBYTES + ETHER_ALIGN));
	m->m_len = m->m_pkthdr.len = MCLBYTES + ETHER_ALIGN;

	error = bus_dmamap_load_mbuf(rxr->rxdma.dma_tag, rxbuf->map, m,
	    BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m);
		return error;
	}

	bus_dmamap_sync(rxr->rxdma.dma_tag, rxbuf->map, 0,
	    rxbuf->map->dm_mapsize, BUS_DMASYNC_PREREAD);
	rxbuf->buf = m;

	rxdesc->read.pkt_addr = htole64(rxbuf->map->dm_segs[0].ds_addr);

	return 0;
}
2753
2754void
2755ngbe_get_bus_info(struct ngbe_softc *sc)
2756{
2757 struct ngbe_hw *hw = &sc->hw;
2758 uint16_t link_status;
2759
2760 /* Get the negotiated link width and speed from PCI config space */
2761 link_status = ngbe_read_pci_cfg_word(sc, NGBE_PCI_LINK_STATUS0xb2);
2762
2763 ngbe_set_pci_config_data(hw, link_status);
2764}
2765
2766void
2767ngbe_get_copper_link_capabilities(struct ngbe_hw *hw, uint32_t *speed,
2768 int *autoneg)
2769{
2770 *speed = 0;
2771
2772 if (hw->mac.autoneg)
2773 *autoneg = 1;
2774 else
2775 *autoneg = 0;
2776
2777 *speed = NGBE_LINK_SPEED_10_FULL8 | NGBE_LINK_SPEED_100_FULL1 |
2778 NGBE_LINK_SPEED_1GB_FULL2;
2779}
2780
/*
 * Acquire the inter-driver EEPROM semaphore (SMBI bit in MIS_SWSM).
 * Polls up to 2000 x 50us; on timeout, force-releases the semaphore
 * and tries once more.  Returns 0 on success, ETIMEDOUT otherwise.
 */
int
ngbe_get_eeprom_semaphore(struct ngbe_softc *sc)
{
	struct ngbe_hw *hw = &sc->hw;
	uint32_t swsm;
	int i, timeout = 2000;
	int status = ETIMEDOUT;

	/* Get SMBI software semaphore between device drivers first */
	for (i = 0; i < timeout; i++) {
		/*
		 * If the SMBI bit is 0 when we read it, then the bit will be
		 * set and we have the semaphore.
		 */
		swsm = NGBE_READ_REG(hw, NGBE_MIS_SWSM);
		if (!(swsm & NGBE_MIS_SWSM_SMBI)) {
			status = 0;
			break;
		}
		DELAY(50);
	}

	if (i == timeout) {
		printf("%s: cannot access the eeprom - SMBI semaphore not "
		    "granted\n", DEVNAME(sc));
		/*
		 * this release is particularly important because our attempts
		 * above to get the semaphore may have succeeded, and if there
		 * was a timeout, we should unconditionally clear the semaphore
		 * bits to free the driver to make progress.
		 */
		ngbe_release_eeprom_semaphore(hw);
		DELAY(50);

		/*
		 * One last try if the SMBI bit is 0 when we read it,
		 * then the bit will be set and we have the semaphore.
		 */
		swsm = NGBE_READ_REG(hw, NGBE_MIS_SWSM);
		if (!(swsm & NGBE_MIS_SWSM_SMBI))
			status = 0;
	}

	return status;
}
2826
/*
 * Assert the DRV_LOAD bit so firmware knows the driver owns the
 * device; the inverse of ngbe_release_hw_control().
 */
void
ngbe_get_hw_control(struct ngbe_hw *hw)
{
	/* Let firmware know the driver has taken over */
	NGBE_WRITE_REG_MASK(hw, NGBE_CFG_PORT_CTL,
	    NGBE_CFG_PORT_CTL_DRV_LOAD, NGBE_CFG_PORT_CTL_DRV_LOAD);
}
2834
/*
 * Clear the DRV_LOAD bit, handing device ownership back to the
 * firmware; the inverse of ngbe_get_hw_control().
 */
void
ngbe_release_hw_control(struct ngbe_softc *sc)
{
	/* Let firmware take over control of hw. */
	NGBE_WRITE_REG_MASK(&sc->hw, NGBE_CFG_PORT_CTL,
	    NGBE_CFG_PORT_CTL_DRV_LOAD, 0);
}
2842
2843void
2844ngbe_get_mac_addr(struct ngbe_hw *hw, uint8_t *mac_addr)
2845{
2846 uint32_t rar_high, rar_low;
2847 int i;
2848
2849 NGBE_WRITE_REG(hw, NGBE_PSR_MAC_SWC_IDX, 0)((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x16210
), (0)))
;
2850 rar_high = NGBE_READ_REG(hw, NGBE_PSR_MAC_SWC_AD_H)((((struct ngbe_osdep *)(hw)->back)->os_memt)->read_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x16204
)))
;
2851 rar_low = NGBE_READ_REG(hw, NGBE_PSR_MAC_SWC_AD_L)((((struct ngbe_osdep *)(hw)->back)->os_memt)->read_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x16200
)))
;
2852
2853 for (i = 0; i < 2; i++)
2854 mac_addr[i] = (uint8_t)(rar_high >> (1 - i) * 8);
2855
2856 for (i = 0; i < 4; i++)
2857 mac_addr[i + 2] = (uint8_t)(rar_low >> (3 - i) * 8);
2858}
2859
2860enum ngbe_media_type
2861ngbe_get_media_type(struct ngbe_hw *hw)
2862{
2863 enum ngbe_media_type media_type = ngbe_media_type_copper;
2864
2865 return media_type;
2866}
2867
/*
 * Disable EEE in the internal GPHY: zero MMD device 7 register
 * 0x3c through the MMD indirect access registers, then drop the
 * half-duplex autoneg advertisements.
 */
void
ngbe_gphy_dis_eee(struct ngbe_hw *hw)
{
	uint16_t val = 0;

	/* NOTE(review): magic write from vendor code -- purpose unclear. */
	hw->phy.ops.write_reg(hw, 0x11, 0xa4b, 0x1110);
	/* MMD indirect: select dev 7 reg 0x3c, then write data 0. */
	hw->phy.ops.write_reg(hw, MII_MMDACR, 0x0, MMDACR_FN_ADDRESS | 0x07);
	hw->phy.ops.write_reg(hw, MII_MMDAADR, 0x0, 0x003c);
	hw->phy.ops.write_reg(hw, MII_MMDACR, 0x0, MMDACR_FN_DATANPI | 0x07);
	hw->phy.ops.write_reg(hw, MII_MMDAADR, 0x0, 0);

	/* Disable 10/100M Half Duplex */
	msec_delay(100);
	hw->phy.ops.read_reg(hw, MII_ANAR, 0, &val);
	val &= ~(ANAR_TX | ANAR_10);
	hw->phy.ops.write_reg(hw, MII_ANAR, 0x0, val);
}
2885
/*
 * Load the GPHY efuse calibration words (sc->gphy_efuse, captured
 * elsewhere in the driver) into the PHY's efuse output data
 * registers on page 0xa46, mark them ready, then disable EEE.
 */
void
ngbe_gphy_efuse_calibration(struct ngbe_softc *sc)
{
	struct ngbe_hw *hw = &sc->hw;
	uint32_t efuse[2];

	ngbe_gphy_wait_mdio_access_on(hw);

	efuse[0] = sc->gphy_efuse[0];
	efuse[1] = sc->gphy_efuse[1];

	/* No saved efuse data: fall back to all-ones. */
	if (!efuse[0] && !efuse[1])
		efuse[0] = efuse[1] = 0xffffffff;

	/* Calibration */
	efuse[0] |= 0xf0000100;
	efuse[1] |= 0xff807fff;

	/* EODR, Efuse Output Data Register */
	ngbe_phy_write_reg(hw, 16, 0xa46, (efuse[0] >> 0) & 0xffff);
	ngbe_phy_write_reg(hw, 17, 0xa46, (efuse[0] >> 16) & 0xffff);
	ngbe_phy_write_reg(hw, 18, 0xa46, (efuse[1] >> 0) & 0xffff);
	ngbe_phy_write_reg(hw, 19, 0xa46, (efuse[1] >> 16) & 0xffff);

	/* Set efuse ready */
	ngbe_phy_write_reg(hw, 20, 0xa46, 0x01);
	ngbe_gphy_wait_mdio_access_on(hw);
	/* NOTE(review): vendor magic writes to regs 27/28 -- unclear. */
	ngbe_phy_write_reg(hw, 27, NGBE_INTERNAL_PHY_PAGE_OFFSET, 0x8011);
	ngbe_phy_write_reg(hw, 28, NGBE_INTERNAL_PHY_PAGE_OFFSET, 0x5737);
	ngbe_gphy_dis_eee(hw);
}
2917
2918void
2919ngbe_gphy_wait_mdio_access_on(struct ngbe_hw *hw)
2920{
2921 uint16_t val = 0;
2922 int i;
2923
2924 for (i = 0; i < 100; i++) {
2925 ngbe_phy_read_reg(hw, 29, NGBE_INTERNAL_PHY_PAGE_OFFSET0xa43, &val);
2926 if (val & 0x20)
2927 break;
2928 DELAY(1000)(*delay_func)(1000);
2929 }
2930}
2931
2932void
2933ngbe_handle_phy_event(struct ngbe_softc *sc)
2934{
2935 struct ngbe_hw *hw = &sc->hw;
2936 uint32_t reg;
2937
2938 reg = NGBE_READ_REG(hw, NGBE_GPIO_INTSTATUS)((((struct ngbe_osdep *)(hw)->back)->os_memt)->read_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x14840
)))
;
2939 NGBE_WRITE_REG(hw, NGBE_GPIO_EOI, reg)((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x1484c
), (reg)))
;
2940 if (!((hw->subsystem_device_id & OEM_MASK0x00ff) == RGMII_FPGA0x0080))
2941 hw->phy.ops.check_event(sc);
2942}
2943
/*
 * Issue a command block to the management firmware through the
 * mailbox.  'length' is in bytes, must be dword-aligned and at most
 * NGBE_HI_MAX_BLOCK_BYTE_LENGTH.  Waits up to 'timeout' ms for the
 * firmware-ready bit; when 'return_data' is nonzero the reply
 * (header plus payload) is copied back into 'buffer'.  Mailbox
 * access is serialized via the SWFW sync semaphore.
 * Returns 0 or an errno.
 */
int
ngbe_host_interface_command(struct ngbe_softc *sc, uint32_t *buffer,
    uint32_t length, uint32_t timeout, int return_data)
{
	struct ngbe_hw *hw = &sc->hw;
	uint32_t hicr, i, bi, dword_len;
	uint32_t hdr_size = sizeof(struct ngbe_hic_hdr);
	uint32_t buf[64] = {};
	uint16_t buf_len;
	int status = 0;

	if (length == 0 || length > NGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
		printf("%s: buffer length failure\n", DEVNAME(sc));
		return EINVAL;
	}

	if (hw->mac.ops.acquire_swfw_sync(sc, NGBE_MNG_SWFW_SYNC_SW_MB))
		return EINVAL;

	/* Calculate length in DWORDs. We must be multiple of DWORD */
	if ((length % (sizeof(uint32_t))) != 0) {
		printf("%s: buffer length failure, not aligned to dword\n",
		    DEVNAME(sc));
		status = EINVAL;
		goto rel_out;
	}

	/* A set FWRDY here means a stale ack from a previous command. */
	if (ngbe_check_mng_access(hw)) {
		hicr = NGBE_READ_REG(hw, NGBE_MNG_MBOX_CTL);
		if ((hicr & NGBE_MNG_MBOX_CTL_FWRDY))
			printf("%s: fwrdy is set before command\n",
			    DEVNAME(sc));
	}

	dword_len = length >> 2;

	/*
	 * The device driver writes the relevant command block
	 * into the ram area.
	 */
	for (i = 0; i < dword_len; i++) {
		if (ngbe_check_mng_access(hw)) {
			NGBE_WRITE_REG_ARRAY(hw, NGBE_MNG_MBOX, i,
			    htole32(buffer[i]));
		} else {
			status = EINVAL;
			goto rel_out;
		}
	}

	/* Setting this bit tells the ARC that a new command is pending. */
	if (ngbe_check_mng_access(hw)) {
		NGBE_WRITE_REG_MASK(hw, NGBE_MNG_MBOX_CTL,
		    NGBE_MNG_MBOX_CTL_SWRDY, NGBE_MNG_MBOX_CTL_SWRDY);
	} else {
		status = EINVAL;
		goto rel_out;
	}

	/* Poll (1ms steps) for the firmware-ready acknowledgement. */
	for (i = 0; i < timeout; i++) {
		if (ngbe_check_mng_access(hw)) {
			hicr = NGBE_READ_REG(hw, NGBE_MNG_MBOX_CTL);
			if ((hicr & NGBE_MNG_MBOX_CTL_FWRDY))
				break;
		}
		msec_delay(1);
	}

	buf[0] = NGBE_READ_REG(hw, NGBE_MNG_MBOX);
	/* Check command completion */
	if (timeout != 0 && i == timeout) {
		printf("%s: command has failed with no status valid\n",
		    DEVNAME(sc));
		/* Accept timeout only if the echoed command byte matches. */
		if ((buffer[0] & 0xff) != (~buf[0] >> 24)) {
			status = EINVAL;
			goto rel_out;
		}
	}

	if (!return_data)
		goto rel_out;

	/* Calculate length in DWORDs */
	dword_len = hdr_size >> 2;

	/* First pull in the header so we know the buffer length */
	for (bi = 0; bi < dword_len; bi++) {
		if (ngbe_check_mng_access(hw)) {
			buffer[bi] = NGBE_READ_REG_ARRAY(hw, NGBE_MNG_MBOX, bi);
			le32_to_cpus(&buffer[bi]);
		} else {
			status = EINVAL;
			goto rel_out;
		}
	}

	/* If there is any thing in data position pull it in */
	buf_len = ((struct ngbe_hic_hdr *)buffer)->buf_len;
	if (buf_len == 0)
		goto rel_out;

	if (length < buf_len + hdr_size) {
		printf("%s: buffer not large enough for reply message\n",
		    DEVNAME(sc));
		status = EINVAL;
		goto rel_out;
	}

	/* Calculate length in DWORDs, add 3 for odd lengths */
	dword_len = (buf_len + 3) >> 2;

	/* Pull in the rest of the buffer (bi is where we left off) */
	for (; bi <= dword_len; bi++) {
		if (ngbe_check_mng_access(hw)) {
			buffer[bi] = NGBE_READ_REG_ARRAY(hw, NGBE_MNG_MBOX, bi);
			le32_to_cpus(&buffer[bi]);
		} else {
			status = EINVAL;
			goto rel_out;
		}
	}

rel_out:
	hw->mac.ops.release_swfw_sync(sc, NGBE_MNG_SWFW_SYNC_SW_MB);
	return status;
}
3070
3071int
3072ngbe_hpbthresh(struct ngbe_softc *sc)
3073{
3074 uint32_t dv_id, rx_pba;
3075 int kb, link, marker, tc;
3076
3077 /* Calculate max LAN frame size */
3078 tc = link = sc->sc_ac.ac_if.if_mtuif_data.ifi_mtu + ETHER_HDR_LEN((6 * 2) + 2) + ETHER_CRC_LEN4 +
3079 NGBE_ETH_FRAMING20;
3080
3081 /* Calculate delay value for device */
3082 dv_id = NGBE_DV(link, tc)((36 * ((link * 8) + 672 + (2 * 5556) + (2 * (4096 + (2 * 1024
) + 12800)) + 6144) / 25 + 1) + 2 * (tc * 8))
;
3083
3084 /* Delay value is calculated in bit times convert to KB */
3085 kb = NGBE_BT2KB(dv_id)((dv_id + (8 * 1024 - 1)) / (8 * 1024));
3086 rx_pba = NGBE_READ_REG(&sc->hw, NGBE_RDB_PB_SZ)((((struct ngbe_osdep *)(&sc->hw)->back)->os_memt
)->read_4((((struct ngbe_osdep *)(&sc->hw)->back
)->os_memh), (0x19020)))
>> NGBE_RDB_PB_SZ_SHIFT10;
3087
3088 marker = rx_pba - kb;
3089
3090 return marker;
3091}
3092
3093int
3094ngbe_lpbthresh(struct ngbe_softc *sc)
3095{
3096 uint32_t dv_id;
3097 int tc;
3098
3099 /* Calculate max LAN frame size */
3100 tc = sc->sc_ac.ac_if.if_mtuif_data.ifi_mtu + ETHER_HDR_LEN((6 * 2) + 2) + ETHER_CRC_LEN4;
3101
3102 /* Calculate delay value for device */
3103 dv_id = NGBE_LOW_DV(tc)(2 * (2 * (tc * 8) + (36 * 10000 / 25) + 1));
3104
3105 /* Delay value is calculated in bit times convert to KB */
3106 return NGBE_BT2KB(dv_id)((dv_id + (8 * 1024 - 1)) / (8 * 1024));
3107}
3108
3109int
3110ngbe_mng_present(struct ngbe_hw *hw)
3111{
3112 uint32_t fwsm;
3113
3114 fwsm = NGBE_READ_REG(hw, NGBE_MIS_ST)((((struct ngbe_osdep *)(hw)->back)->os_memt)->read_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x10028
)))
;
3115
3116 return fwsm & NGBE_MIS_ST_MNG_INIT_DN0x00000001;
3117}
3118
3119int
3120ngbe_mta_vector(struct ngbe_hw *hw, uint8_t *mc_addr)
3121{
3122 uint32_t vector = 0;
3123 int rshift;
3124
3125 /* pick bits [47:32] of the address. */
3126 vector = mc_addr[4] | (((uint16_t)mc_addr[5]) << 8);
3127 switch (hw->mac.mc_filter_type) {
3128 case 0: /* bits 47:36 */
3129 case 1: /* bits 46:35 */
3130 case 2: /* bits 45:34 */
3131 rshift = 4 - hw->mac.mc_filter_type;
3132 break;
3133 case 3: /* bits 43:32 */
3134 rshift = 0;
3135 break;
3136 default: /* Invalid mc_filter_type */
3137 vector = rshift = 0;
3138 break;
3139 }
3140 vector = (vector >> rshift) & 0x0fff;
3141
3142 return vector;
3143}
3144
/*
 * Resolve the flow control mode from the local advertisement
 * (adv_reg) and the link partner's abilities (lp_reg), using the
 * caller-supplied symmetric/asymmetric pause bit masks (IEEE 802.3
 * pause resolution).  Stores the result in hw->fc.current_mode.
 * Returns EINVAL when either register value is zero.
 */
int
ngbe_negotiate_fc(struct ngbe_softc *sc, uint32_t adv_reg, uint32_t lp_reg,
    uint32_t adv_sym, uint32_t adv_asm, uint32_t lp_sym, uint32_t lp_asm)
{
	struct ngbe_hw *hw = &sc->hw;

	if ((!(adv_reg)) || (!(lp_reg)))
		return EINVAL;

	if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
		/*
		 * Now we need to check if the user selected Rx ONLY
		 * of pause frames. In this case, we had to advertise
		 * FULL flow control because we could not advertise RX
		 * ONLY. Hence, we must now check to see if we need to
		 * turn OFF the TRANSMISSION of PAUSE frames.
		 */
		if (hw->fc.requested_mode == ngbe_fc_full)
			hw->fc.current_mode = ngbe_fc_full;
		else
			hw->fc.current_mode = ngbe_fc_rx_pause;

	} else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
	    (lp_reg & lp_sym) && (lp_reg & lp_asm))
		/* We advertise asym only; partner can pause us. */
		hw->fc.current_mode = ngbe_fc_tx_pause;
	else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
	    !(lp_reg & lp_sym) && (lp_reg & lp_asm))
		/* Partner advertises asym only; we may pause them. */
		hw->fc.current_mode = ngbe_fc_rx_pause;
	else
		hw->fc.current_mode = ngbe_fc_none;

	return 0;
}
3178
3179int
3180ngbe_non_sfp_link_config(struct ngbe_softc *sc)
3181{
3182 struct ngbe_hw *hw = &sc->hw;
3183 uint32_t speed;
3184 int error;
3185
3186 if (hw->mac.autoneg)
3187 speed = hw->phy.autoneg_advertised;
3188 else
3189 speed = hw->phy.force_speed;
3190
3191 msec_delay(50)(*delay_func)(1000 * (50));
3192 if (hw->phy.type == ngbe_phy_internal) {
3193 error = hw->phy.ops.setup_once(sc);
3194 if (error)
3195 return error;
3196 }
3197
3198 error = hw->mac.ops.setup_link(sc, speed, 0);
3199 return error;
3200}
3201
3202void
3203ngbe_pbthresh_setup(struct ngbe_softc *sc)
3204{
3205 struct ngbe_hw *hw = &sc->hw;
3206
3207 hw->fc.high_water = ngbe_hpbthresh(sc);
3208 hw->fc.low_water = ngbe_lpbthresh(sc);
3209
3210 /* Low water marks must not be larger than high water marks */
3211 if (hw->fc.low_water > hw->fc.high_water)
3212 hw->fc.low_water = 0;
3213}
3214
3215void
3216ngbe_phy_check_event(struct ngbe_softc *sc)
3217{
3218 struct ngbe_hw *hw = &sc->hw;
3219 uint16_t value = 0;
3220
3221 hw->phy.ops.read_reg(hw, NGBE_MDIO_AUTO_NEG_LSC0x1d,
3222 NGBE_INTERNAL_PHY_PAGE_OFFSET0xa43, &value);
3223}
3224
3225int
3226ngbe_phy_check_overtemp(struct ngbe_hw *hw)
3227{
3228 uint32_t ts_state;
3229 int status = 0;
3230
3231 /* Check that the LASI temp alarm status was triggered */
3232 ts_state = NGBE_READ_REG(hw, NGBE_TS_ALARM_ST)((((struct ngbe_osdep *)(hw)->back)->os_memt)->read_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x10318
)))
;
3233
3234 if (ts_state & NGBE_TS_ALARM_ST_ALARM0x00000001)
3235 status = 1;
3236
3237 return status;
3238}
3239
3240void
3241ngbe_phy_get_advertised_pause(struct ngbe_hw *hw, uint8_t *pause_bit)
3242{
3243 uint16_t value;
3244
3245 hw->phy.ops.read_reg(hw, 4, 0, &value);
3246 *pause_bit = (uint8_t)((value >> 10) & 0x3);
3247}
3248
3249void
3250ngbe_phy_get_lp_advertised_pause(struct ngbe_hw *hw, uint8_t *pause_bit)
3251{
3252 uint16_t value;
3253
3254 hw->phy.ops.read_reg(hw, NGBE_MDIO_AUTO_NEG_LSC0x1d,
3255 NGBE_INTERNAL_PHY_PAGE_OFFSET0xa43, &value);
3256 hw->phy.ops.read_reg(hw, MII_BMSR0x01, 0, &value);
3257 value = (value & BMSR_ACOMP0x0020) ? 1 : 0;
3258
3259 /* If AN complete then check lp adv pause */
3260 hw->phy.ops.read_reg(hw, MII_ANLPAR0x05, 0, &value);
3261 *pause_bit = (uint8_t)((value >> 10) & 0x3);
3262}
3263
3264int
3265ngbe_phy_identify(struct ngbe_softc *sc)
3266{
3267 struct ngbe_hw *hw = &sc->hw;
3268 int error;
3269
3270 switch(hw->phy.type) {
3271 case ngbe_phy_internal:
3272 error = ngbe_check_internal_phy_id(sc);
3273 break;
3274 default:
3275 error = ENOTSUP91;
3276 }
3277
3278 return error;
3279}
3280
3281int
3282ngbe_phy_init(struct ngbe_softc *sc)
3283{
3284 struct ngbe_hw *hw = &sc->hw;
3285 uint16_t value;
3286 uint8_t lan_id = hw->bus.lan_id;
3287 int error;
3288
3289 /* Set fwsw semaphore mask for phy first */
3290 if (!hw->phy.phy_semaphore_mask)
3291 hw->phy.phy_semaphore_mask = NGBE_MNG_SWFW_SYNC_SW_PHY0x0001;
3292
3293 /* Init phy.addr according to HW design */
3294 hw->phy.addr = 0;
3295
3296 /* Identify the PHY or SFP module */
3297 error = hw->phy.ops.identify(sc);
3298 if (error == ENOTSUP91)
3299 return error;
3300
3301 /* Enable interrupts, only link status change and an done is allowed */
3302 if (hw->phy.type == ngbe_phy_internal) {
3303 value = NGBE_INTPHY_INT_LSC0x0010 | NGBE_INTPHY_INT_ANC0x0008;
3304 hw->phy.ops.write_reg(hw, 0x12, 0xa42, value);
3305 sc->gphy_efuse[0] =
3306 ngbe_flash_read_dword(hw, 0xfe010 + lan_id * 8);
3307 sc->gphy_efuse[1] =
3308 ngbe_flash_read_dword(hw, 0xfe010 + lan_id * 8 + 4);
3309 }
3310
3311 return error;
3312}
3313
3314void
3315ngbe_phy_led_ctrl(struct ngbe_softc *sc)
3316{
3317 struct ngbe_hw *hw = &sc->hw;
3318 uint16_t value;
3319
3320 if (sc->led_conf != -1)
3321 value = sc->led_conf & 0xffff;
3322 else
3323 value = 0x205b;
3324 hw->phy.ops.write_reg(hw, 16, 0xd04, value);
3325 hw->phy.ops.write_reg(hw, 17, 0xd04, 0);
3326
3327 hw->phy.ops.read_reg(hw, 18, 0xd04, &value);
3328 if (sc->led_conf != -1) {
3329 value &= ~0x73;
3330 value |= sc->led_conf >> 16;
3331 } else {
3332 value &= 0xfffc;
3333 /* Act led blinking mode set to 60ms */
3334 value |= 0x2;
3335 }
3336 hw->phy.ops.write_reg(hw, 18, 0xd04, value);
3337}
3338
3339int
3340ngbe_phy_led_oem_chk(struct ngbe_softc *sc, uint32_t *data)
3341{
3342 struct ngbe_hw *hw = &sc->hw;
3343 struct ngbe_hic_read_shadow_ram buffer;
3344 uint32_t tmp;
3345 int status;
3346
3347 buffer.hdr.req.cmd = FW_PHY_LED_CONF0xf1;
3348 buffer.hdr.req.buf_lenh = 0;
3349 buffer.hdr.req.buf_lenl = 0;
3350 buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM0xff;
3351
3352 /* Convert offset from words to bytes */
3353 buffer.address = 0;
3354 /* One word */
3355 buffer.length = 0;
3356
3357 status = ngbe_host_interface_command(sc, (uint32_t *)&buffer,
3358 sizeof(buffer), NGBE_HI_COMMAND_TIMEOUT5000, 0);
3359 if (status)
3360 return status;
3361
3362 if (ngbe_check_mng_access(hw)) {
3363 tmp = NGBE_READ_REG_ARRAY(hw, NGBE_MNG_MBOX, 1)((((struct ngbe_osdep *)(hw)->back)->os_memt)->read_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), ((0x1e100
+ ((1) << 2)))))
;
3364 if (tmp == NGBE_CHECKSUM_CAP_ST_PASS0x80658383) {
3365 tmp = NGBE_READ_REG_ARRAY(hw, NGBE_MNG_MBOX, 2)((((struct ngbe_osdep *)(hw)->back)->os_memt)->read_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), ((0x1e100
+ ((2) << 2)))))
;
3366 *data = tmp;
3367 status = 0;
3368 } else if (tmp == NGBE_CHECKSUM_CAP_ST_FAIL0x70657376) {
3369 *data = tmp;
3370 status = EINVAL22;
3371 } else
3372 status = EINVAL22;
3373 } else {
3374 status = EINVAL22;
3375 return status;
3376 }
3377
3378 return status;
3379}
3380
3381int
3382ngbe_phy_read_reg(struct ngbe_hw *hw, uint32_t off, uint32_t page,
3383 uint16_t *data)
3384{
3385 *data = 0;
3386
3387 if (!((page == NGBE_INTERNAL_PHY_PAGE_OFFSET0xa43) &&
3388 ((off == NGBE_MDIO_AUTO_NEG_STATUS0x1a) ||
3389 (off == NGBE_MDIO_AUTO_NEG_LSC0x1d)))) {
3390 NGBE_WRITE_REG(hw,((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), ((0x14000
+ ((31) * 4))), (page)))
3391 NGBE_PHY_CONFIG(NGBE_INTERNAL_PHY_PAGE_SELECT_OFFSET),((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), ((0x14000
+ ((31) * 4))), (page)))
3392 page)((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), ((0x14000
+ ((31) * 4))), (page)))
;
3393 }
3394 *data = NGBE_READ_REG(hw, NGBE_PHY_CONFIG(off))((((struct ngbe_osdep *)(hw)->back)->os_memt)->read_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), ((0x14000
+ ((off) * 4)))))
& 0xffff;
3395
3396 return 0;
3397}
3398
3399int
3400ngbe_phy_write_reg(struct ngbe_hw *hw, uint32_t off, uint32_t page,
3401 uint16_t data)
3402{
3403 if (!((page == NGBE_INTERNAL_PHY_PAGE_OFFSET0xa43) &&
3404 ((off == NGBE_MDIO_AUTO_NEG_STATUS0x1a) ||
3405 (off == NGBE_MDIO_AUTO_NEG_LSC0x1d)))) {
3406 NGBE_WRITE_REG(hw,((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), ((0x14000
+ ((31) * 4))), (page)))
3407 NGBE_PHY_CONFIG(NGBE_INTERNAL_PHY_PAGE_SELECT_OFFSET),((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), ((0x14000
+ ((31) * 4))), (page)))
3408 page)((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), ((0x14000
+ ((31) * 4))), (page)))
;
3409 }
3410 NGBE_WRITE_REG(hw, NGBE_PHY_CONFIG(off), data)((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), ((0x14000
+ ((off) * 4))), (data)))
;
3411
3412 return 0;
3413}
3414
3415int
3416ngbe_phy_reset(struct ngbe_softc *sc)
3417{
3418 struct ngbe_hw *hw = &sc->hw;
3419 uint16_t value;
3420 int i, status;
3421
3422 /* only support internal phy */
3423 if (hw->phy.type != ngbe_phy_internal) {
3424 printf("%s: operation not supported\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
3425 return EINVAL22;
3426 }
3427
3428 /* Don't reset PHY if it's shut down due to overtemp. */
3429 if (!hw->phy.reset_if_overtemp && hw->phy.ops.check_overtemp(hw) != 0) {
3430 printf("%s: overtemp! skip phy reset\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
3431 return EINVAL22;
3432 }
3433
3434 /* Blocked by MNG FW so bail */
3435 status = ngbe_check_reset_blocked(sc);
3436 if (status)
3437 return status;
3438
3439 value = NGBE_MDI_PHY_RESET0x8000;
3440 status = hw->phy.ops.write_reg(hw, 0, 0, value);
3441 for (i = 0; i < NGBE_PHY_RST_WAIT_PERIOD50; i++) {
3442 status = hw->phy.ops.read_reg(hw, 0, 0, &value);
3443 if (!(value & NGBE_MDI_PHY_RESET0x8000))
3444 break;
3445 msec_delay(1)(*delay_func)(1000 * (1));
3446 }
3447
3448 if (i == NGBE_PHY_RST_WAIT_PERIOD50) {
3449 printf("%s: phy mode reset did not complete\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
3450 return ETIMEDOUT60;
3451 }
3452
3453 return status;
3454}
3455
3456int
3457ngbe_phy_set_pause_advertisement(struct ngbe_hw *hw, uint16_t pause_bit)
3458{
3459 uint16_t value;
3460 int status;
3461
3462 status = hw->phy.ops.read_reg(hw, MII_ANAR0x04, 0, &value);
3463 value &= ~0xc00;
3464 value |= pause_bit;
3465 status = hw->phy.ops.write_reg(hw, MII_ANAR0x04, 0, value);
3466 return status;
3467}
3468
3469int
3470ngbe_phy_setup(struct ngbe_softc *sc)
3471{
3472 struct ngbe_hw *hw = &sc->hw;
3473 uint16_t value = 0;
3474 int i;
3475
3476 for (i = 0; i < 15; i++) {
3477 if (!NGBE_READ_REG_MASK(hw, NGBE_MIS_ST0x10028,
3478 NGBE_MIS_ST_GPHY_IN_RST(hw->bus.lan_id)(0x00000200 << (hw->bus.lan_id))))
3479 break;
3480 msec_delay(1)(*delay_func)(1000 * (1));
3481 }
3482 if (i == 15) {
3483 printf("%s: gphy reset exceeds maximum time\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
3484 return ETIMEDOUT60;
3485 }
3486
3487 ngbe_gphy_efuse_calibration(sc);
3488 hw->phy.ops.write_reg(hw, 20, 0xa46, 2);
3489 ngbe_gphy_wait_mdio_access_on(hw);
3490
3491 for (i = 0; i < 100; i++) {
3492 hw->phy.ops.read_reg(hw, 16, 0xa42, &value);
3493 if ((value & 0x7) == 3)
3494 break;
3495 DELAY(1000)(*delay_func)(1000);
3496 }
3497 if (i == 100) {
3498 printf("%s: phy reset exceeds maximum time\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
3499 return ETIMEDOUT60;
3500 }
3501
3502 return 0;
3503}
3504
3505int
3506ngbe_phy_setup_link(struct ngbe_softc *sc, uint32_t speed, int need_restart)
3507{
3508 struct ngbe_hw *hw = &sc->hw;
3509 uint16_t value = 0;
3510 int status;
3511
3512 if (!hw->mac.autoneg) {
3513 status = hw->phy.ops.reset(sc);
3514 if (status) {
3515 printf("%s: phy reset failed\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
3516 return status;
3517 }
3518
3519 switch (speed) {
3520 case NGBE_LINK_SPEED_1GB_FULL2:
3521 value = NGBE_MDI_PHY_SPEED_SELECT10x0040;
3522 break;
3523 case NGBE_LINK_SPEED_100_FULL1:
3524 value = NGBE_MDI_PHY_SPEED_SELECT00x2000;
3525 break;
3526 case NGBE_LINK_SPEED_10_FULL8:
3527 value = 0;
3528 break;
3529 default:
3530 value = NGBE_MDI_PHY_SPEED_SELECT00x2000 |
3531 NGBE_MDI_PHY_SPEED_SELECT10x0040;
3532 printf("%s: unknown speed = 0x%x\n",
3533 DEVNAME(sc)((sc)->sc_dev.dv_xname), speed);
3534 break;
3535 }
3536 /* duplex full */
3537 value |= NGBE_MDI_PHY_DUPLEX0x0100;
3538 hw->phy.ops.write_reg(hw, 0, 0, value);
3539
3540 goto skip_an;
3541 }
3542
3543 /* Disable 10/100M Half Duplex */
3544 hw->phy.ops.read_reg(hw, 4, 0, &value);
3545 value &= 0xff5f;
3546 hw->phy.ops.write_reg(hw, 4, 0, value);
3547
3548 /* Set advertise enable according to input speed */
3549 hw->phy.ops.read_reg(hw, 9, 0, &value);
3550 if (!(speed & NGBE_LINK_SPEED_1GB_FULL2))
3551 value &= 0xfdff;
3552 else
3553 value |= 0x200;
3554 hw->phy.ops.write_reg(hw, 9, 0, value);
3555
3556 hw->phy.ops.read_reg(hw, 4, 0, &value);
3557 if (!(speed & NGBE_LINK_SPEED_100_FULL1))
3558 value &= 0xfeff;
3559 else
3560 value |= 0x100;
3561 hw->phy.ops.write_reg(hw, 4, 0, value);
3562
3563 hw->phy.ops.read_reg(hw, 4, 0, &value);
3564 if (!(speed & NGBE_LINK_SPEED_10_FULL8))
3565 value &= 0xffbf;
3566 else
3567 value |= 0x40;
3568 hw->phy.ops.write_reg(hw, 4, 0, value);
3569
3570 /* Restart AN and wait AN done interrupt */
3571 value = NGBE_MDI_PHY_RESTART_AN0x0200 | NGBE_MDI_PHY_ANE0x1000;
3572 hw->phy.ops.write_reg(hw, 0, 0, value);
3573
3574skip_an:
3575 hw->phy.ops.phy_led_ctrl(sc);
3576 hw->phy.ops.check_event(sc);
3577
3578 return 0;
3579}
3580
3581uint16_t
3582ngbe_read_pci_cfg_word(struct ngbe_softc *sc, uint32_t reg)
3583{
3584 struct ngbe_osdep *os = &sc->osdep;
3585 struct pci_attach_args *pa = &os->os_pa;
3586 uint32_t value;
3587 int high = 0;
3588
3589 if (reg & 0x2) {
3590 high = 1;
3591 reg &= ~0x2;
3592 }
3593 value = pci_conf_read(pa->pa_pc, pa->pa_tag, reg);
3594
3595 if (high)
3596 value >>= 16;
3597
3598 return (value & 0xffff);
3599}
3600
3601void
3602ngbe_release_eeprom_semaphore(struct ngbe_hw *hw)
3603{
3604 if (ngbe_check_mng_access(hw)) {
3605 NGBE_WRITE_REG_MASK(hw, NGBE_MIS_SWSM0x1002c, NGBE_MIS_SWSM_SMBI1, 0);
3606 NGBE_WRITE_FLUSH(hw)((((struct ngbe_osdep *)(hw)->back)->os_memt)->read_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x10000
)))
;
3607 }
3608}
3609
3610int
3611ngbe_acquire_swfw_sync(struct ngbe_softc *sc, uint32_t mask)
3612{
3613 struct ngbe_hw *hw = &sc->hw;
3614 uint32_t gssr = 0;
3615 uint32_t swmask = mask;
3616 uint32_t fwmask = mask << 16;
3617 int i, timeout = 200;
3618
3619 for (i = 0; i < timeout; i++) {
3620 /*
3621 * SW NVM semaphore bit is used for access to all
3622 * SW_FW_SYNC bits (not just NVM)
3623 */
3624 if (ngbe_get_eeprom_semaphore(sc))
3625 return 1;
3626 if (ngbe_check_mng_access(hw)) {
3627 gssr = NGBE_READ_REG(hw, NGBE_MNG_SWFW_SYNC)((((struct ngbe_osdep *)(hw)->back)->os_memt)->read_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x1e008
)))
;
3628 if (!(gssr & (fwmask | swmask))) {
3629 gssr |= swmask;
3630 NGBE_WRITE_REG(hw, NGBE_MNG_SWFW_SYNC, gssr)((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x1e008
), (gssr)))
;
3631 ngbe_release_eeprom_semaphore(hw);
3632 return 0;
3633 } else {
3634 /* Resource is currently in use by FW or SW */
3635 ngbe_release_eeprom_semaphore(hw);
3636 msec_delay(5)(*delay_func)(1000 * (5));
3637 }
3638 }
3639 }
3640
3641 printf("%s: semaphore failed\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
3642
3643 /* If time expired clear the bits holding the lock and retry */
3644 if (gssr & (fwmask | swmask))
3645 ngbe_release_swfw_sync(sc, gssr & (fwmask | swmask));
3646
3647 msec_delay(5)(*delay_func)(1000 * (5));
3648 return 1;
3649}
3650
3651void
3652ngbe_release_swfw_sync(struct ngbe_softc *sc, uint32_t mask)
3653{
3654 struct ngbe_hw *hw = &sc->hw;
3655
3656 ngbe_get_eeprom_semaphore(sc);
3657 if (ngbe_check_mng_access(hw))
3658 NGBE_WRITE_REG_MASK(hw, NGBE_MNG_SWFW_SYNC0x1e008, mask, 0);
3659
3660 ngbe_release_eeprom_semaphore(hw);
3661}
3662
3663void
3664ngbe_reset(struct ngbe_softc *sc)
3665{
3666 struct ngbe_hw *hw = &sc->hw;
3667 int error;
3668
3669 error = hw->mac.ops.init_hw(sc);
3670 switch (error) {
3671 case 0:
3672 break;
3673 default:
3674 printf("%s: hardware error\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
3675 break;
3676 }
3677}
3678
3679int
3680ngbe_reset_hw(struct ngbe_softc *sc)
3681{
3682 struct ngbe_hw *hw = &sc->hw;
3683 struct ngbe_mac_info *mac = &hw->mac;
3684 uint32_t i, reset_status, rst_delay;
3685 uint32_t reset = 0;
3686 int status = 0;
3687
3688 status = hw->mac.ops.stop_adapter(sc);
3689 if (status)
3690 goto reset_hw_out;
3691
3692 /* Identify PHY and related function pointers */
3693 if (!((hw->subsystem_device_id & OEM_MASK0x00ff) == RGMII_FPGA0x0080)) {
3694 status = hw->phy.ops.init(sc);
3695 if (status)
3696 goto reset_hw_out;
3697 }
3698
3699 if (ngbe_get_media_type(hw) == ngbe_media_type_copper) {
3700 mac->ops.setup_link = ngbe_setup_copper_link;
3701 mac->ops.get_link_capabilities =
3702 ngbe_get_copper_link_capabilities;
3703 }
3704
3705 /*
3706 * Issue global reset to the MAC. Needs to be SW reset if link is up.
3707 * If link reset is used when link is up, it might reset the PHY when
3708 * mng is using it. If link is down or the flag to force full link
3709 * reset is set, then perform link reset.
3710 */
3711 if (hw->force_full_reset) {
3712 rst_delay = (NGBE_READ_REG(hw, NGBE_MIS_RST_ST)((((struct ngbe_osdep *)(hw)->back)->os_memt)->read_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x10030
)))
&
3713 NGBE_MIS_RST_ST_RST_INIT0x0000ff00) >> NGBE_MIS_RST_ST_RST_INI_SHIFT8;
3714 if (hw->reset_type == NGBE_SW_RESET) {
3715 for (i = 0; i < rst_delay + 20; i++) {
3716 reset_status =
3717 NGBE_READ_REG(hw, NGBE_MIS_RST_ST)((((struct ngbe_osdep *)(hw)->back)->os_memt)->read_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x10030
)))
;
3718 if (!(reset_status &
3719 NGBE_MIS_RST_ST_DEV_RST_ST_MASK0x00180000))
3720 break;
3721 msec_delay(100)(*delay_func)(1000 * (100));
3722 }
3723
3724 if (reset_status & NGBE_MIS_RST_ST_DEV_RST_ST_MASK0x00180000) {
3725 status = ETIMEDOUT60;
3726 printf("%s: software reset polling failed to "
3727 "complete\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
3728 goto reset_hw_out;
3729 }
3730 status = ngbe_check_flash_load(sc,
3731 NGBE_SPI_ILDR_STATUS_SW_RESET0x00000800);
3732 if (status)
3733 goto reset_hw_out;
3734 } else if (hw->reset_type == NGBE_GLOBAL_RESET) {
3735 msec_delay(100 * rst_delay + 2000)(*delay_func)(1000 * (100 * rst_delay + 2000));
3736 }
3737 } else {
3738 if (hw->bus.lan_id == 0)
3739 reset = NGBE_MIS_RST_LAN0_RST0x00000002;
3740 else if (hw->bus.lan_id == 1)
3741 reset = NGBE_MIS_RST_LAN1_RST0x00000004;
3742 else if (hw->bus.lan_id == 2)
3743 reset = NGBE_MIS_RST_LAN2_RST0x00000008;
3744 else if (hw->bus.lan_id == 3)
3745 reset = NGBE_MIS_RST_LAN3_RST0x00000010;
3746
3747 NGBE_WRITE_REG(hw, NGBE_MIS_RST,((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x1000c
), (reset | ((((struct ngbe_osdep *)(hw)->back)->os_memt
)->read_4((((struct ngbe_osdep *)(hw)->back)->os_memh
), (0x1000c))))))
3748 reset | NGBE_READ_REG(hw, NGBE_MIS_RST))((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x1000c
), (reset | ((((struct ngbe_osdep *)(hw)->back)->os_memt
)->read_4((((struct ngbe_osdep *)(hw)->back)->os_memh
), (0x1000c))))))
;
3749 NGBE_WRITE_FLUSH(hw)((((struct ngbe_osdep *)(hw)->back)->os_memt)->read_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x10000
)))
;
3750 msec_delay(15)(*delay_func)(1000 * (15));
3751 }
3752
3753 ngbe_reset_misc(hw);
3754
3755 /* Store the permanent mac address */
3756 hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
3757
3758 /*
3759 * Store MAC address from RAR0, clear receive address registers, and
3760 * clear the multicast table. Also reset num_rar_entries to 32,
3761 * since we modify this value when programming the SAN MAC address.
3762 */
3763 hw->mac.num_rar_entries = NGBE_SP_RAR_ENTRIES32;
3764 hw->mac.ops.init_rx_addrs(sc);
3765
3766reset_hw_out:
3767 return status;
3768}
3769
3770void
3771ngbe_reset_misc(struct ngbe_hw *hw)
3772{
3773 int i;
3774
3775 /* Receive packets of size > 2048 */
3776 NGBE_WRITE_REG_MASK(hw, NGBE_MAC_RX_CFG0x11004, NGBE_MAC_RX_CFG_JE0x00000100,
3777 NGBE_MAC_RX_CFG_JE0x00000100);
3778
3779 /* Clear counters on read */
3780 NGBE_WRITE_REG_MASK(hw, NGBE_MMC_CONTROL0x11800, NGBE_MMC_CONTROL_RSTONRD0x4,
3781 NGBE_MMC_CONTROL_RSTONRD0x4);
3782
3783 NGBE_WRITE_REG_MASK(hw, NGBE_MAC_RX_FLOW_CTRL0x11090,
3784 NGBE_MAC_RX_FLOW_CTRL_RFE0x00000001, NGBE_MAC_RX_FLOW_CTRL_RFE0x00000001);
3785
3786 NGBE_WRITE_REG(hw, NGBE_MAC_PKT_FLT, NGBE_MAC_PKT_FLT_PR)((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x11008
), (0x00000001)))
;
3787
3788 NGBE_WRITE_REG_MASK(hw, NGBE_MIS_RST_ST0x10030, NGBE_MIS_RST_ST_RST_INIT0x0000ff00,
3789 0x1e00);
3790
3791 /* errata 4: initialize mng flex tbl and wakeup flex tbl */
3792 NGBE_WRITE_REG(hw, NGBE_PSR_MNG_FLEX_SEL, 0)((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x1582c
), (0)))
;
3793 for (i = 0; i < 16; i++) {
3794 NGBE_WRITE_REG(hw, NGBE_PSR_MNG_FLEX_DW_L(i), 0)((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), ((0x15a00
+ ((i) * 16))), (0)))
;
3795 NGBE_WRITE_REG(hw, NGBE_PSR_MNG_FLEX_DW_H(i), 0)((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), ((0x15a04
+ ((i) * 16))), (0)))
;
3796 NGBE_WRITE_REG(hw, NGBE_PSR_MNG_FLEX_MSK(i), 0)((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), ((0x15a08
+ ((i) * 16))), (0)))
;
3797 }
3798 NGBE_WRITE_REG(hw, NGBE_PSR_LAN_FLEX_SEL, 0)((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x15b8c
), (0)))
;
3799 for (i = 0; i < 16; i++) {
3800 NGBE_WRITE_REG(hw, NGBE_PSR_LAN_FLEX_DW_L(i), 0)((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), ((0x15c00
+ ((i) * 16))), (0)))
;
3801 NGBE_WRITE_REG(hw, NGBE_PSR_LAN_FLEX_DW_H(i), 0)((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), ((0x15c04
+ ((i) * 16))), (0)))
;
3802 NGBE_WRITE_REG(hw, NGBE_PSR_LAN_FLEX_MSK(i), 0)((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), ((0x15c08
+ ((i) * 16))), (0)))
;
3803 }
3804
3805 /* Set pause frame dst mac addr */
3806 NGBE_WRITE_REG(hw, NGBE_RDB_PFCMACDAL, 0xc2000001)((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x19210
), (0xc2000001)))
;
3807 NGBE_WRITE_REG(hw, NGBE_RDB_PFCMACDAH, 0x0180)((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x19214
), (0x0180)))
;
3808
3809 NGBE_WRITE_REG(hw, NGBE_MDIO_CLAUSE_SELECT, 0xf)((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x11220
), (0xf)))
;
3810
3811 ngbe_init_thermal_sensor_thresh(hw);
3812}
3813
3814int
3815ngbe_set_fw_drv_ver(struct ngbe_softc *sc, uint8_t maj, uint8_t min,
3816 uint8_t build, uint8_t sub)
3817{
3818 struct ngbe_hw *hw = &sc->hw;
3819 struct ngbe_hic_drv_info fw_cmd;
3820 int i, error = 0;
3821
3822 fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO0xdd;
3823 fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN0x5;
3824 fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED0x0;
3825 fw_cmd.port_num = (uint8_t)hw->bus.lan_id;
3826 fw_cmd.ver_maj = maj;
3827 fw_cmd.ver_min = min;
3828 fw_cmd.ver_build = build;
3829 fw_cmd.ver_sub = sub;
3830 fw_cmd.hdr.checksum = 0;
3831 fw_cmd.hdr.checksum = ngbe_calculate_checksum((uint8_t *)&fw_cmd,
3832 (FW_CEM_HDR_LEN0x4 + fw_cmd.hdr.buf_len));
3833 fw_cmd.pad = 0;
3834 fw_cmd.pad2 = 0;
3835
3836 DELAY(5000)(*delay_func)(5000);
3837 for (i = 0; i <= FW_CEM_MAX_RETRIES3; i++) {
3838 error = ngbe_host_interface_command(sc, (uint32_t *)&fw_cmd,
3839 sizeof(fw_cmd), NGBE_HI_COMMAND_TIMEOUT5000, 1);
3840 if (error)
3841 continue;
3842
3843 if (fw_cmd.hdr.cmd_or_resp.ret_status ==
3844 FW_CEM_RESP_STATUS_SUCCESS0x1)
3845 error = 0;
3846 else
3847 error = EINVAL22;
3848 break;
3849 }
3850
3851 return error;
3852}
3853
3854void
3855ngbe_set_ivar(struct ngbe_softc *sc, uint16_t entry, uint16_t vector, int8_t
3856type)
3857{
3858 struct ngbe_hw *hw = &sc->hw;
3859 uint32_t ivar, index;
3860
3861 vector |= NGBE_PX_IVAR_ALLOC_VAL0x80;
3862
3863 if (type == -1) {
3864 /* other causes */
3865 index = 0;
3866 ivar = NGBE_READ_REG(hw, NGBE_PX_MISC_IVAR)((((struct ngbe_osdep *)(hw)->back)->os_memt)->read_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x004fc
)))
;
3867 ivar &= ~((uint32_t)0xff << index);
3868 ivar |= ((uint32_t)vector << index);
3869 NGBE_WRITE_REG(hw, NGBE_PX_MISC_IVAR, ivar)((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x004fc
), (ivar)))
;
3870 } else {
3871 /* Tx or Rx causes */
3872 index = ((16 * (entry & 1)) + (8 * type));
3873 ivar = NGBE_READ_REG(hw, NGBE_PX_IVAR(entry >> 1))((((struct ngbe_osdep *)(hw)->back)->os_memt)->read_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), ((0x00500
+ (entry >> 1) * 4))))
;
3874 ivar &= ~((uint32_t)0xff << index);
3875 ivar |= ((uint32_t)vector << index);
3876 NGBE_WRITE_REG(hw, NGBE_PX_IVAR(entry >> 1), ivar)((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), ((0x00500
+ (entry >> 1) * 4)), (ivar)))
;
3877 }
3878}
3879
3880void
3881ngbe_set_lan_id_multi_port_pcie(struct ngbe_hw *hw)
3882{
3883 struct ngbe_bus_info *bus = &hw->bus;
3884 uint32_t reg = 0;
3885
3886 reg = NGBE_READ_REG(hw, NGBE_CFG_PORT_ST)((((struct ngbe_osdep *)(hw)->back)->os_memt)->read_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x14404
)))
;
3887 bus->lan_id = NGBE_CFG_PORT_ST_LAN_ID(reg)((0x00000300 & (reg)) >> 8);
3888}
3889
3890void
3891ngbe_set_mta(struct ngbe_hw *hw, uint8_t *mc_addr)
3892{
3893 uint32_t vector, vector_bit, vector_reg;
3894
3895 hw->addr_ctrl.mta_in_use++;
3896
3897 vector = ngbe_mta_vector(hw, mc_addr);
3898
3899 /*
3900 * The MTA is a register array of 128 32-bit registers. It is treated
3901 * like an array of 4096 bits. We want to set bit
3902 * BitArray[vector_value]. So we figure out what register the bit is
3903 * in, read it, OR in the new bit, then write back the new value. The
3904 * register is determined by the upper 7 bits of the vector value and
3905 * the bit within that register are determined by the lower 5 bits of
3906 * the value.
3907 */
3908 vector_reg = (vector >> 5) & 0x7f;
3909 vector_bit = vector & 0x1f;
3910 hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit);
3911}
3912
3913void
3914ngbe_set_pci_config_data(struct ngbe_hw *hw, uint16_t link_status)
3915{
3916 if (hw->bus.type == ngbe_bus_type_unknown)
3917 hw->bus.type = ngbe_bus_type_pci_express;
3918
3919 switch (link_status & NGBE_PCI_LINK_WIDTH0x3f0) {
3920 case NGBE_PCI_LINK_WIDTH_10x10:
3921 hw->bus.width = ngbe_bus_width_pcie_x1;
3922 break;
3923 case NGBE_PCI_LINK_WIDTH_20x20:
3924 hw->bus.width = ngbe_bus_width_pcie_x2;
3925 break;
3926 case NGBE_PCI_LINK_WIDTH_40x40:
3927 hw->bus.width = ngbe_bus_width_pcie_x4;
3928 break;
3929 case NGBE_PCI_LINK_WIDTH_80x80:
3930 hw->bus.width = ngbe_bus_width_pcie_x8;
3931 break;
3932 default:
3933 hw->bus.width = ngbe_bus_width_unknown;
3934 break;
3935 }
3936
3937 switch (link_status & NGBE_PCI_LINK_SPEED0xf) {
3938 case NGBE_PCI_LINK_SPEED_25000x1:
3939 hw->bus.speed = ngbe_bus_speed_2500;
3940 break;
3941 case NGBE_PCI_LINK_SPEED_50000x2:
3942 hw->bus.speed = ngbe_bus_speed_5000;
3943 break;
3944 case NGBE_PCI_LINK_SPEED_80000x3:
3945 hw->bus.speed = ngbe_bus_speed_8000;
3946 break;
3947 default:
3948 hw->bus.speed = ngbe_bus_speed_unknown;
3949 break;
3950 }
3951}
3952
3953int
3954ngbe_set_rar(struct ngbe_softc *sc, uint32_t index, uint8_t *addr,
3955 uint64_t pools, uint32_t enable_addr)
3956{
3957 struct ngbe_hw *hw = &sc->hw;
3958 uint32_t rar_entries = hw->mac.num_rar_entries;
3959 uint32_t rar_low, rar_high;
3960
3961 /* Make sure we are using a valid rar index range */
3962 if (index >= rar_entries) {
3963 printf("%s: RAR index %d is out of range\n",
3964 DEVNAME(sc)((sc)->sc_dev.dv_xname), index);
3965 return EINVAL22;
3966 }
3967
3968 /* Select the MAC address */
3969 NGBE_WRITE_REG(hw, NGBE_PSR_MAC_SWC_IDX, index)((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x16210
), (index)))
;
3970
3971 /* Setup VMDq pool mapping */
3972 NGBE_WRITE_REG(hw, NGBE_PSR_MAC_SWC_VM, pools & 0xffffffff)((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x16208
), (pools & 0xffffffff)))
;
3973
3974 /*
3975 * HW expects these in little endian so we reverse the byte
3976 * order from network order (big endian) to little endian
3977 *
3978 * Some parts put the VMDq setting in the extra RAH bits,
3979 * so save everything except the lower 16 bits that hold part
3980 * of the address and the address valid bit.
3981 */
3982 rar_low = ((uint32_t)addr[5] | ((uint32_t)addr[4] << 8) |
3983 ((uint32_t)addr[3] << 16) | ((uint32_t)addr[2] << 24));
3984 rar_high = ((uint32_t)addr[1] | ((uint32_t)addr[0] << 8));
3985 if (enable_addr != 0)
3986 rar_high |= NGBE_PSR_MAC_SWC_AD_H_AV0x80000000;
3987
3988 NGBE_WRITE_REG(hw, NGBE_PSR_MAC_SWC_AD_L, rar_low)((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x16200
), (rar_low)))
;
3989 NGBE_WRITE_REG_MASK(hw, NGBE_PSR_MAC_SWC_AD_H0x16204,
3990 (NGBE_PSR_MAC_SWC_AD_H_AD(~0)(((~0) & 0xffff)) | NGBE_PSR_MAC_SWC_AD_H_ADTYPE(~0)(((~0) & 0x1) << 30) |
3991 NGBE_PSR_MAC_SWC_AD_H_AV0x80000000), rar_high);
3992
3993 return 0;
3994}
3995
3996void
3997ngbe_set_rx_drop_en(struct ngbe_softc *sc)
3998{
3999 uint32_t srrctl;
4000 int i;
4001
4002 if ((sc->sc_nqueues > 1) &&
4003 !(sc->hw.fc.current_mode & ngbe_fc_tx_pause)) {
4004 for (i = 0; i < sc->sc_nqueues; i++) {
4005 srrctl = NGBE_READ_REG(&sc->hw, NGBE_PX_RR_CFG(i))((((struct ngbe_osdep *)(&sc->hw)->back)->os_memt
)->read_4((((struct ngbe_osdep *)(&sc->hw)->back
)->os_memh), ((0x01010 + ((i) * 0x40)))))
;
4006 srrctl |= NGBE_PX_RR_CFG_DROP_EN0x40000000;
4007 NGBE_WRITE_REG(&sc->hw, NGBE_PX_RR_CFG(i), srrctl)((((struct ngbe_osdep *)(&sc->hw)->back)->os_memt
)->write_4((((struct ngbe_osdep *)(&sc->hw)->back
)->os_memh), ((0x01010 + ((i) * 0x40))), (srrctl)))
;
4008 }
4009
4010 } else {
4011 for (i = 0; i < sc->sc_nqueues; i++) {
4012 srrctl = NGBE_READ_REG(&sc->hw, NGBE_PX_RR_CFG(i))((((struct ngbe_osdep *)(&sc->hw)->back)->os_memt
)->read_4((((struct ngbe_osdep *)(&sc->hw)->back
)->os_memh), ((0x01010 + ((i) * 0x40)))))
;
4013 srrctl &= ~NGBE_PX_RR_CFG_DROP_EN0x40000000;
4014 NGBE_WRITE_REG(&sc->hw, NGBE_PX_RR_CFG(i), srrctl)((((struct ngbe_osdep *)(&sc->hw)->back)->os_memt
)->write_4((((struct ngbe_osdep *)(&sc->hw)->back
)->os_memh), ((0x01010 + ((i) * 0x40))), (srrctl)))
;
4015 }
4016 }
4017}
4018
4019void
4020ngbe_set_rxpba(struct ngbe_hw *hw, int num_pb, uint32_t headroom, int strategy)
4021{
4022 uint32_t pbsize = hw->mac.rx_pb_size;
4023 uint32_t txpktsize, txpbthresh, rxpktsize = 0;
4024
4025 /* Reserve headroom */
4026 pbsize -= headroom;
4027
4028 if (!num_pb)
4029 num_pb = 1;
4030
4031 /*
4032 * Divide remaining packet buffer space amongst the number of packet
4033 * buffers requested using supplied strategy.
4034 */
4035 switch (strategy) {
4036 case PBA_STRATEGY_EQUAL0:
4037 rxpktsize = (pbsize / num_pb) << NGBE_RDB_PB_SZ_SHIFT10;
4038 NGBE_WRITE_REG(hw, NGBE_RDB_PB_SZ, rxpktsize)((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x19020
), (rxpktsize)))
;
4039 break;
4040 default:
4041 break;
4042 }
4043
4044 /* Only support an equally distributed Tx packet buffer strategy. */
4045 txpktsize = NGBE_TDB_PB_SZ_MAX0x00005000 / num_pb;
4046 txpbthresh = (txpktsize / 1024) - NGBE_TXPKT_SIZE_MAX0xa;
4047
4048 NGBE_WRITE_REG(hw, NGBE_TDB_PB_SZ, txpktsize)((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x1cc00
), (txpktsize)))
;
4049 NGBE_WRITE_REG(hw, NGBE_TDM_PB_THRE, txpbthresh)((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x18020
), (txpbthresh)))
;
4050}
4051
4052int
4053ngbe_setup_copper_link(struct ngbe_softc *sc, uint32_t speed, int need_restart)
4054{
4055 struct ngbe_hw *hw = &sc->hw;
4056 int status = 0;
4057
4058 /* Setup the PHY according to input speed */
4059 if (!((hw->subsystem_device_id & OEM_MASK0x00ff) == RGMII_FPGA0x0080))
4060 status = hw->phy.ops.setup_link(sc, speed, need_restart);
4061
4062 return status;
4063}
4064
4065int
4066ngbe_setup_fc(struct ngbe_softc *sc)
4067{
4068 struct ngbe_hw *hw = &sc->hw;
4069 uint16_t pcap_backplane = 0;
4070 int error = 0;
4071
4072 /* Validate the requested mode */
4073 if (hw->fc.strict_ieee && hw->fc.requested_mode == ngbe_fc_rx_pause) {
4074 printf("%s: ngbe_fc_rx_pause not valid in strict IEEE mode\n",
4075 DEVNAME(sc)((sc)->sc_dev.dv_xname));
4076 error = EINVAL22;
4077 goto out;
4078 }
4079
4080 /*
4081 * Gig parts do not have a word in the EEPROM to determine the
4082 * default flow control setting, so we explicitly set it to full.
4083 */
4084 if (hw->fc.requested_mode == ngbe_fc_default)
4085 hw->fc.requested_mode = ngbe_fc_full;
4086
4087 /*
4088 * The possible values of fc.requested_mode are:
4089 * 0: Flow control is completely disabled
4090 * 1: Rx flow control is enabled (we can receive pause frames,
4091 * but not send pause frames).
4092 * 2: Tx flow control is enabled (we can send pause frames but
4093 * we do not support receiving pause frames).
4094 * 3: Both Rx and Tx flow control (symmetric) are enabled.
4095 * other: Invalid.
4096 */
4097 switch (hw->fc.requested_mode) {
4098 case ngbe_fc_none:
4099 /* Flow control completely disabled by software override. */
4100 break;
4101 case ngbe_fc_tx_pause:
4102 /*
4103 * Tx Flow control is enabled, and Rx Flow control is
4104 * disabled by software override.
4105 */
4106 pcap_backplane |= NGBE_SR_AN_MMD_ADV_REG1_PAUSE_ASM0x800;
4107 break;
4108 case ngbe_fc_rx_pause:
4109 /*
4110 * Rx Flow control is enabled and Tx Flow control is
4111 * disabled by software override. Since there really
4112 * isn't a way to advertise that we are capable of RX
4113 * Pause ONLY, we will advertise that we support both
4114 * symmetric and asymmetric Rx PAUSE, as such we fall
4115 * through to the fc_full statement. Later, we will
4116 * disable the adapter's ability to send PAUSE frames.
4117 */
4118 case ngbe_fc_full:
4119 /* Flow control (both Rx and Tx) is enabled by SW override. */
4120 pcap_backplane |= NGBE_SR_AN_MMD_ADV_REG1_PAUSE_SYM0x400 |
4121 NGBE_SR_AN_MMD_ADV_REG1_PAUSE_ASM0x800;
4122 break;
4123 default:
4124 printf("%s: flow control param set incorrectly\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
4125 error = EINVAL22;
4126 goto out;
4127 }
4128
4129 /* AUTOC restart handles negotiation of 1G on backplane and copper. */
4130 if ((hw->phy.media_type == ngbe_media_type_copper) &&
4131 !((hw->subsystem_device_id & OEM_MASK0x00ff) == RGMII_FPGA0x0080))
4132 error = hw->phy.ops.set_adv_pause(hw, pcap_backplane);
4133out:
4134 return error;
4135}
4136
4137void
4138ngbe_setup_gpie(struct ngbe_hw *hw)
4139{
4140 uint32_t gpie;
4141
4142 gpie = NGBE_PX_GPIE_MODEL0x00000001;
4143
4144 /*
4145 * use EIAM to auto-mask when MSI-X interrupt is asserted
4146 * this saves a register write for every interrupt.
4147 */
4148 NGBE_WRITE_REG(hw, NGBE_PX_GPIE, gpie)((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x00118
), (gpie)))
;
4149}
4150
4151void
4152ngbe_setup_isb(struct ngbe_softc *sc)
4153{
4154 uint64_t idba = sc->isbdma.dma_map->dm_segs[0].ds_addr;
4155
4156 /* Set ISB address */
4157 NGBE_WRITE_REG(&sc->hw, NGBE_PX_ISB_ADDR_L,((((struct ngbe_osdep *)(&sc->hw)->back)->os_memt
)->write_4((((struct ngbe_osdep *)(&sc->hw)->back
)->os_memh), (0x00160), ((idba & 0x00000000ffffffffULL
))))
4158 (idba & 0x00000000ffffffffULL))((((struct ngbe_osdep *)(&sc->hw)->back)->os_memt
)->write_4((((struct ngbe_osdep *)(&sc->hw)->back
)->os_memh), (0x00160), ((idba & 0x00000000ffffffffULL
))))
;
4159 NGBE_WRITE_REG(&sc->hw, NGBE_PX_ISB_ADDR_H, (idba >> 32))((((struct ngbe_osdep *)(&sc->hw)->back)->os_memt
)->write_4((((struct ngbe_osdep *)(&sc->hw)->back
)->os_memh), (0x00164), ((idba >> 32))))
;
4160}
4161
4162void
4163ngbe_setup_psrtype(struct ngbe_hw *hw)
4164{
4165 uint32_t psrtype;
4166
4167 /* PSRTYPE must be initialized in adapters */
4168 psrtype = NGBE_RDB_PL_CFG_L4HDR0x2 | NGBE_RDB_PL_CFG_L3HDR0x4 |
4169 NGBE_RDB_PL_CFG_L2HDR0x8 | NGBE_RDB_PL_CFG_TUN_TUNHDR0x10 |
4170 NGBE_RDB_PL_CFG_TUN_OUTER_L2HDR0x20;
4171
4172 NGBE_WRITE_REG(hw, NGBE_RDB_PL_CFG(0), psrtype)((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), ((0x19300
+ ((0) * 4))), (psrtype)))
;
4173}
4174
4175void
4176ngbe_setup_vlan_hw_support(struct ngbe_softc *sc)
4177{
4178 struct ngbe_hw *hw = &sc->hw;
4179 int i;
4180
4181 for (i = 0; i < sc->sc_nqueues; i++) {
4182 NGBE_WRITE_REG_MASK(hw, NGBE_PX_RR_CFG(i)(0x01010 + ((i) * 0x40)),
4183 NGBE_PX_RR_CFG_VLAN0x80000000, NGBE_PX_RR_CFG_VLAN0x80000000);
4184 }
4185}
4186
4187int
4188ngbe_start_hw(struct ngbe_softc *sc)
4189{
4190 struct ngbe_hw *hw = &sc->hw;
4191 int error;
4192
4193 /* Set the media type */
4194 hw->phy.media_type = hw->mac.ops.get_media_type(hw);
4195
4196 /* Clear the VLAN filter table */
4197 hw->mac.ops.clear_vfta(hw);
4198
4199 /* Clear statistics registers */
4200 hw->mac.ops.clear_hw_cntrs(hw);
4201
4202 NGBE_WRITE_FLUSH(hw)((((struct ngbe_osdep *)(hw)->back)->os_memt)->read_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x10000
)))
;
4203
4204 /* Setup flow control */
4205 error = hw->mac.ops.setup_fc(sc);
4206
4207 /* Clear adapter stopped flag */
4208 hw->adapter_stopped = 0;
4209
4210 /* We need to run link autotry after the driver loads */
4211 hw->mac.autotry_restart = 1;
4212
4213 return error;
4214}
4215
4216int
4217ngbe_stop_adapter(struct ngbe_softc *sc)
4218{
4219 struct ngbe_hw *hw = &sc->hw;
4220 int i;
4221
4222 /*
4223 * Set the adapter_stopped flag so other driver functions stop touching
4224 * the hardware.
4225 */
4226 hw->adapter_stopped = 1;
4227
4228 /* Disable the receive unit. */
4229 hw->mac.ops.disable_rx(hw);
4230
4231 /* Clear any pending interrupts, flush previous writes. */
4232 NGBE_WRITE_REG(hw, NGBE_PX_MISC_IC, 0xffffffff)((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x00100
), (0xffffffff)))
;
4233
4234 NGBE_WRITE_REG(hw, NGBE_BME_CTL, 0x3)((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x12020
), (0x3)))
;
4235
4236 /* Disable the transmit unit. Each queue must be disabled. */
4237 for (i = 0; i < hw->mac.max_tx_queues; i++) {
4238 NGBE_WRITE_REG_MASK(hw, NGBE_PX_TR_CFG(i)(0x03010 + ((i) * 0x40)),
4239 NGBE_PX_TR_CFG_SWFLSH0x04000000 | NGBE_PX_TR_CFG_ENABLE(1),
4240 NGBE_PX_TR_CFG_SWFLSH0x04000000);
4241 }
4242
4243 /* Disable the receive unit by stopping each queue */
4244 for (i = 0; i < hw->mac.max_rx_queues; i++) {
4245 NGBE_WRITE_REG_MASK(hw, NGBE_PX_RR_CFG(i)(0x01010 + ((i) * 0x40)),
4246 NGBE_PX_RR_CFG_RR_EN0x00000001, 0);
4247 }
4248
4249 /* Flush all queues disables. */
4250 NGBE_WRITE_FLUSH(hw)((((struct ngbe_osdep *)(hw)->back)->os_memt)->read_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x10000
)))
;
4251 msec_delay(2)(*delay_func)(1000 * (2));
4252
4253 return ngbe_disable_pcie_master(sc);
4254}
4255
4256void
4257ngbe_rx_checksum(uint32_t staterr, struct mbuf *m)
4258{
4259 if (staterr & NGBE_RXD_STAT_IPCS0x00000100) {
4260 if (!(staterr & NGBE_RXD_ERR_IPE0x80000000))
4261 m->m_pkthdrM_dat.MH.MH_pkthdr.csum_flags = M_IPV4_CSUM_IN_OK0x0008;
4262 else
4263 m->m_pkthdrM_dat.MH.MH_pkthdr.csum_flags = 0;
4264 }
4265 if (staterr & NGBE_RXD_STAT_L4CS0x00000080) {
4266 if (!(staterr & NGBE_RXD_ERR_TCPE0x40000000))
4267 m->m_pkthdrM_dat.MH.MH_pkthdr.csum_flags |=
4268 M_TCP_CSUM_IN_OK0x0020 | M_UDP_CSUM_IN_OK0x0080;
4269 }
4270}
4271
4272void
4273ngbe_rxeof(struct rx_ring *rxr)
4274{
4275 struct ngbe_softc *sc = rxr->sc;
4276 struct ifnet *ifp = &sc->sc_ac.ac_if;
4277 struct mbuf_list ml = MBUF_LIST_INITIALIZER(){ ((void *)0), ((void *)0), 0 };
4278 struct mbuf *mp, *m;
4279 struct ngbe_rx_buf *rxbuf, *nxbuf;
4280 union ngbe_rx_desc *rxdesc;
4281 uint32_t staterr = 0;
4282 uint16_t len, vtag;
4283 uint8_t eop = 0;
4284 int i, nextp;
4285
4286 if (!ISSET(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) & (0x40)))
4287 return;
4288
4289 i = rxr->next_to_check;
4290 while (if_rxr_inuse(&rxr->rx_ring)((&rxr->rx_ring)->rxr_alive) > 0) {
4291 uint32_t hash;
4292 uint16_t hashtype;
4293
4294 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,(*(rxr->rxdma.dma_tag)->_dmamap_sync)((rxr->rxdma.dma_tag
), (rxr->rxdma.dma_map), (i * sizeof(union ngbe_rx_desc)),
(sizeof(union ngbe_rx_desc)), (0x02))
4295 i * sizeof(union ngbe_rx_desc), sizeof(union ngbe_rx_desc),(*(rxr->rxdma.dma_tag)->_dmamap_sync)((rxr->rxdma.dma_tag
), (rxr->rxdma.dma_map), (i * sizeof(union ngbe_rx_desc)),
(sizeof(union ngbe_rx_desc)), (0x02))
4296 BUS_DMASYNC_POSTREAD)(*(rxr->rxdma.dma_tag)->_dmamap_sync)((rxr->rxdma.dma_tag
), (rxr->rxdma.dma_map), (i * sizeof(union ngbe_rx_desc)),
(sizeof(union ngbe_rx_desc)), (0x02))
;
4297
4298 rxdesc = &rxr->rx_base[i];
4299 staterr = letoh32(rxdesc->wb.upper.status_error)((__uint32_t)(rxdesc->wb.upper.status_error));
4300 if (!ISSET(staterr, NGBE_RXD_STAT_DD)((staterr) & (0x00000001))) {
4301 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,(*(rxr->rxdma.dma_tag)->_dmamap_sync)((rxr->rxdma.dma_tag
), (rxr->rxdma.dma_map), (i * sizeof(union ngbe_rx_desc)),
(sizeof(union ngbe_rx_desc)), (0x01))
4302 i * sizeof(union ngbe_rx_desc),(*(rxr->rxdma.dma_tag)->_dmamap_sync)((rxr->rxdma.dma_tag
), (rxr->rxdma.dma_map), (i * sizeof(union ngbe_rx_desc)),
(sizeof(union ngbe_rx_desc)), (0x01))
4303 sizeof(union ngbe_rx_desc), BUS_DMASYNC_PREREAD)(*(rxr->rxdma.dma_tag)->_dmamap_sync)((rxr->rxdma.dma_tag
), (rxr->rxdma.dma_map), (i * sizeof(union ngbe_rx_desc)),
(sizeof(union ngbe_rx_desc)), (0x01))
;
4304 break;
4305 }
4306
4307 /* Zero out the receive descriptors status. */
4308 rxdesc->wb.upper.status_error = 0;
4309 rxbuf = &rxr->rx_buffers[i];
4310
4311 /* Pull the mbuf off the ring. */
4312 bus_dmamap_sync(rxr->rxdma.dma_tag, rxbuf->map, 0,(*(rxr->rxdma.dma_tag)->_dmamap_sync)((rxr->rxdma.dma_tag
), (rxbuf->map), (0), (rxbuf->map->dm_mapsize), (0x02
))
4313 rxbuf->map->dm_mapsize, BUS_DMASYNC_POSTREAD)(*(rxr->rxdma.dma_tag)->_dmamap_sync)((rxr->rxdma.dma_tag
), (rxbuf->map), (0), (rxbuf->map->dm_mapsize), (0x02
))
;
4314 bus_dmamap_unload(rxr->rxdma.dma_tag, rxbuf->map)(*(rxr->rxdma.dma_tag)->_dmamap_unload)((rxr->rxdma.
dma_tag), (rxbuf->map))
;
4315
4316 mp = rxbuf->buf;
4317 len = letoh16(rxdesc->wb.upper.length)((__uint16_t)(rxdesc->wb.upper.length));
4318 vtag = letoh16(rxdesc->wb.upper.vlan)((__uint16_t)(rxdesc->wb.upper.vlan));
4319 eop = ((staterr & NGBE_RXD_STAT_EOP0x00000002) != 0);
4320 hash = letoh32(rxdesc->wb.lower.hi_dword.rss)((__uint32_t)(rxdesc->wb.lower.hi_dword.rss));
4321 hashtype = le16toh(rxdesc->wb.lower.lo_dword.hs_rss.pkt_info)((__uint16_t)(rxdesc->wb.lower.lo_dword.hs_rss.pkt_info)) &
4322 NGBE_RXD_RSSTYPE_MASK0x0000000f;
4323
4324 if (staterr & NGBE_RXD_ERR_RXE0x20000000) {
4325 if (rxbuf->fmp) {
4326 m_freem(rxbuf->fmp);
4327 rxbuf->fmp = NULL((void *)0);
4328 }
4329
4330 m_freem(mp);
4331 rxbuf->buf = NULL((void *)0);
4332 goto next_desc;
4333 }
4334
4335 if (mp == NULL((void *)0)) {
4336 panic("%s: ngbe_rxeof: NULL mbuf in slot %d "
4337 "(nrx %d, filled %d)", DEVNAME(sc)((sc)->sc_dev.dv_xname), i,
4338 if_rxr_inuse(&rxr->rx_ring)((&rxr->rx_ring)->rxr_alive), rxr->last_desc_filled);
4339 }
4340
4341 if (!eop) {
4342 /*
4343 * Figure out the next descriptor of this frame.
4344 */
4345 nextp = i + 1;
4346 if (nextp == sc->num_rx_desc)
4347 nextp = 0;
4348 nxbuf = &rxr->rx_buffers[nextp];
4349 /* prefetch(nxbuf); */
4350 }
4351
4352 mp->m_lenm_hdr.mh_len = len;
4353
4354 m = rxbuf->fmp;
4355 rxbuf->buf = rxbuf->fmp = NULL((void *)0);
4356
4357 if (m != NULL((void *)0))
4358 m->m_pkthdrM_dat.MH.MH_pkthdr.len += mp->m_lenm_hdr.mh_len;
4359 else {
4360 m = mp;
4361 m->m_pkthdrM_dat.MH.MH_pkthdr.len = mp->m_lenm_hdr.mh_len;
4362#if NVLAN1 > 0
4363 if (staterr & NGBE_RXD_STAT_VP0x00000020) {
4364 m->m_pkthdrM_dat.MH.MH_pkthdr.ether_vtag = vtag;
4365 m->m_flagsm_hdr.mh_flags |= M_VLANTAG0x0020;
4366 }
4367#endif
4368 }
4369
4370 /* Pass the head pointer on */
4371 if (eop == 0) {
4372 nxbuf->fmp = m;
4373 m = NULL((void *)0);
4374 mp->m_nextm_hdr.mh_next = nxbuf->buf;
4375 } else {
4376 ngbe_rx_checksum(staterr, m);
4377
4378 if (hashtype != NGBE_RXD_RSSTYPE_NONE0x00000000) {
4379 m->m_pkthdrM_dat.MH.MH_pkthdr.ph_flowid = hash;
4380 SET(m->m_pkthdr.csum_flags, M_FLOWID)((m->M_dat.MH.MH_pkthdr.csum_flags) |= (0x4000));
4381 }
4382
4383 ml_enqueue(&ml, m);
4384 }
4385next_desc:
4386 if_rxr_put(&rxr->rx_ring, 1)do { (&rxr->rx_ring)->rxr_alive -= (1); } while (0);
4387 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,(*(rxr->rxdma.dma_tag)->_dmamap_sync)((rxr->rxdma.dma_tag
), (rxr->rxdma.dma_map), (i * sizeof(union ngbe_rx_desc)),
(sizeof(union ngbe_rx_desc)), (0x01))
4388 i * sizeof(union ngbe_rx_desc), sizeof(union ngbe_rx_desc),(*(rxr->rxdma.dma_tag)->_dmamap_sync)((rxr->rxdma.dma_tag
), (rxr->rxdma.dma_map), (i * sizeof(union ngbe_rx_desc)),
(sizeof(union ngbe_rx_desc)), (0x01))
4389 BUS_DMASYNC_PREREAD)(*(rxr->rxdma.dma_tag)->_dmamap_sync)((rxr->rxdma.dma_tag
), (rxr->rxdma.dma_map), (i * sizeof(union ngbe_rx_desc)),
(sizeof(union ngbe_rx_desc)), (0x01))
;
4390
4391 /* Advance our pointers to the next descriptor. */
4392 if (++i == sc->num_rx_desc)
4393 i = 0;
4394 }
4395 rxr->next_to_check = i;
4396
4397 if (ifiq_input(rxr->ifiq, &ml))
4398 if_rxr_livelocked(&rxr->rx_ring);
4399
4400 if (!(staterr & NGBE_RXD_STAT_DD0x00000001))
4401 return;
4402}
4403
4404void
4405ngbe_rxrefill(void *xrxr)
4406{
4407 struct rx_ring *rxr = xrxr;
4408 struct ngbe_softc *sc = rxr->sc;
4409
4410 if (ngbe_rxfill(rxr))
4411 NGBE_WRITE_REG(&sc->hw, NGBE_PX_RR_WP(rxr->me),((((struct ngbe_osdep *)(&sc->hw)->back)->os_memt
)->write_4((((struct ngbe_osdep *)(&sc->hw)->back
)->os_memh), ((0x01008 + ((rxr->me) * 0x40))), (rxr->
last_desc_filled)))
4412 rxr->last_desc_filled)((((struct ngbe_osdep *)(&sc->hw)->back)->os_memt
)->write_4((((struct ngbe_osdep *)(&sc->hw)->back
)->os_memh), ((0x01008 + ((rxr->me) * 0x40))), (rxr->
last_desc_filled)))
;
4413 else if (if_rxr_inuse(&rxr->rx_ring)((&rxr->rx_ring)->rxr_alive) == 0)
4414 timeout_add(&rxr->rx_refill, 1);
4415}
4416
4417int
4418ngbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *m, uint32_t *cmd_type_len,
4419 uint32_t *olinfo_status)
4420{
4421 struct ngbe_tx_context_desc *txd;
4422 struct ngbe_tx_buf *tx_buffer;
4423 uint32_t vlan_macip_lens = 0, type_tucmd_mlhl = 0;
4424 int ctxd = txr->next_avail_desc;
4425 int offload = 0;
4426
4427 /* Indicate the whole packet as payload when not doing TSO */
4428 *olinfo_status |= m->m_pkthdrM_dat.MH.MH_pkthdr.len << NGBE_TXD_PAYLEN_SHIFT13;
4429
4430#if NVLAN1 > 0
4431 if (ISSET(m->m_flags, M_VLANTAG)((m->m_hdr.mh_flags) & (0x0020))) {
4432 uint32_t vtag = m->m_pkthdrM_dat.MH.MH_pkthdr.ether_vtag;
4433 vlan_macip_lens |= (vtag << NGBE_TXD_VLAN_SHIFT16);
4434 *cmd_type_len |= NGBE_TXD_VLE0x40000000;
4435 offload |= 1;
4436 }
4437#endif
4438
4439 if (!offload)
4440 return 0;
4441
4442 txd = (struct ngbe_tx_context_desc *)&txr->tx_base[ctxd];
4443 tx_buffer = &txr->tx_buffers[ctxd];
4444
4445 type_tucmd_mlhl |= NGBE_TXD_DTYP_CTXT0x00100000;
4446
4447 /* Now copy bits into descriptor */
4448 txd->vlan_macip_lens = htole32(vlan_macip_lens)((__uint32_t)(vlan_macip_lens));
4449 txd->type_tucmd_mlhl = htole32(type_tucmd_mlhl)((__uint32_t)(type_tucmd_mlhl));
4450 txd->seqnum_seed = htole32(0)((__uint32_t)(0));
4451 txd->mss_l4len_idx = htole32(0)((__uint32_t)(0));
4452
4453 tx_buffer->m_head = NULL((void *)0);
4454 tx_buffer->eop_index = -1;
4455
4456 return 1;
4457}
4458
4459void
4460ngbe_txeof(struct tx_ring *txr)
4461{
4462 struct ngbe_softc *sc = txr->sc;
4463 struct ifqueue *ifq = txr->ifq;
4464 struct ifnet *ifp = &sc->sc_ac.ac_if;
4465 struct ngbe_tx_buf *tx_buffer;
4466 union ngbe_tx_desc *tx_desc;
4467 unsigned int prod, cons, last;
4468
4469 if (!ISSET(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) & (0x40)))
4470 return;
4471
4472 prod = txr->next_avail_desc;
4473 cons = txr->next_to_clean;
4474
4475 if (prod == cons)
4476 return;
4477
4478 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, 0,(*(txr->txdma.dma_tag)->_dmamap_sync)((txr->txdma.dma_tag
), (txr->txdma.dma_map), (0), (txr->txdma.dma_map->dm_mapsize
), (0x02))
4479 txr->txdma.dma_map->dm_mapsize, BUS_DMASYNC_POSTREAD)(*(txr->txdma.dma_tag)->_dmamap_sync)((txr->txdma.dma_tag
), (txr->txdma.dma_map), (0), (txr->txdma.dma_map->dm_mapsize
), (0x02))
;
4480
4481 for (;;) {
4482 tx_buffer = &txr->tx_buffers[cons];
4483 last = tx_buffer->eop_index;
4484 tx_desc = (union ngbe_tx_desc *)&txr->tx_base[last];
4485
4486 if (!ISSET(tx_desc->wb.status, NGBE_TXD_STAT_DD)((tx_desc->wb.status) & (0x00000001)))
4487 break;
4488
4489 bus_dmamap_sync(txr->txdma.dma_tag, tx_buffer->map,(*(txr->txdma.dma_tag)->_dmamap_sync)((txr->txdma.dma_tag
), (tx_buffer->map), (0), (tx_buffer->map->dm_mapsize
), (0x08))
4490 0, tx_buffer->map->dm_mapsize, BUS_DMASYNC_POSTWRITE)(*(txr->txdma.dma_tag)->_dmamap_sync)((txr->txdma.dma_tag
), (tx_buffer->map), (0), (tx_buffer->map->dm_mapsize
), (0x08))
;
4491 bus_dmamap_unload(txr->txdma.dma_tag, tx_buffer->map)(*(txr->txdma.dma_tag)->_dmamap_unload)((txr->txdma.
dma_tag), (tx_buffer->map))
;
4492 m_freem(tx_buffer->m_head);
4493
4494 tx_buffer->m_head = NULL((void *)0);
4495 tx_buffer->eop_index = -1;
4496
4497 cons = last + 1;
4498 if (cons == sc->num_tx_desc)
4499 cons = 0;
4500 if (prod == cons) {
4501 /* All clean, turn off the timer */
4502 ifp->if_timer = 0;
4503 break;
4504 }
4505 }
4506
4507 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,(*(txr->txdma.dma_tag)->_dmamap_sync)((txr->txdma.dma_tag
), (txr->txdma.dma_map), (0), (txr->txdma.dma_map->dm_mapsize
), (0x01))
4508 0, txr->txdma.dma_map->dm_mapsize, BUS_DMASYNC_PREREAD)(*(txr->txdma.dma_tag)->_dmamap_sync)((txr->txdma.dma_tag
), (txr->txdma.dma_map), (0), (txr->txdma.dma_map->dm_mapsize
), (0x01))
;
4509
4510 txr->next_to_clean = cons;
4511
4512 if (ifq_is_oactive(ifq))
4513 ifq_restart(ifq);
4514}
4515
4516void
4517ngbe_update_mc_addr_list(struct ngbe_hw *hw, uint8_t *mc_addr_list,
4518 uint32_t mc_addr_count, ngbe_mc_addr_itr next, int clear)
4519{
4520 uint32_t i, psrctl, vmdq;
4521
4522 /*
4523 * Set the new number of MC addresses that we are being requested to
4524 * use.
4525 */
4526 hw->addr_ctrl.num_mc_addrs = mc_addr_count;
4527 hw->addr_ctrl.mta_in_use = 0;
4528
4529 /* Clear mta_shadow */
4530 if (clear)
4531 memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow))__builtin_memset((&hw->mac.mta_shadow), (0), (sizeof(hw
->mac.mta_shadow)))
;
4532
4533 /* Update mta_shadow */
4534 for (i = 0; i < mc_addr_count; i++)
4535 ngbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq));
4536
4537 /* Enable mta */
4538 for (i = 0; i < hw->mac.mcft_size; i++)
4539 NGBE_WRITE_REG_ARRAY(hw, NGBE_PSR_MC_TBL(0), i,((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (((0x15200
+ ((0) * 4)) + ((i) << 2))), (hw->mac.mta_shadow[i]
)))
4540 hw->mac.mta_shadow[i])((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (((0x15200
+ ((0) * 4)) + ((i) << 2))), (hw->mac.mta_shadow[i]
)))
;
4541
4542 if (hw->addr_ctrl.mta_in_use > 0) {
4543 psrctl = NGBE_READ_REG(hw, NGBE_PSR_CTL)((((struct ngbe_osdep *)(hw)->back)->os_memt)->read_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x15000
)))
;
4544 psrctl &= ~(NGBE_PSR_CTL_MO0x00000060 | NGBE_PSR_CTL_MFE0x00000080);
4545 psrctl |= NGBE_PSR_CTL_MFE0x00000080 |
4546 (hw->mac.mc_filter_type << NGBE_PSR_CTL_MO_SHIFT5);
4547 NGBE_WRITE_REG(hw, NGBE_PSR_CTL, psrctl)((((struct ngbe_osdep *)(hw)->back)->os_memt)->write_4
((((struct ngbe_osdep *)(hw)->back)->os_memh), (0x15000
), (psrctl)))
;
4548 }
4549}
4550
/*
 * Sanity-check a unicast MAC address.  Rejects multicast (group bit set
 * in the first octet), broadcast (0xff in the first two octets, which
 * is how the driver's broadcast macro identifies it) and the all-zero
 * address.  Returns 0 when valid, EINVAL otherwise.
 */
int
ngbe_validate_mac_addr(uint8_t *mac_addr)
{
	int i;

	/* Group/multicast bit must not be set. */
	if (mac_addr[0] & 0x01)
		return EINVAL;

	/* Broadcast: first two octets both 0xff. */
	if (mac_addr[0] == 0xff && mac_addr[1] == 0xff)
		return EINVAL;

	/* Any nonzero octet means the address is not all-zeroes. */
	for (i = 0; i < 6; i++) {
		if (mac_addr[i] != 0)
			return 0;
	}
	return EINVAL;
}