Bug Summary

File: dev/pci/if_bnxt.c
Warning: line 2423, column 3
3rd function call argument is an uninitialized value

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -cc1 -triple amd64-unknown-openbsd7.4 -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name if_bnxt.c -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -ffp-contract=on -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -target-feature +retpoline-external-thunk -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/llvm16/lib/clang/16 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/legacy-dpm -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc -I 
/usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu13 -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/inc -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc/pmfw_if -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D SUSPEND -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant -fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fcf-protection=branch -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /home/ben/Projects/scan/2024-01-11-110808-61670-1 -x c /usr/src/sys/dev/pci/if_bnxt.c
1/* $OpenBSD: if_bnxt.c,v 1.43 2024/01/10 05:06:00 jmatthew Exp $ */
2/*-
3 * Broadcom NetXtreme-C/E network driver.
4 *
5 * Copyright (c) 2016 Broadcom, All Rights Reserved.
6 * The term Broadcom refers to Broadcom Limited and/or its subsidiaries
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
18 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
21 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
25 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
27 * THE POSSIBILITY OF SUCH DAMAGE.
28 */
29
30/*
31 * Copyright (c) 2018 Jonathan Matthew <jmatthew@openbsd.org>
32 *
33 * Permission to use, copy, modify, and distribute this software for any
34 * purpose with or without fee is hereby granted, provided that the above
35 * copyright notice and this permission notice appear in all copies.
36 *
37 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
38 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
39 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
40 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
41 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
42 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
43 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
44 */
45
46
47#include "bpfilter.h"
48#include "vlan.h"
49
50#include <sys/param.h>
51#include <sys/systm.h>
52#include <sys/mbuf.h>
53#include <sys/kernel.h>
54#include <sys/malloc.h>
55#include <sys/device.h>
56#include <sys/stdint.h>
57#include <sys/sockio.h>
58#include <sys/atomic.h>
59#include <sys/intrmap.h>
60
61#include <machine/bus.h>
62
63#include <dev/pci/pcireg.h>
64#include <dev/pci/pcivar.h>
65#include <dev/pci/pcidevs.h>
66
67#include <dev/pci/if_bnxtreg.h>
68
69#include <net/if.h>
70#include <net/if_media.h>
71#include <net/toeplitz.h>
72
73#if NBPFILTER1 > 0
74#include <net/bpf.h>
75#endif
76
77#include <netinet/in.h>
78#include <netinet/if_ether.h>
79
80#define BNXT_HWRM_BAR0x10 0x10
81#define BNXT_DOORBELL_BAR0x18 0x18
82
83#define BNXT_MAX_QUEUES8 8
84
85#define BNXT_CP_RING_ID_BASE0 0
86#define BNXT_RX_RING_ID_BASE(8 + 1) (BNXT_MAX_QUEUES8 + 1)
87#define BNXT_AG_RING_ID_BASE((8 * 2) + 1) ((BNXT_MAX_QUEUES8 * 2) + 1)
88#define BNXT_TX_RING_ID_BASE((8 * 3) + 1) ((BNXT_MAX_QUEUES8 * 3) + 1)
89
90#define BNXT_MAX_MTU9500 9500
91#define BNXT_AG_BUFFER_SIZE8192 8192
92
93#define BNXT_CP_PAGES4 4
94
95#define BNXT_MAX_TX_SEGS32 32 /* a bit much? */
96#define BNXT_TX_SLOTS(bs)(bs->bs_map->dm_nsegs + 1) (bs->bs_map->dm_nsegs + 1)
97
98#define BNXT_HWRM_SHORT_REQ_LENsizeof(struct hwrm_short_input) sizeof(struct hwrm_short_input)
99
100#define BNXT_HWRM_LOCK_INIT(_sc, _name)do { (void)(_name); (void)(0); __mtx_init((&sc->sc_lock
), ((((0x4)) > 0x0 && ((0x4)) < 0x9) ? 0x9 : ((
0x4)))); } while (0)
\
101 mtx_init_flags(&sc->sc_lock, IPL_NET, _name, 0)do { (void)(_name); (void)(0); __mtx_init((&sc->sc_lock
), ((((0x4)) > 0x0 && ((0x4)) < 0x9) ? 0x9 : ((
0x4)))); } while (0)
102#define BNXT_HWRM_LOCK(_sc)mtx_enter(&_sc->sc_lock) mtx_enter(&_sc->sc_lock)
103#define BNXT_HWRM_UNLOCK(_sc)mtx_leave(&_sc->sc_lock) mtx_leave(&_sc->sc_lock)
104#define BNXT_HWRM_LOCK_DESTROY(_sc) /* nothing */
105#define BNXT_HWRM_LOCK_ASSERT(_sc)do { if (((&_sc->sc_lock)->mtx_owner != ({struct cpu_info
*__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof
(struct cpu_info, ci_self))); __ci;})) && !(panicstr ||
db_active)) panic("mutex %p not held in %s", (&_sc->sc_lock
), __func__); } while (0)
MUTEX_ASSERT_LOCKED(&_sc->sc_lock)do { if (((&_sc->sc_lock)->mtx_owner != ({struct cpu_info
*__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof
(struct cpu_info, ci_self))); __ci;})) && !(panicstr ||
db_active)) panic("mutex %p not held in %s", (&_sc->sc_lock
), __func__); } while (0)
106
107#define BNXT_FLAG_VF0x0001 0x0001
108#define BNXT_FLAG_NPAR0x0002 0x0002
109#define BNXT_FLAG_WOL_CAP0x0004 0x0004
110#define BNXT_FLAG_SHORT_CMD0x0008 0x0008
111#define BNXT_FLAG_MSIX0x0010 0x0010
112
113/* NVRam stuff has a five minute timeout */
114#define BNXT_NVM_TIMEO(5 * 60 * 1000) (5 * 60 * 1000)
115
116#define NEXT_CP_CONS_V(_ring, _cons, _v_bit)do { if (++(_cons) == (_ring)->ring_size) ((_cons) = 0, (_v_bit
) = !_v_bit); } while (0);
\
117do { \
118 if (++(_cons) == (_ring)->ring_size) \
119 ((_cons) = 0, (_v_bit) = !_v_bit); \
120} while (0);
121
122struct bnxt_ring {
123 uint64_t paddr;
124 uint64_t doorbell;
125 caddr_t vaddr;
126 uint32_t ring_size;
127 uint16_t id;
128 uint16_t phys_id;
129};
130
131struct bnxt_cp_ring {
132 struct bnxt_ring ring;
133 void *irq;
134 struct bnxt_softc *softc;
135 uint32_t cons;
136 int v_bit;
137 uint32_t commit_cons;
138 int commit_v_bit;
139 struct ctx_hw_stats *stats;
140 uint32_t stats_ctx_id;
141 struct bnxt_dmamem *ring_mem;
142};
143
144struct bnxt_grp_info {
145 uint32_t grp_id;
146 uint16_t stats_ctx;
147 uint16_t rx_ring_id;
148 uint16_t cp_ring_id;
149 uint16_t ag_ring_id;
150};
151
152struct bnxt_vnic_info {
153 uint16_t id;
154 uint16_t def_ring_grp;
155 uint16_t cos_rule;
156 uint16_t lb_rule;
157 uint16_t mru;
158
159 uint32_t flags;
160#define BNXT_VNIC_FLAG_DEFAULT0x01 0x01
161#define BNXT_VNIC_FLAG_BD_STALL0x02 0x02
162#define BNXT_VNIC_FLAG_VLAN_STRIP0x04 0x04
163
164 uint64_t filter_id;
165 uint32_t flow_id;
166
167 uint16_t rss_id;
168};
169
170struct bnxt_slot {
171 bus_dmamap_t bs_map;
172 struct mbuf *bs_m;
173};
174
175struct bnxt_dmamem {
176 bus_dmamap_t bdm_map;
177 bus_dma_segment_t bdm_seg;
178 size_t bdm_size;
179 caddr_t bdm_kva;
180};
181#define BNXT_DMA_MAP(_bdm)((_bdm)->bdm_map) ((_bdm)->bdm_map)
182#define BNXT_DMA_LEN(_bdm)((_bdm)->bdm_size) ((_bdm)->bdm_size)
183#define BNXT_DMA_DVA(_bdm)((u_int64_t)(_bdm)->bdm_map->dm_segs[0].ds_addr) ((u_int64_t)(_bdm)->bdm_map->dm_segs[0].ds_addr)
184#define BNXT_DMA_KVA(_bdm)((void *)(_bdm)->bdm_kva) ((void *)(_bdm)->bdm_kva)
185
186struct bnxt_rx_queue {
187 struct bnxt_softc *rx_softc;
188 struct ifiqueue *rx_ifiq;
189 struct bnxt_dmamem *rx_ring_mem; /* rx and ag */
190 struct bnxt_ring rx_ring;
191 struct bnxt_ring rx_ag_ring;
192 struct if_rxring rxr[2];
193 struct bnxt_slot *rx_slots;
194 struct bnxt_slot *rx_ag_slots;
195 int rx_prod;
196 int rx_cons;
197 int rx_ag_prod;
198 int rx_ag_cons;
199 struct timeout rx_refill;
200};
201
202struct bnxt_tx_queue {
203 struct bnxt_softc *tx_softc;
204 struct ifqueue *tx_ifq;
205 struct bnxt_dmamem *tx_ring_mem;
206 struct bnxt_ring tx_ring;
207 struct bnxt_slot *tx_slots;
208 int tx_prod;
209 int tx_cons;
210 int tx_ring_prod;
211 int tx_ring_cons;
212};
213
214struct bnxt_queue {
215 char q_name[8];
216 int q_index;
217 void *q_ihc;
218 struct bnxt_softc *q_sc;
219 struct bnxt_cp_ring q_cp;
220 struct bnxt_rx_queue q_rx;
221 struct bnxt_tx_queue q_tx;
222 struct bnxt_grp_info q_rg;
223};
224
225struct bnxt_softc {
226 struct device sc_dev;
227 struct arpcom sc_ac;
228 struct ifmedia sc_media;
229
230 struct mutex sc_lock;
231
232 pci_chipset_tag_t sc_pc;
233 pcitag_t sc_tag;
234 bus_dma_tag_t sc_dmat;
235
236 bus_space_tag_t sc_hwrm_t;
237 bus_space_handle_t sc_hwrm_h;
238 bus_size_t sc_hwrm_s;
239
240 struct bnxt_dmamem *sc_cmd_resp;
241 uint16_t sc_cmd_seq;
242 uint16_t sc_max_req_len;
243 uint32_t sc_cmd_timeo;
244 uint32_t sc_flags;
245
246 bus_space_tag_t sc_db_t;
247 bus_space_handle_t sc_db_h;
248 bus_size_t sc_db_s;
249
250 void *sc_ih;
251
252 int sc_hwrm_ver;
253 int sc_tx_queue_id;
254
255 struct bnxt_vnic_info sc_vnic;
256 struct bnxt_dmamem *sc_stats_ctx_mem;
257 struct bnxt_dmamem *sc_rx_cfg;
258
259 struct bnxt_cp_ring sc_cp_ring;
260
261 int sc_nqueues;
262 struct intrmap *sc_intrmap;
263 struct bnxt_queue sc_queues[BNXT_MAX_QUEUES8];
264};
265#define DEVNAME(_sc)((_sc)->sc_dev.dv_xname) ((_sc)->sc_dev.dv_xname)
266
267const struct pci_matchid bnxt_devices[] = {
268 { PCI_VENDOR_BROADCOM0x14e4, PCI_PRODUCT_BROADCOM_BCM573010x16c8 },
269 { PCI_VENDOR_BROADCOM0x14e4, PCI_PRODUCT_BROADCOM_BCM573020x16c9 },
270 { PCI_VENDOR_BROADCOM0x14e4, PCI_PRODUCT_BROADCOM_BCM573040x16ca },
271 { PCI_VENDOR_BROADCOM0x14e4, PCI_PRODUCT_BROADCOM_BCM573110x16ce },
272 { PCI_VENDOR_BROADCOM0x14e4, PCI_PRODUCT_BROADCOM_BCM573120x16cf },
273 { PCI_VENDOR_BROADCOM0x14e4, PCI_PRODUCT_BROADCOM_BCM573140x16df },
274 { PCI_VENDOR_BROADCOM0x14e4, PCI_PRODUCT_BROADCOM_BCM574020x16d0 },
275 { PCI_VENDOR_BROADCOM0x14e4, PCI_PRODUCT_BROADCOM_BCM574040x16d1 },
276 { PCI_VENDOR_BROADCOM0x14e4, PCI_PRODUCT_BROADCOM_BCM574060x16d2 },
277 { PCI_VENDOR_BROADCOM0x14e4, PCI_PRODUCT_BROADCOM_BCM574070x16d5 },
278 { PCI_VENDOR_BROADCOM0x14e4, PCI_PRODUCT_BROADCOM_BCM574120x16d6 },
279 { PCI_VENDOR_BROADCOM0x14e4, PCI_PRODUCT_BROADCOM_BCM574140x16d7 },
280 { PCI_VENDOR_BROADCOM0x14e4, PCI_PRODUCT_BROADCOM_BCM574160x16d8 },
281 { PCI_VENDOR_BROADCOM0x14e4, PCI_PRODUCT_BROADCOM_BCM57416_SFP0x16e3 },
282 { PCI_VENDOR_BROADCOM0x14e4, PCI_PRODUCT_BROADCOM_BCM574170x16d8 },
283 { PCI_VENDOR_BROADCOM0x14e4, PCI_PRODUCT_BROADCOM_BCM57417_SFP0x16e2 }
284};
285
286int bnxt_match(struct device *, void *, void *);
287void bnxt_attach(struct device *, struct device *, void *);
288
289void bnxt_up(struct bnxt_softc *);
290void bnxt_down(struct bnxt_softc *);
291void bnxt_iff(struct bnxt_softc *);
292int bnxt_ioctl(struct ifnet *, u_long, caddr_t);
293int bnxt_rxrinfo(struct bnxt_softc *, struct if_rxrinfo *);
294void bnxt_start(struct ifqueue *);
295int bnxt_admin_intr(void *);
296int bnxt_intr(void *);
297void bnxt_watchdog(struct ifnet *);
298void bnxt_media_status(struct ifnet *, struct ifmediareq *);
299int bnxt_media_change(struct ifnet *);
300int bnxt_media_autonegotiate(struct bnxt_softc *);
301
302struct cmpl_base *bnxt_cpr_next_cmpl(struct bnxt_softc *, struct bnxt_cp_ring *);
303void bnxt_cpr_commit(struct bnxt_softc *, struct bnxt_cp_ring *);
304void bnxt_cpr_rollback(struct bnxt_softc *, struct bnxt_cp_ring *);
305
306void bnxt_mark_cpr_invalid(struct bnxt_cp_ring *);
307void bnxt_write_cp_doorbell(struct bnxt_softc *, struct bnxt_ring *,
308 int);
309void bnxt_write_cp_doorbell_index(struct bnxt_softc *,
310 struct bnxt_ring *, uint32_t, int);
311void bnxt_write_rx_doorbell(struct bnxt_softc *, struct bnxt_ring *,
312 int);
313void bnxt_write_tx_doorbell(struct bnxt_softc *, struct bnxt_ring *,
314 int);
315
316int bnxt_rx_fill(struct bnxt_queue *);
317u_int bnxt_rx_fill_slots(struct bnxt_softc *, struct bnxt_ring *, void *,
318 struct bnxt_slot *, uint *, int, uint16_t, u_int);
319void bnxt_refill(void *);
320int bnxt_rx(struct bnxt_softc *, struct bnxt_rx_queue *,
321 struct bnxt_cp_ring *, struct mbuf_list *, int *, int *,
322 struct cmpl_base *);
323
324void bnxt_txeof(struct bnxt_softc *, struct bnxt_tx_queue *, int *,
325 struct cmpl_base *);
326
327int bnxt_set_cp_ring_aggint(struct bnxt_softc *, struct bnxt_cp_ring *);
328
329int _hwrm_send_message(struct bnxt_softc *, void *, uint32_t);
330int hwrm_send_message(struct bnxt_softc *, void *, uint32_t);
331void bnxt_hwrm_cmd_hdr_init(struct bnxt_softc *, void *, uint16_t);
332int bnxt_hwrm_err_map(uint16_t err);
333
334/* HWRM Function Prototypes */
335int bnxt_hwrm_ring_alloc(struct bnxt_softc *, uint8_t,
336 struct bnxt_ring *, uint16_t, uint32_t, int);
337int bnxt_hwrm_ring_free(struct bnxt_softc *, uint8_t,
338 struct bnxt_ring *);
339int bnxt_hwrm_ver_get(struct bnxt_softc *);
340int bnxt_hwrm_queue_qportcfg(struct bnxt_softc *);
341int bnxt_hwrm_func_drv_rgtr(struct bnxt_softc *);
342int bnxt_hwrm_func_qcaps(struct bnxt_softc *);
343int bnxt_hwrm_func_qcfg(struct bnxt_softc *);
344int bnxt_hwrm_func_reset(struct bnxt_softc *);
345int bnxt_hwrm_vnic_ctx_alloc(struct bnxt_softc *, uint16_t *);
346int bnxt_hwrm_vnic_ctx_free(struct bnxt_softc *, uint16_t *);
347int bnxt_hwrm_vnic_cfg(struct bnxt_softc *,
348 struct bnxt_vnic_info *);
349int bnxt_hwrm_vnic_cfg_placement(struct bnxt_softc *,
350 struct bnxt_vnic_info *vnic);
351int bnxt_hwrm_stat_ctx_alloc(struct bnxt_softc *,
352 struct bnxt_cp_ring *, uint64_t);
353int bnxt_hwrm_stat_ctx_free(struct bnxt_softc *,
354 struct bnxt_cp_ring *);
355int bnxt_hwrm_ring_grp_alloc(struct bnxt_softc *,
356 struct bnxt_grp_info *);
357int bnxt_hwrm_ring_grp_free(struct bnxt_softc *,
358 struct bnxt_grp_info *);
359int bnxt_hwrm_vnic_alloc(struct bnxt_softc *,
360 struct bnxt_vnic_info *);
361int bnxt_hwrm_vnic_free(struct bnxt_softc *,
362 struct bnxt_vnic_info *);
363int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt_softc *,
364 uint32_t, uint32_t, uint64_t, uint32_t);
365int bnxt_hwrm_set_filter(struct bnxt_softc *,
366 struct bnxt_vnic_info *);
367int bnxt_hwrm_free_filter(struct bnxt_softc *,
368 struct bnxt_vnic_info *);
369int bnxt_hwrm_vnic_rss_cfg(struct bnxt_softc *,
370 struct bnxt_vnic_info *, uint32_t, daddr_t, daddr_t);
371int bnxt_cfg_async_cr(struct bnxt_softc *, struct bnxt_cp_ring *);
372int bnxt_hwrm_nvm_get_dev_info(struct bnxt_softc *, uint16_t *,
373 uint16_t *, uint32_t *, uint32_t *, uint32_t *, uint32_t *);
374int bnxt_hwrm_port_phy_qcfg(struct bnxt_softc *,
375 struct ifmediareq *);
376int bnxt_hwrm_func_rgtr_async_events(struct bnxt_softc *);
377int bnxt_get_sffpage(struct bnxt_softc *, struct if_sffpage *);
378
379/* not used yet: */
380#if 0
381int bnxt_hwrm_func_drv_unrgtr(struct bnxt_softc *softc, bool_Bool shutdown);
382
383int bnxt_hwrm_port_qstats(struct bnxt_softc *softc);
384
385
386int bnxt_hwrm_vnic_tpa_cfg(struct bnxt_softc *softc);
387void bnxt_validate_hw_lro_settings(struct bnxt_softc *softc);
388int bnxt_hwrm_fw_reset(struct bnxt_softc *softc, uint8_t processor,
389 uint8_t *selfreset);
390int bnxt_hwrm_fw_qstatus(struct bnxt_softc *softc, uint8_t type,
391 uint8_t *selfreset);
392int bnxt_hwrm_fw_get_time(struct bnxt_softc *softc, uint16_t *year,
393 uint8_t *month, uint8_t *day, uint8_t *hour, uint8_t *minute,
394 uint8_t *second, uint16_t *millisecond, uint16_t *zone);
395int bnxt_hwrm_fw_set_time(struct bnxt_softc *softc, uint16_t year,
396 uint8_t month, uint8_t day, uint8_t hour, uint8_t minute, uint8_t second,
397 uint16_t millisecond, uint16_t zone);
398
399#endif
400
401
402const struct cfattach bnxt_ca = {
403 sizeof(struct bnxt_softc), bnxt_match, bnxt_attach
404};
405
406struct cfdriver bnxt_cd = {
407 NULL((void *)0), "bnxt", DV_IFNET
408};
409
410struct bnxt_dmamem *
411bnxt_dmamem_alloc(struct bnxt_softc *sc, size_t size)
412{
413 struct bnxt_dmamem *m;
414 int nsegs;
415
416 m = malloc(sizeof(*m), M_DEVBUF2, M_NOWAIT0x0002 | M_ZERO0x0008);
417 if (m == NULL((void *)0))
418 return (NULL((void *)0));
419
420 m->bdm_size = size;
421
422 if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), (size
), (1), (size), (0), (0x0001 | 0x0002), (&m->bdm_map))
423 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &m->bdm_map)(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), (size
), (1), (size), (0), (0x0001 | 0x0002), (&m->bdm_map))
!= 0)
424 goto bdmfree;
425
426 if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &m->bdm_seg, 1,(*(sc->sc_dmat)->_dmamem_alloc)((sc->sc_dmat), (size
), ((1 << 12)), (0), (&m->bdm_seg), (1), (&nsegs
), (0x0001 | 0x1000))
427 &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO)(*(sc->sc_dmat)->_dmamem_alloc)((sc->sc_dmat), (size
), ((1 << 12)), (0), (&m->bdm_seg), (1), (&nsegs
), (0x0001 | 0x1000))
!= 0)
428 goto destroy;
429
430 if (bus_dmamem_map(sc->sc_dmat, &m->bdm_seg, nsegs, size, &m->bdm_kva,(*(sc->sc_dmat)->_dmamem_map)((sc->sc_dmat), (&m
->bdm_seg), (nsegs), (size), (&m->bdm_kva), (0x0001
))
431 BUS_DMA_NOWAIT)(*(sc->sc_dmat)->_dmamem_map)((sc->sc_dmat), (&m
->bdm_seg), (nsegs), (size), (&m->bdm_kva), (0x0001
))
!= 0)
432 goto free;
433
434 if (bus_dmamap_load(sc->sc_dmat, m->bdm_map, m->bdm_kva, size, NULL,(*(sc->sc_dmat)->_dmamap_load)((sc->sc_dmat), (m->
bdm_map), (m->bdm_kva), (size), (((void *)0)), (0x0001))
435 BUS_DMA_NOWAIT)(*(sc->sc_dmat)->_dmamap_load)((sc->sc_dmat), (m->
bdm_map), (m->bdm_kva), (size), (((void *)0)), (0x0001))
!= 0)
436 goto unmap;
437
438 return (m);
439
440unmap:
441 bus_dmamem_unmap(sc->sc_dmat, m->bdm_kva, m->bdm_size)(*(sc->sc_dmat)->_dmamem_unmap)((sc->sc_dmat), (m->
bdm_kva), (m->bdm_size))
;
442free:
443 bus_dmamem_free(sc->sc_dmat, &m->bdm_seg, 1)(*(sc->sc_dmat)->_dmamem_free)((sc->sc_dmat), (&
m->bdm_seg), (1))
;
444destroy:
445 bus_dmamap_destroy(sc->sc_dmat, m->bdm_map)(*(sc->sc_dmat)->_dmamap_destroy)((sc->sc_dmat), (m->
bdm_map))
;
446bdmfree:
447 free(m, M_DEVBUF2, sizeof *m);
448
449 return (NULL((void *)0));
450}
451
452void
453bnxt_dmamem_free(struct bnxt_softc *sc, struct bnxt_dmamem *m)
454{
455 bus_dmamap_unload(sc->sc_dmat, m->bdm_map)(*(sc->sc_dmat)->_dmamap_unload)((sc->sc_dmat), (m->
bdm_map))
;
456 bus_dmamem_unmap(sc->sc_dmat, m->bdm_kva, m->bdm_size)(*(sc->sc_dmat)->_dmamem_unmap)((sc->sc_dmat), (m->
bdm_kva), (m->bdm_size))
;
457 bus_dmamem_free(sc->sc_dmat, &m->bdm_seg, 1)(*(sc->sc_dmat)->_dmamem_free)((sc->sc_dmat), (&
m->bdm_seg), (1))
;
458 bus_dmamap_destroy(sc->sc_dmat, m->bdm_map)(*(sc->sc_dmat)->_dmamap_destroy)((sc->sc_dmat), (m->
bdm_map))
;
459 free(m, M_DEVBUF2, sizeof *m);
460}
461
462int
463bnxt_match(struct device *parent, void *match, void *aux)
464{
465 return (pci_matchbyid(aux, bnxt_devices, nitems(bnxt_devices)(sizeof((bnxt_devices)) / sizeof((bnxt_devices)[0]))));
466}
467
468void
469bnxt_attach(struct device *parent, struct device *self, void *aux)
470{
471 struct bnxt_softc *sc = (struct bnxt_softc *)self;
472 struct ifnet *ifp = &sc->sc_ac.ac_if;
473 struct pci_attach_args *pa = aux;
474 struct bnxt_cp_ring *cpr;
475 pci_intr_handle_t ih;
476 const char *intrstr;
477 u_int memtype;
478 int i;
479
480 sc->sc_pc = pa->pa_pc;
481 sc->sc_tag = pa->pa_tag;
482 sc->sc_dmat = pa->pa_dmat;
483
484 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BNXT_HWRM_BAR0x10);
485 if (pci_mapreg_map(pa, BNXT_HWRM_BAR0x10, memtype, 0, &sc->sc_hwrm_t,
486 &sc->sc_hwrm_h, NULL((void *)0), &sc->sc_hwrm_s, 0)) {
487 printf(": failed to map hwrm\n");
488 return;
489 }
490
491 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BNXT_DOORBELL_BAR0x18);
492 if (pci_mapreg_map(pa, BNXT_DOORBELL_BAR0x18, memtype, 0, &sc->sc_db_t,
493 &sc->sc_db_h, NULL((void *)0), &sc->sc_db_s, 0)) {
494 printf(": failed to map doorbell\n");
495 goto unmap_1;
496 }
497
498 BNXT_HWRM_LOCK_INIT(sc, DEVNAME(sc))do { (void)(((sc)->sc_dev.dv_xname)); (void)(0); __mtx_init
((&sc->sc_lock), ((((0x4)) > 0x0 && ((0x4))
< 0x9) ? 0x9 : ((0x4)))); } while (0)
;
499 sc->sc_cmd_resp = bnxt_dmamem_alloc(sc, PAGE_SIZE(1 << 12));
500 if (sc->sc_cmd_resp == NULL((void *)0)) {
501 printf(": failed to allocate command response buffer\n");
502 goto unmap_2;
503 }
504
505 if (bnxt_hwrm_ver_get(sc) != 0) {
506 printf(": failed to query version info\n");
507 goto free_resp;
508 }
509
510 if (bnxt_hwrm_nvm_get_dev_info(sc, NULL((void *)0), NULL((void *)0), NULL((void *)0), NULL((void *)0), NULL((void *)0), NULL((void *)0))
511 != 0) {
512 printf(": failed to get nvram info\n");
513 goto free_resp;
514 }
515
516 if (bnxt_hwrm_func_drv_rgtr(sc) != 0) {
517 printf(": failed to register driver with firmware\n");
518 goto free_resp;
519 }
520
521 if (bnxt_hwrm_func_rgtr_async_events(sc) != 0) {
522 printf(": failed to register async events\n");
523 goto free_resp;
524 }
525
526 if (bnxt_hwrm_func_qcaps(sc) != 0) {
527 printf(": failed to get queue capabilities\n");
528 goto free_resp;
529 }
530
531 /*
532 * devices advertise msi support, but there's no way to tell a
533 * completion queue to use msi mode, only legacy or msi-x.
534 */
535 if (pci_intr_map_msix(pa, 0, &ih) == 0) {
536 int nmsix;
537
538 sc->sc_flags |= BNXT_FLAG_MSIX0x0010;
539 intrstr = pci_intr_string(sc->sc_pc, ih);
540
541 nmsix = pci_intr_msix_count(pa);
542 if (nmsix > 1) {
543 sc->sc_ih = pci_intr_establish(sc->sc_pc, ih,
544 IPL_NET0x4 | IPL_MPSAFE0x100, bnxt_admin_intr, sc, DEVNAME(sc)((sc)->sc_dev.dv_xname));
545 sc->sc_intrmap = intrmap_create(&sc->sc_dev,
546 nmsix - 1, BNXT_MAX_QUEUES8, INTRMAP_POWEROF2(1 << 0));
547 sc->sc_nqueues = intrmap_count(sc->sc_intrmap);
548 KASSERT(sc->sc_nqueues > 0)((sc->sc_nqueues > 0) ? (void)0 : __assert("diagnostic "
, "/usr/src/sys/dev/pci/if_bnxt.c", 548, "sc->sc_nqueues > 0"
))
;
549 KASSERT(powerof2(sc->sc_nqueues))((((((sc->sc_nqueues)-1)&(sc->sc_nqueues))==0)) ? (
void)0 : __assert("diagnostic ", "/usr/src/sys/dev/pci/if_bnxt.c"
, 549, "powerof2(sc->sc_nqueues)"))
;
550 } else {
551 sc->sc_ih = pci_intr_establish(sc->sc_pc, ih,
552 IPL_NET0x4 | IPL_MPSAFE0x100, bnxt_intr, &sc->sc_queues[0],
553 DEVNAME(sc)((sc)->sc_dev.dv_xname));
554 sc->sc_nqueues = 1;
555 }
556 } else if (pci_intr_map(pa, &ih) == 0) {
557 intrstr = pci_intr_string(sc->sc_pc, ih);
558 sc->sc_ih = pci_intr_establish(sc->sc_pc, ih, IPL_NET0x4 | IPL_MPSAFE0x100,
559 bnxt_intr, &sc->sc_queues[0], DEVNAME(sc)((sc)->sc_dev.dv_xname));
560 sc->sc_nqueues = 1;
561 } else {
562 printf(": unable to map interrupt\n");
563 goto free_resp;
564 }
565 if (sc->sc_ih == NULL((void *)0)) {
566 printf(": unable to establish interrupt");
567 if (intrstr != NULL((void *)0))
568 printf(" at %s", intrstr);
569 printf("\n");
570 goto deintr;
571 }
572 printf("%s, %d queues, address %s\n", intrstr, sc->sc_nqueues,
573 ether_sprintf(sc->sc_ac.ac_enaddr));
574
575 if (bnxt_hwrm_func_qcfg(sc) != 0) {
576 printf("%s: failed to query function config\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
577 goto deintr;
578 }
579
580 if (bnxt_hwrm_queue_qportcfg(sc) != 0) {
581 printf("%s: failed to query port config\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
582 goto deintr;
583 }
584
585 if (bnxt_hwrm_func_reset(sc) != 0) {
586 printf("%s: reset failed\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
587 goto deintr;
588 }
589
590 if (sc->sc_intrmap == NULL((void *)0))
591 cpr = &sc->sc_queues[0].q_cp;
592 else
593 cpr = &sc->sc_cp_ring;
594
595 cpr->stats_ctx_id = HWRM_NA_SIGNATURE((uint32_t)(-1));
596 cpr->ring.phys_id = (uint16_t)HWRM_NA_SIGNATURE((uint32_t)(-1));
597 cpr->softc = sc;
598 cpr->ring.id = 0;
599 cpr->ring.doorbell = cpr->ring.id * 0x80;
600 cpr->ring.ring_size = (PAGE_SIZE(1 << 12) * BNXT_CP_PAGES4) /
601 sizeof(struct cmpl_base);
602 cpr->ring_mem = bnxt_dmamem_alloc(sc, PAGE_SIZE(1 << 12) *
603 BNXT_CP_PAGES4);
604 if (cpr->ring_mem == NULL((void *)0)) {
605 printf("%s: failed to allocate completion queue memory\n",
606 DEVNAME(sc)((sc)->sc_dev.dv_xname));
607 goto deintr;
608 }
609 cpr->ring.vaddr = BNXT_DMA_KVA(cpr->ring_mem)((void *)(cpr->ring_mem)->bdm_kva);
610 cpr->ring.paddr = BNXT_DMA_DVA(cpr->ring_mem)((u_int64_t)(cpr->ring_mem)->bdm_map->dm_segs[0].ds_addr
)
;
611 cpr->cons = UINT32_MAX0xffffffffU;
612 cpr->v_bit = 1;
613 bnxt_mark_cpr_invalid(cpr);
614 if (bnxt_hwrm_ring_alloc(sc, HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL0x0U,
615 &cpr->ring, (uint16_t)HWRM_NA_SIGNATURE((uint32_t)(-1)),
616 HWRM_NA_SIGNATURE((uint32_t)(-1)), 1) != 0) {
617 printf("%s: failed to allocate completion queue\n",
618 DEVNAME(sc)((sc)->sc_dev.dv_xname));
619 goto free_cp_mem;
620 }
621 if (bnxt_cfg_async_cr(sc, cpr) != 0) {
622 printf("%s: failed to set async completion ring\n",
623 DEVNAME(sc)((sc)->sc_dev.dv_xname));
624 goto free_cp_mem;
625 }
626 bnxt_write_cp_doorbell(sc, &cpr->ring, 1);
627
628 if (bnxt_set_cp_ring_aggint(sc, cpr) != 0) {
629 printf("%s: failed to set interrupt aggregation\n",
630 DEVNAME(sc)((sc)->sc_dev.dv_xname));
631 goto free_cp_mem;
632 }
633
634 strlcpy(ifp->if_xname, DEVNAME(sc)((sc)->sc_dev.dv_xname), IFNAMSIZ16);
635 ifp->if_softc = sc;
636 ifp->if_flags = IFF_BROADCAST0x2 | IFF_MULTICAST0x8000 | IFF_SIMPLEX0x800;
637 ifp->if_xflags = IFXF_MPSAFE0x1;
638 ifp->if_ioctl = bnxt_ioctl;
639 ifp->if_qstart = bnxt_start;
640 ifp->if_watchdog = bnxt_watchdog;
641 ifp->if_hardmtu = BNXT_MAX_MTU9500;
642 ifp->if_capabilitiesif_data.ifi_capabilities = IFCAP_VLAN_MTU0x00000010 | IFCAP_CSUM_IPv40x00000001 |
643 IFCAP_CSUM_UDPv40x00000004 | IFCAP_CSUM_TCPv40x00000002 | IFCAP_CSUM_UDPv60x00000100 |
644 IFCAP_CSUM_TCPv60x00000080;
645#if NVLAN1 > 0
646 ifp->if_capabilitiesif_data.ifi_capabilities |= IFCAP_VLAN_HWTAGGING0x00000020;
647#endif
648 ifq_init_maxlen(&ifp->if_snd, 1024); /* ? */
649
650 ifmedia_init(&sc->sc_media, IFM_IMASK0xff00000000000000ULL, bnxt_media_change,
651 bnxt_media_status);
652
653 if_attach(ifp);
654 ether_ifattach(ifp);
655
656 if_attach_iqueues(ifp, sc->sc_nqueues);
657 if_attach_queues(ifp, sc->sc_nqueues);
658 for (i = 0; i < sc->sc_nqueues; i++) {
659 struct ifiqueue *ifiq = ifp->if_iqs[i];
660 struct ifqueue *ifq = ifp->if_ifqs[i];
661 struct bnxt_queue *bq = &sc->sc_queues[i];
662 struct bnxt_cp_ring *cp = &bq->q_cp;
663 struct bnxt_rx_queue *rx = &bq->q_rx;
664 struct bnxt_tx_queue *tx = &bq->q_tx;
665
666 bq->q_index = i;
667 bq->q_sc = sc;
668
669 rx->rx_softc = sc;
670 rx->rx_ifiq = ifiq;
671 timeout_set(&rx->rx_refill, bnxt_refill, bq);
672 ifiq->ifiq_softc_ifiq_ptr._ifiq_softc = rx;
673
674 tx->tx_softc = sc;
675 tx->tx_ifq = ifq;
676 ifq->ifq_softc_ifq_ptr._ifq_softc = tx;
677
678 if (sc->sc_intrmap != NULL((void *)0)) {
679 cp->stats_ctx_id = HWRM_NA_SIGNATURE((uint32_t)(-1));
680 cp->ring.phys_id = (uint16_t)HWRM_NA_SIGNATURE((uint32_t)(-1));
681 cp->ring.id = i + 1; /* first cp ring is async only */
682 cp->softc = sc;
683 cp->ring.doorbell = bq->q_cp.ring.id * 0x80;
684 cp->ring.ring_size = (PAGE_SIZE(1 << 12) * BNXT_CP_PAGES4) /
685 sizeof(struct cmpl_base);
686 if (pci_intr_map_msix(pa, i + 1, &ih) != 0) {
687 printf("%s: unable to map queue interrupt %d\n",
688 DEVNAME(sc)((sc)->sc_dev.dv_xname), i);
689 goto intrdisestablish;
690 }
691 snprintf(bq->q_name, sizeof(bq->q_name), "%s:%d",
692 DEVNAME(sc)((sc)->sc_dev.dv_xname), i);
693 bq->q_ihc = pci_intr_establish_cpu(sc->sc_pc, ih,
694 IPL_NET0x4 | IPL_MPSAFE0x100, intrmap_cpu(sc->sc_intrmap, i),
695 bnxt_intr, bq, bq->q_name);
696 if (bq->q_ihc == NULL((void *)0)) {
697 printf("%s: unable to establish interrupt %d\n",
698 DEVNAME(sc)((sc)->sc_dev.dv_xname), i);
699 goto intrdisestablish;
700 }
701 }
702 }
703
704 bnxt_media_autonegotiate(sc);
705 bnxt_hwrm_port_phy_qcfg(sc, NULL((void *)0));
706 return;
707
708intrdisestablish:
709 for (i = 0; i < sc->sc_nqueues; i++) {
710 struct bnxt_queue *bq = &sc->sc_queues[i];
711 if (bq->q_ihc == NULL((void *)0))
712 continue;
713 pci_intr_disestablish(sc->sc_pc, bq->q_ihc);
714 bq->q_ihc = NULL((void *)0);
715 }
716free_cp_mem:
717 bnxt_dmamem_free(sc, cpr->ring_mem);
718deintr:
719 if (sc->sc_intrmap != NULL((void *)0)) {
720 intrmap_destroy(sc->sc_intrmap);
721 sc->sc_intrmap = NULL((void *)0);
722 }
723 pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
724 sc->sc_ih = NULL((void *)0);
725free_resp:
726 bnxt_dmamem_free(sc, sc->sc_cmd_resp);
727unmap_2:
728 bus_space_unmap(sc->sc_db_t, sc->sc_db_h, sc->sc_db_s);
729 sc->sc_db_s = 0;
730unmap_1:
731 bus_space_unmap(sc->sc_hwrm_t, sc->sc_hwrm_h, sc->sc_hwrm_s);
732 sc->sc_hwrm_s = 0;
733}
734
735void
736bnxt_free_slots(struct bnxt_softc *sc, struct bnxt_slot *slots, int allocated,
737 int total)
738{
739 struct bnxt_slot *bs;
740
741 int i = allocated;
742 while (i-- > 0) {
743 bs = &slots[i];
744 bus_dmamap_destroy(sc->sc_dmat, bs->bs_map)(*(sc->sc_dmat)->_dmamap_destroy)((sc->sc_dmat), (bs
->bs_map))
;
745 if (bs->bs_m != NULL((void *)0))
746 m_freem(bs->bs_m);
747 }
748 free(slots, M_DEVBUF2, total * sizeof(*bs));
749}
750
751int
752bnxt_set_cp_ring_aggint(struct bnxt_softc *sc, struct bnxt_cp_ring *cpr)
753{
754 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input aggint;
755
756 /*
757 * set interrupt aggregation parameters for around 10k interrupts
758 * per second. the timers are in units of 80usec, and the counters
759 * are based on the minimum rx ring size of 32.
760 */
761 memset(&aggint, 0, sizeof(aggint))__builtin_memset((&aggint), (0), (sizeof(aggint)));
762 bnxt_hwrm_cmd_hdr_init(sc, &aggint,
763 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS(0x53U));
764 aggint.ring_id = htole16(cpr->ring.phys_id)((__uint16_t)(cpr->ring.phys_id));
765 aggint.num_cmpl_dma_aggr = htole16(32)((__uint16_t)(32));
766 aggint.num_cmpl_dma_aggr_during_int = aggint.num_cmpl_dma_aggr;
767 aggint.cmpl_aggr_dma_tmr = htole16((1000000000 / 20000) / 80)((__uint16_t)((1000000000 / 20000) / 80));
768 aggint.cmpl_aggr_dma_tmr_during_int = aggint.cmpl_aggr_dma_tmr;
769 aggint.int_lat_tmr_min = htole16((1000000000 / 20000) / 80)((__uint16_t)((1000000000 / 20000) / 80));
770 aggint.int_lat_tmr_max = htole16((1000000000 / 10000) / 80)((__uint16_t)((1000000000 / 10000) / 80));
771 aggint.num_cmpl_aggr_int = htole16(16)((__uint16_t)(16));
772 return (hwrm_send_message(sc, &aggint, sizeof(aggint)));
773}
774
775int
776bnxt_queue_up(struct bnxt_softc *sc, struct bnxt_queue *bq)
777{
778 struct ifnet *ifp = &sc->sc_ac.ac_if;
779 struct bnxt_cp_ring *cp = &bq->q_cp;
780 struct bnxt_rx_queue *rx = &bq->q_rx;
781 struct bnxt_tx_queue *tx = &bq->q_tx;
782 struct bnxt_grp_info *rg = &bq->q_rg;
783 struct bnxt_slot *bs;
784 int i;
785
786 tx->tx_ring_mem = bnxt_dmamem_alloc(sc, PAGE_SIZE(1 << 12));
787 if (tx->tx_ring_mem == NULL((void *)0)) {
788 printf("%s: failed to allocate tx ring %d\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), bq->q_index);
789 return ENOMEM12;
790 }
791
792 rx->rx_ring_mem = bnxt_dmamem_alloc(sc, PAGE_SIZE(1 << 12) * 2);
793 if (rx->rx_ring_mem == NULL((void *)0)) {
794 printf("%s: failed to allocate rx ring %d\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), bq->q_index);
795 goto free_tx;
796 }
797
798 /* completion ring is already allocated if we're not using an intrmap */
799 if (sc->sc_intrmap != NULL((void *)0)) {
800 cp->ring_mem = bnxt_dmamem_alloc(sc, PAGE_SIZE(1 << 12) * BNXT_CP_PAGES4);
801 if (cp->ring_mem == NULL((void *)0)) {
802 printf("%s: failed to allocate completion ring %d mem\n",
803 DEVNAME(sc)((sc)->sc_dev.dv_xname), bq->q_index);
804 goto free_rx;
805 }
806 cp->ring.vaddr = BNXT_DMA_KVA(cp->ring_mem)((void *)(cp->ring_mem)->bdm_kva);
807 cp->ring.paddr = BNXT_DMA_DVA(cp->ring_mem)((u_int64_t)(cp->ring_mem)->bdm_map->dm_segs[0].ds_addr
)
;
808 cp->cons = UINT32_MAX0xffffffffU;
809 cp->v_bit = 1;
810 bnxt_mark_cpr_invalid(cp);
811
812 if (bnxt_hwrm_ring_alloc(sc, HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL0x0U,
813 &cp->ring, (uint16_t)HWRM_NA_SIGNATURE((uint32_t)(-1)),
814 HWRM_NA_SIGNATURE((uint32_t)(-1)), 1) != 0) {
815 printf("%s: failed to allocate completion queue %d\n",
816 DEVNAME(sc)((sc)->sc_dev.dv_xname), bq->q_index);
817 goto free_rx;
818 }
819
820 if (bnxt_set_cp_ring_aggint(sc, cp) != 0) {
821 printf("%s: failed to set interrupt %d aggregation\n",
822 DEVNAME(sc)((sc)->sc_dev.dv_xname), bq->q_index);
823 goto free_rx;
824 }
825 bnxt_write_cp_doorbell(sc, &cp->ring, 1);
826 }
827
828 if (bnxt_hwrm_stat_ctx_alloc(sc, &bq->q_cp,
829 BNXT_DMA_DVA(sc->sc_stats_ctx_mem)((u_int64_t)(sc->sc_stats_ctx_mem)->bdm_map->dm_segs
[0].ds_addr)
+
830 (bq->q_index * sizeof(struct ctx_hw_stats))) != 0) {
831 printf("%s: failed to set up stats context\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
832 goto free_rx;
833 }
834
835 tx->tx_ring.phys_id = (uint16_t)HWRM_NA_SIGNATURE((uint32_t)(-1));
836 tx->tx_ring.id = BNXT_TX_RING_ID_BASE((8 * 3) + 1) + bq->q_index;
837 tx->tx_ring.doorbell = tx->tx_ring.id * 0x80;
838 tx->tx_ring.ring_size = PAGE_SIZE(1 << 12) / sizeof(struct tx_bd_short);
839 tx->tx_ring.vaddr = BNXT_DMA_KVA(tx->tx_ring_mem)((void *)(tx->tx_ring_mem)->bdm_kva);
840 tx->tx_ring.paddr = BNXT_DMA_DVA(tx->tx_ring_mem)((u_int64_t)(tx->tx_ring_mem)->bdm_map->dm_segs[0].ds_addr
)
;
841 if (bnxt_hwrm_ring_alloc(sc, HWRM_RING_ALLOC_INPUT_RING_TYPE_TX0x1U,
842 &tx->tx_ring, cp->ring.phys_id, HWRM_NA_SIGNATURE((uint32_t)(-1)), 1) != 0) {
843 printf("%s: failed to set up tx ring\n",
844 DEVNAME(sc)((sc)->sc_dev.dv_xname));
845 goto dealloc_stats;
846 }
847 bnxt_write_tx_doorbell(sc, &tx->tx_ring, 0);
848
849 rx->rx_ring.phys_id = (uint16_t)HWRM_NA_SIGNATURE((uint32_t)(-1));
850 rx->rx_ring.id = BNXT_RX_RING_ID_BASE(8 + 1) + bq->q_index;
851 rx->rx_ring.doorbell = rx->rx_ring.id * 0x80;
852 rx->rx_ring.ring_size = PAGE_SIZE(1 << 12) / sizeof(struct rx_prod_pkt_bd);
853 rx->rx_ring.vaddr = BNXT_DMA_KVA(rx->rx_ring_mem)((void *)(rx->rx_ring_mem)->bdm_kva);
854 rx->rx_ring.paddr = BNXT_DMA_DVA(rx->rx_ring_mem)((u_int64_t)(rx->rx_ring_mem)->bdm_map->dm_segs[0].ds_addr
)
;
855 if (bnxt_hwrm_ring_alloc(sc, HWRM_RING_ALLOC_INPUT_RING_TYPE_RX0x2U,
856 &rx->rx_ring, cp->ring.phys_id, HWRM_NA_SIGNATURE((uint32_t)(-1)), 1) != 0) {
857 printf("%s: failed to set up rx ring\n",
858 DEVNAME(sc)((sc)->sc_dev.dv_xname));
859 goto dealloc_tx;
860 }
861 bnxt_write_rx_doorbell(sc, &rx->rx_ring, 0);
862
863 rx->rx_ag_ring.phys_id = (uint16_t)HWRM_NA_SIGNATURE((uint32_t)(-1));
864 rx->rx_ag_ring.id = BNXT_AG_RING_ID_BASE((8 * 2) + 1) + bq->q_index;
865 rx->rx_ag_ring.doorbell = rx->rx_ag_ring.id * 0x80;
866 rx->rx_ag_ring.ring_size = PAGE_SIZE(1 << 12) / sizeof(struct rx_prod_pkt_bd);
867 rx->rx_ag_ring.vaddr = BNXT_DMA_KVA(rx->rx_ring_mem)((void *)(rx->rx_ring_mem)->bdm_kva) + PAGE_SIZE(1 << 12);
868 rx->rx_ag_ring.paddr = BNXT_DMA_DVA(rx->rx_ring_mem)((u_int64_t)(rx->rx_ring_mem)->bdm_map->dm_segs[0].ds_addr
)
+ PAGE_SIZE(1 << 12);
869 if (bnxt_hwrm_ring_alloc(sc, HWRM_RING_ALLOC_INPUT_RING_TYPE_RX0x2U,
870 &rx->rx_ag_ring, cp->ring.phys_id, HWRM_NA_SIGNATURE((uint32_t)(-1)), 1) != 0) {
871 printf("%s: failed to set up rx ag ring\n",
872 DEVNAME(sc)((sc)->sc_dev.dv_xname));
873 goto dealloc_rx;
874 }
875 bnxt_write_rx_doorbell(sc, &rx->rx_ag_ring, 0);
876
877 rg->grp_id = HWRM_NA_SIGNATURE((uint32_t)(-1));
878 rg->stats_ctx = cp->stats_ctx_id;
879 rg->rx_ring_id = rx->rx_ring.phys_id;
880 rg->ag_ring_id = rx->rx_ag_ring.phys_id;
881 rg->cp_ring_id = cp->ring.phys_id;
882 if (bnxt_hwrm_ring_grp_alloc(sc, rg) != 0) {
883 printf("%s: failed to allocate ring group\n",
884 DEVNAME(sc)((sc)->sc_dev.dv_xname));
885 goto dealloc_ag;
886 }
887
888 rx->rx_slots = mallocarray(sizeof(*bs), rx->rx_ring.ring_size,
889 M_DEVBUF2, M_WAITOK0x0001 | M_ZERO0x0008);
890 if (rx->rx_slots == NULL((void *)0)) {
891 printf("%s: failed to allocate rx slots\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
892 goto dealloc_ring_group;
893 }
894
895 for (i = 0; i < rx->rx_ring.ring_size; i++) {
896 bs = &rx->rx_slots[i];
897 if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), ((1 <<
11)), (1), ((1 << 11)), (0), (0x0000 | 0x0002), (&
bs->bs_map))
898 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &bs->bs_map)(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), ((1 <<
11)), (1), ((1 << 11)), (0), (0x0000 | 0x0002), (&
bs->bs_map))
!= 0) {
899 printf("%s: failed to allocate rx dma maps\n",
900 DEVNAME(sc)((sc)->sc_dev.dv_xname));
901 goto destroy_rx_slots;
902 }
903 }
904
905 rx->rx_ag_slots = mallocarray(sizeof(*bs), rx->rx_ag_ring.ring_size,
906 M_DEVBUF2, M_WAITOK0x0001 | M_ZERO0x0008);
907 if (rx->rx_ag_slots == NULL((void *)0)) {
908 printf("%s: failed to allocate rx ag slots\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
909 goto destroy_rx_slots;
910 }
911
912 for (i = 0; i < rx->rx_ag_ring.ring_size; i++) {
913 bs = &rx->rx_ag_slots[i];
914 if (bus_dmamap_create(sc->sc_dmat, BNXT_AG_BUFFER_SIZE, 1,(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), (8192
), (1), (8192), (0), (0x0000 | 0x0002), (&bs->bs_map))
915 BNXT_AG_BUFFER_SIZE, 0, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), (8192
), (1), (8192), (0), (0x0000 | 0x0002), (&bs->bs_map))
916 &bs->bs_map)(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), (8192
), (1), (8192), (0), (0x0000 | 0x0002), (&bs->bs_map))
!= 0) {
917 printf("%s: failed to allocate rx ag dma maps\n",
918 DEVNAME(sc)((sc)->sc_dev.dv_xname));
919 goto destroy_rx_ag_slots;
920 }
921 }
922
923 tx->tx_slots = mallocarray(sizeof(*bs), tx->tx_ring.ring_size,
924 M_DEVBUF2, M_WAITOK0x0001 | M_ZERO0x0008);
925 if (tx->tx_slots == NULL((void *)0)) {
926 printf("%s: failed to allocate tx slots\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
927 goto destroy_rx_ag_slots;
928 }
929
930 for (i = 0; i < tx->tx_ring.ring_size; i++) {
931 bs = &tx->tx_slots[i];
932 if (bus_dmamap_create(sc->sc_dmat, BNXT_MAX_MTU, BNXT_MAX_TX_SEGS,(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), (9500
), (32), (9500), (0), (0x0000 | 0x0002), (&bs->bs_map)
)
933 BNXT_MAX_MTU, 0, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), (9500
), (32), (9500), (0), (0x0000 | 0x0002), (&bs->bs_map)
)
934 &bs->bs_map)(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), (9500
), (32), (9500), (0), (0x0000 | 0x0002), (&bs->bs_map)
)
!= 0) {
935 printf("%s: failed to allocate tx dma maps\n",
936 DEVNAME(sc)((sc)->sc_dev.dv_xname));
937 goto destroy_tx_slots;
938 }
939 }
940
941 /*
942 * initially, the rx ring must be filled at least some distance beyond
943 * the current consumer index, as it looks like the firmware assumes the
944 * ring is full on creation, but doesn't prefetch the whole thing.
945 * once the whole ring has been used once, we should be able to back off
946 * to 2 or so slots, but we currently don't have a way of doing that.
947 */
948 if_rxr_init(&rx->rxr[0], 32, rx->rx_ring.ring_size - 1);
949 if_rxr_init(&rx->rxr[1], 32, rx->rx_ag_ring.ring_size - 1);
950 rx->rx_prod = 0;
951 rx->rx_cons = 0;
952 rx->rx_ag_prod = 0;
953 rx->rx_ag_cons = 0;
954 bnxt_rx_fill(bq);
955
956 tx->tx_cons = 0;
957 tx->tx_prod = 0;
958 tx->tx_ring_cons = 0;
959 tx->tx_ring_prod = 0;
960 ifq_clr_oactive(ifp->if_ifqs[bq->q_index]);
961 ifq_restart(ifp->if_ifqs[bq->q_index]);
962 return 0;
963
964destroy_tx_slots:
965 bnxt_free_slots(sc, tx->tx_slots, i, tx->tx_ring.ring_size);
966 tx->tx_slots = NULL((void *)0);
967
968 i = rx->rx_ag_ring.ring_size;
969destroy_rx_ag_slots:
970 bnxt_free_slots(sc, rx->rx_ag_slots, i, rx->rx_ag_ring.ring_size);
971 rx->rx_ag_slots = NULL((void *)0);
972
973 i = rx->rx_ring.ring_size;
974destroy_rx_slots:
975 bnxt_free_slots(sc, rx->rx_slots, i, rx->rx_ring.ring_size);
976 rx->rx_slots = NULL((void *)0);
977dealloc_ring_group:
978 bnxt_hwrm_ring_grp_free(sc, &bq->q_rg);
979dealloc_ag:
980 bnxt_hwrm_ring_free(sc, HWRM_RING_ALLOC_INPUT_RING_TYPE_RX0x2U,
981 &rx->rx_ag_ring);
982dealloc_tx:
983 bnxt_hwrm_ring_free(sc, HWRM_RING_ALLOC_INPUT_RING_TYPE_TX0x1U,
984 &tx->tx_ring);
985dealloc_rx:
986 bnxt_hwrm_ring_free(sc, HWRM_RING_ALLOC_INPUT_RING_TYPE_RX0x2U,
987 &rx->rx_ring);
988dealloc_stats:
989 bnxt_hwrm_stat_ctx_free(sc, cp);
990free_rx:
991 bnxt_dmamem_free(sc, rx->rx_ring_mem);
992 rx->rx_ring_mem = NULL((void *)0);
993free_tx:
994 bnxt_dmamem_free(sc, tx->tx_ring_mem);
995 tx->tx_ring_mem = NULL((void *)0);
996 return ENOMEM12;
997}
998
/*
 * bnxt_queue_down: undo bnxt_queue_up for one queue. Tear-down runs in
 * roughly the reverse order of setup: software slot arrays first, then
 * the firmware objects (ring group, stats context, tx/ag/rx rings), and
 * finally the ring DMA memory. The caller is expected to have quiesced
 * the queue (barriers in bnxt_down) before calling this.
 */
void
bnxt_queue_down(struct bnxt_softc *sc, struct bnxt_queue *bq)
{
	struct bnxt_cp_ring *cp = &bq->q_cp;
	struct bnxt_rx_queue *rx = &bq->q_rx;
	struct bnxt_tx_queue *tx = &bq->q_tx;

	/* every slot was created, so allocated == total for each array */
	bnxt_free_slots(sc, tx->tx_slots, tx->tx_ring.ring_size,
	    tx->tx_ring.ring_size);
	tx->tx_slots = NULL;

	bnxt_free_slots(sc, rx->rx_ag_slots, rx->rx_ag_ring.ring_size,
	    rx->rx_ag_ring.ring_size);
	rx->rx_ag_slots = NULL;

	bnxt_free_slots(sc, rx->rx_slots, rx->rx_ring.ring_size,
	    rx->rx_ring.ring_size);
	rx->rx_slots = NULL;

	bnxt_hwrm_ring_grp_free(sc, &bq->q_rg);
	bnxt_hwrm_stat_ctx_free(sc, &bq->q_cp);

	/* may need to wait for 500ms here before we can free the rings */

	bnxt_hwrm_ring_free(sc, HWRM_RING_ALLOC_INPUT_RING_TYPE_TX,
	    &tx->tx_ring);
	bnxt_hwrm_ring_free(sc, HWRM_RING_ALLOC_INPUT_RING_TYPE_RX,
	    &rx->rx_ag_ring);
	bnxt_hwrm_ring_free(sc, HWRM_RING_ALLOC_INPUT_RING_TYPE_RX,
	    &rx->rx_ring);

	/* if no intrmap, leave cp ring in place for async events */
	if (sc->sc_intrmap != NULL) {
		bnxt_hwrm_ring_free(sc, HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
		    &cp->ring);

		bnxt_dmamem_free(sc, cp->ring_mem);
		cp->ring_mem = NULL;
	}

	bnxt_dmamem_free(sc, rx->rx_ring_mem);
	rx->rx_ring_mem = NULL;

	bnxt_dmamem_free(sc, tx->tx_ring_mem);
	tx->tx_ring_mem = NULL;
}
1045
/*
 * bnxt_up: bring the interface up. Allocates the shared stats-context
 * and rx-config DMA blocks, brings up every queue, then creates and
 * configures the vnic (rss context, vnic, placement, mac filter and —
 * with more than one queue — the RSS indirection table and hash key).
 * On any failure everything done so far is unwound and the interface
 * is left down; no error is returned to the caller (void).
 */
void
bnxt_up(struct bnxt_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	int i;

	/* one ctx_hw_stats per queue, all in a single DMA block */
	sc->sc_stats_ctx_mem = bnxt_dmamem_alloc(sc,
	    sizeof(struct ctx_hw_stats) * sc->sc_nqueues);
	if (sc->sc_stats_ctx_mem == NULL) {
		printf("%s: failed to allocate stats contexts\n", DEVNAME(sc));
		return;
	}

	/* page 0: multicast list (bnxt_iff); page 1: RSS table + hash key */
	sc->sc_rx_cfg = bnxt_dmamem_alloc(sc, PAGE_SIZE * 2);
	if (sc->sc_rx_cfg == NULL) {
		printf("%s: failed to allocate rx config buffer\n",
		    DEVNAME(sc));
		goto free_stats;
	}

	for (i = 0; i < sc->sc_nqueues; i++) {
		if (bnxt_queue_up(sc, &sc->sc_queues[i]) != 0) {
			/* i queues are up; unwind just those */
			goto down_queues;
		}
	}

	sc->sc_vnic.rss_id = (uint16_t)HWRM_NA_SIGNATURE;
	if (bnxt_hwrm_vnic_ctx_alloc(sc, &sc->sc_vnic.rss_id) != 0) {
		printf("%s: failed to allocate vnic rss context\n",
		    DEVNAME(sc));
		goto down_all_queues;
	}

	sc->sc_vnic.id = (uint16_t)HWRM_NA_SIGNATURE;
	sc->sc_vnic.def_ring_grp = sc->sc_queues[0].q_rg.grp_id;
	sc->sc_vnic.mru = BNXT_MAX_MTU;
	sc->sc_vnic.cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
	sc->sc_vnic.lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
	sc->sc_vnic.flags = BNXT_VNIC_FLAG_DEFAULT |
	    BNXT_VNIC_FLAG_VLAN_STRIP;
	if (bnxt_hwrm_vnic_alloc(sc, &sc->sc_vnic) != 0) {
		printf("%s: failed to allocate vnic\n", DEVNAME(sc));
		goto dealloc_vnic_ctx;
	}

	if (bnxt_hwrm_vnic_cfg(sc, &sc->sc_vnic) != 0) {
		printf("%s: failed to configure vnic\n", DEVNAME(sc));
		goto dealloc_vnic;
	}

	if (bnxt_hwrm_vnic_cfg_placement(sc, &sc->sc_vnic) != 0) {
		printf("%s: failed to configure vnic placement mode\n",
		    DEVNAME(sc));
		goto dealloc_vnic;
	}

	sc->sc_vnic.filter_id = -1;
	if (bnxt_hwrm_set_filter(sc, &sc->sc_vnic) != 0) {
		printf("%s: failed to set vnic filter\n", DEVNAME(sc));
		goto dealloc_vnic;
	}

	if (sc->sc_nqueues > 1) {
		/* RSS table and hash key live in the 2nd rx_cfg page */
		uint16_t *rss_table = (BNXT_DMA_KVA(sc->sc_rx_cfg) + PAGE_SIZE);
		uint8_t *hash_key = (uint8_t *)(rss_table + HW_HASH_INDEX_SIZE);

		/* spread the indirection entries over the ring groups */
		for (i = 0; i < HW_HASH_INDEX_SIZE; i++) {
			struct bnxt_queue *bq;

			bq = &sc->sc_queues[i % sc->sc_nqueues];
			rss_table[i] = htole16(bq->q_rg.grp_id);
		}
		/* use the host's toeplitz key so flows hash consistently */
		stoeplitz_to_key(hash_key, HW_HASH_KEY_SIZE);

		if (bnxt_hwrm_vnic_rss_cfg(sc, &sc->sc_vnic,
		    HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4 |
		    HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4 |
		    HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6 |
		    HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6,
		    BNXT_DMA_DVA(sc->sc_rx_cfg) + PAGE_SIZE,
		    BNXT_DMA_DVA(sc->sc_rx_cfg) + PAGE_SIZE +
		    (HW_HASH_INDEX_SIZE * sizeof(uint16_t))) != 0) {
			printf("%s: failed to set RSS config\n", DEVNAME(sc));
			goto dealloc_vnic;
		}
	}

	bnxt_iff(sc);
	SET(ifp->if_flags, IFF_RUNNING);

	return;

dealloc_vnic:
	bnxt_hwrm_vnic_free(sc, &sc->sc_vnic);
dealloc_vnic_ctx:
	bnxt_hwrm_vnic_ctx_free(sc, &sc->sc_vnic.rss_id);

down_all_queues:
	i = sc->sc_nqueues;
down_queues:
	while (i-- > 0)
		bnxt_queue_down(sc, &sc->sc_queues[i]);

	bnxt_dmamem_free(sc, sc->sc_rx_cfg);
	sc->sc_rx_cfg = NULL;
free_stats:
	bnxt_dmamem_free(sc, sc->sc_stats_ctx_mem);
	sc->sc_stats_ctx_mem = NULL;
}
1155
/*
 * bnxt_down: stop the interface. IFF_RUNNING is cleared first so the
 * interrupt handlers stop touching the rx/tx rings, then interrupt,
 * ifq and timeout barriers wait out anything already in flight before
 * the vnic and the per-queue resources are torn down. The ordering
 * here is load-bearing; don't reorder without care.
 */
void
bnxt_down(struct bnxt_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	int i;

	CLR(ifp->if_flags, IFF_RUNNING);

	/* wait for the admin/async interrupt handler to drain */
	intr_barrier(sc->sc_ih);

	for (i = 0; i < sc->sc_nqueues; i++) {
		ifq_clr_oactive(ifp->if_ifqs[i]);
		ifq_barrier(ifp->if_ifqs[i]);

		/* stop any pending rx refill timeout for this queue */
		timeout_del_barrier(&sc->sc_queues[i].q_rx.rx_refill);

		if (sc->sc_intrmap != NULL)
			intr_barrier(sc->sc_queues[i].q_ihc);
	}

	bnxt_hwrm_free_filter(sc, &sc->sc_vnic);
	bnxt_hwrm_vnic_free(sc, &sc->sc_vnic);
	bnxt_hwrm_vnic_ctx_free(sc, &sc->sc_vnic.rss_id);

	for (i = 0; i < sc->sc_nqueues; i++)
		bnxt_queue_down(sc, &sc->sc_queues[i]);

	bnxt_dmamem_free(sc, sc->sc_rx_cfg);
	sc->sc_rx_cfg = NULL;

	bnxt_dmamem_free(sc, sc->sc_stats_ctx_mem);
	sc->sc_stats_ctx_mem = NULL;
}
1189
1190void
1191bnxt_iff(struct bnxt_softc *sc)
1192{
1193 struct ifnet *ifp = &sc->sc_ac.ac_if;
1194 struct ether_multi *enm;
1195 struct ether_multistep step;
1196 char *mc_list;
1197 uint32_t rx_mask, mc_count;
1198
1199 rx_mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST0x8U
1200 | HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST0x2U
1201 | HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ANYVLAN_NONVLAN0x100U;
1202
1203 mc_list = BNXT_DMA_KVA(sc->sc_rx_cfg)((void *)(sc->sc_rx_cfg)->bdm_kva);
1204 mc_count = 0;
1205
1206 if (ifp->if_flags & IFF_PROMISC0x100) {
1207 SET(ifp->if_flags, IFF_ALLMULTI)((ifp->if_flags) |= (0x200));
1208 rx_mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS0x10U;
1209 } else if ((sc->sc_ac.ac_multirangecnt > 0) ||
1210 (sc->sc_ac.ac_multicnt > (PAGE_SIZE(1 << 12) / ETHER_ADDR_LEN6))) {
1211 SET(ifp->if_flags, IFF_ALLMULTI)((ifp->if_flags) |= (0x200));
1212 rx_mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST0x4U;
1213 } else {
1214 CLR(ifp->if_flags, IFF_ALLMULTI)((ifp->if_flags) &= ~(0x200));
1215 ETHER_FIRST_MULTI(step, &sc->sc_ac, enm)do { (step).e_enm = ((&(&sc->sc_ac)->ac_multiaddrs
)->lh_first); do { if ((((enm)) = ((step)).e_enm) != ((void
*)0)) ((step)).e_enm = ((((enm)))->enm_list.le_next); } while
( 0); } while ( 0)
;
1216 while (enm != NULL((void *)0)) {
1217 memcpy(mc_list, enm->enm_addrlo, ETHER_ADDR_LEN)__builtin_memcpy((mc_list), (enm->enm_addrlo), (6));
1218 mc_list += ETHER_ADDR_LEN6;
1219 mc_count++;
1220
1221 ETHER_NEXT_MULTI(step, enm)do { if (((enm) = (step).e_enm) != ((void *)0)) (step).e_enm =
(((enm))->enm_list.le_next); } while ( 0)
;
1222 }
1223 }
1224
1225 bnxt_hwrm_cfa_l2_set_rx_mask(sc, sc->sc_vnic.id, rx_mask,
1226 BNXT_DMA_DVA(sc->sc_rx_cfg)((u_int64_t)(sc->sc_rx_cfg)->bdm_map->dm_segs[0].ds_addr
)
, mc_count);
1227}
1228
1229int
1230bnxt_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1231{
1232 struct bnxt_softc *sc = (struct bnxt_softc *)ifp->if_softc;
1233 struct ifreq *ifr = (struct ifreq *)data;
1234 int s, error = 0;
1235
1236 s = splnet()splraise(0x4);
1237 switch (cmd) {
1238 case SIOCSIFADDR((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((12)))
:
1239 ifp->if_flags |= IFF_UP0x1;
1240 /* FALLTHROUGH */
1241
1242 case SIOCSIFFLAGS((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((16)))
:
1243 if (ISSET(ifp->if_flags, IFF_UP)((ifp->if_flags) & (0x1))) {
1244 if (ISSET(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) & (0x40)))
1245 error = ENETRESET52;
1246 else
1247 bnxt_up(sc);
1248 } else {
1249 if (ISSET(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) & (0x40)))
1250 bnxt_down(sc);
1251 }
1252 break;
1253
1254 case SIOCGIFMEDIA(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifmediareq) & 0x1fff) << 16) | ((('i')) <<
8) | ((56)))
:
1255 case SIOCSIFMEDIA(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((55)))
:
1256 error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
1257 break;
1258
1259 case SIOCGIFRXR((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((170)))
:
1260 error = bnxt_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_dataifr_ifru.ifru_data);
1261 break;
1262
1263 case SIOCGIFSFFPAGE(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct if_sffpage) & 0x1fff) << 16) | ((('i')) <<
8) | ((57)))
:
1264 error = bnxt_get_sffpage(sc, (struct if_sffpage *)data);
1265 break;
1266
1267 default:
1268 error = ether_ioctl(ifp, &sc->sc_ac, cmd, data);
1269 }
1270
1271 if (error == ENETRESET52) {
1272 if ((ifp->if_flags & (IFF_UP0x1 | IFF_RUNNING0x40)) ==
1273 (IFF_UP0x1 | IFF_RUNNING0x40))
1274 bnxt_iff(sc);
1275 error = 0;
1276 }
1277
1278 splx(s)spllower(s);
1279
1280 return (error);
1281}
1282
1283int
1284bnxt_rxrinfo(struct bnxt_softc *sc, struct if_rxrinfo *ifri)
1285{
1286 struct if_rxring_info *ifr;
1287 int i;
1288 int error;
1289
1290 ifr = mallocarray(sc->sc_nqueues * 2, sizeof(*ifr), M_TEMP127,
1291 M_WAITOK0x0001 | M_ZERO0x0008 | M_CANFAIL0x0004);
1292 if (ifr == NULL((void *)0))
1293 return (ENOMEM12);
1294
1295 for (i = 0; i < sc->sc_nqueues; i++) {
1296 ifr[(i * 2)].ifr_size = MCLBYTES(1 << 11);
1297 ifr[(i * 2)].ifr_info = sc->sc_queues[i].q_rx.rxr[0];
1298
1299 ifr[(i * 2) + 1].ifr_size = BNXT_AG_BUFFER_SIZE8192;
1300 ifr[(i * 2) + 1].ifr_info = sc->sc_queues[i].q_rx.rxr[1];
1301 }
1302
1303 error = if_rxr_info_ioctl(ifri, sc->sc_nqueues * 2, ifr);
1304 free(ifr, M_TEMP127, sc->sc_nqueues * 2 * sizeof(*ifr));
1305
1306 return (error);
1307}
1308
1309int
1310bnxt_load_mbuf(struct bnxt_softc *sc, struct bnxt_slot *bs, struct mbuf *m)
1311{
1312 switch (bus_dmamap_load_mbuf(sc->sc_dmat, bs->bs_map, m,(*(sc->sc_dmat)->_dmamap_load_mbuf)((sc->sc_dmat), (
bs->bs_map), (m), (0x0100 | 0x0001))
1313 BUS_DMA_STREAMING | BUS_DMA_NOWAIT)(*(sc->sc_dmat)->_dmamap_load_mbuf)((sc->sc_dmat), (
bs->bs_map), (m), (0x0100 | 0x0001))
) {
1314 case 0:
1315 break;
1316
1317 case EFBIG27:
1318 if (m_defrag(m, M_DONTWAIT0x0002) == 0 &&
1319 bus_dmamap_load_mbuf(sc->sc_dmat, bs->bs_map, m,(*(sc->sc_dmat)->_dmamap_load_mbuf)((sc->sc_dmat), (
bs->bs_map), (m), (0x0100 | 0x0001))
1320 BUS_DMA_STREAMING | BUS_DMA_NOWAIT)(*(sc->sc_dmat)->_dmamap_load_mbuf)((sc->sc_dmat), (
bs->bs_map), (m), (0x0100 | 0x0001))
== 0)
1321 break;
1322
1323 default:
1324 return (1);
1325 }
1326
1327 bs->bs_m = m;
1328 return (0);
1329}
1330
/*
 * bnxt_start: ifq start routine. Dequeues mbufs and writes tx
 * descriptors until the ring is (nearly) full or the queue is empty.
 * Each packet takes one tx_bd_short for the first segment, one
 * tx_bd_long_hi for offload flags, and one tx_bd_short per remaining
 * segment (BNXT_TX_SLOTS per packet). All descriptors carry
 * NO_CMPL; it is cleared on the last packet's first bd afterwards so
 * the chip generates a single tx completion per doorbell batch.
 */
void
bnxt_start(struct ifqueue *ifq)
{
	struct ifnet *ifp = ifq->ifq_if;
	struct tx_bd_short *txring;
	struct tx_bd_long_hi *txhi;
	struct bnxt_tx_queue *tx = ifq->ifq_softc;
	struct bnxt_softc *sc = tx->tx_softc;
	struct bnxt_slot *bs;
	bus_dmamap_t map;
	struct mbuf *m;
	u_int idx, free, used, laststart;
	uint16_t txflags;
	int i;

	txring = (struct tx_bd_short *)BNXT_DMA_KVA(tx->tx_ring_mem);

	/* number of free descriptor slots between prod and cons */
	idx = tx->tx_ring_prod;
	free = tx->tx_ring_cons;
	if (free <= idx)
		free += tx->tx_ring.ring_size;
	free -= idx;

	used = 0;

	for (;;) {
		/* +1 for tx_bd_long_hi */
		if (used + BNXT_MAX_TX_SEGS + 1 > free) {
			/* worst-case packet may not fit; stop and back off */
			ifq_set_oactive(ifq);
			break;
		}

		m = ifq_dequeue(ifq);
		if (m == NULL)
			break;

		bs = &tx->tx_slots[tx->tx_prod];
		if (bnxt_load_mbuf(sc, bs, m) != 0) {
			m_freem(m);
			ifp->if_oerrors++;
			continue;
		}

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif
		map = bs->bs_map;
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);
		used += BNXT_TX_SLOTS(bs);

		/* first segment */
		laststart = idx;
		txring[idx].len = htole16(map->dm_segs[0].ds_len);
		/* opaque comes back in the tx completion (see bnxt_txeof) */
		txring[idx].opaque = tx->tx_prod;
		txring[idx].addr = htole64(map->dm_segs[0].ds_addr);

		/* length hint lets the chip size its DMA prefetch */
		if (map->dm_mapsize < 512)
			txflags = TX_BD_LONG_FLAGS_LHINT_LT512;
		else if (map->dm_mapsize < 1024)
			txflags = TX_BD_LONG_FLAGS_LHINT_LT1K;
		else if (map->dm_mapsize < 2048)
			txflags = TX_BD_LONG_FLAGS_LHINT_LT2K;
		else
			txflags = TX_BD_LONG_FLAGS_LHINT_GTE2K;
		txflags |= TX_BD_LONG_TYPE_TX_BD_LONG |
		    TX_BD_LONG_FLAGS_NO_CMPL |
		    (BNXT_TX_SLOTS(bs) << TX_BD_LONG_FLAGS_BD_CNT_SFT);
		if (map->dm_nsegs == 1)
			txflags |= TX_BD_SHORT_FLAGS_PACKET_END;
		txring[idx].flags_type = htole16(txflags);

		idx++;
		if (idx == tx->tx_ring.ring_size)
			idx = 0;

		/* long tx descriptor */
		txhi = (struct tx_bd_long_hi *)&txring[idx];
		memset(txhi, 0, sizeof(*txhi));
		txflags = 0;
		if (m->m_pkthdr.csum_flags & (M_UDP_CSUM_OUT | M_TCP_CSUM_OUT))
			txflags |= TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM;
		if (m->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
			txflags |= TX_BD_LONG_LFLAGS_IP_CHKSUM;
		txhi->lflags = htole16(txflags);

#if NVLAN > 0
		if (m->m_flags & M_VLANTAG) {
			/* hardware inserts the 802.1Q tag on transmit */
			txhi->cfa_meta = htole32(m->m_pkthdr.ether_vtag |
			    TX_BD_LONG_CFA_META_VLAN_TPID_TPID8100 |
			    TX_BD_LONG_CFA_META_KEY_VLAN_TAG);
		}
#endif

		idx++;
		if (idx == tx->tx_ring.ring_size)
			idx = 0;

		/* remaining segments */
		txflags = TX_BD_SHORT_TYPE_TX_BD_SHORT;
		for (i = 1; i < map->dm_nsegs; i++) {
			if (i == map->dm_nsegs - 1)
				txflags |= TX_BD_SHORT_FLAGS_PACKET_END;
			txring[idx].flags_type = htole16(txflags);

			txring[idx].len =
			    htole16(bs->bs_map->dm_segs[i].ds_len);
			txring[idx].opaque = tx->tx_prod;
			txring[idx].addr =
			    htole64(bs->bs_map->dm_segs[i].ds_addr);

			idx++;
			if (idx == tx->tx_ring.ring_size)
				idx = 0;
		}

		if (++tx->tx_prod >= tx->tx_ring.ring_size)
			tx->tx_prod = 0;
	}

	/* unset NO_CMPL on the first bd of the last packet */
	if (used != 0) {
		/* used != 0 guarantees laststart was set in the loop above */
		txring[laststart].flags_type &=
		    ~htole16(TX_BD_SHORT_FLAGS_NO_CMPL);
	}

	bnxt_write_tx_doorbell(sc, &tx->tx_ring, idx);
	tx->tx_ring_prod = idx;
}
1461
1462void
1463bnxt_handle_async_event(struct bnxt_softc *sc, struct cmpl_base *cmpl)
1464{
1465 struct hwrm_async_event_cmpl *ae = (struct hwrm_async_event_cmpl *)cmpl;
1466 uint16_t type = le16toh(ae->event_id)((__uint16_t)(ae->event_id));
1467
1468 switch (type) {
1469 case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE0x0U:
1470 case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE0x2U:
1471 case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE0x6U:
1472 bnxt_hwrm_port_phy_qcfg(sc, NULL((void *)0));
1473 break;
1474
1475 default:
1476 printf("%s: unexpected async event %x\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), type);
1477 break;
1478 }
1479}
1480
1481struct cmpl_base *
1482bnxt_cpr_next_cmpl(struct bnxt_softc *sc, struct bnxt_cp_ring *cpr)
1483{
1484 struct cmpl_base *cmpl;
1485 uint32_t cons;
1486 int v_bit;
1487
1488 cons = cpr->cons + 1;
1489 v_bit = cpr->v_bit;
1490 if (cons == cpr->ring.ring_size) {
1491 cons = 0;
1492 v_bit = !v_bit;
1493 }
1494 cmpl = &((struct cmpl_base *)cpr->ring.vaddr)[cons];
1495
1496 if ((!!(cmpl->info3_v & htole32(CMPL_BASE_V)((__uint32_t)(0x1U)))) != (!!v_bit))
1497 return (NULL((void *)0));
1498
1499 cpr->cons = cons;
1500 cpr->v_bit = v_bit;
1501 return (cmpl);
1502}
1503
1504void
1505bnxt_cpr_commit(struct bnxt_softc *sc, struct bnxt_cp_ring *cpr)
1506{
1507 cpr->commit_cons = cpr->cons;
1508 cpr->commit_v_bit = cpr->v_bit;
1509}
1510
1511void
1512bnxt_cpr_rollback(struct bnxt_softc *sc, struct bnxt_cp_ring *cpr)
1513{
1514 cpr->cons = cpr->commit_cons;
1515 cpr->v_bit = cpr->commit_v_bit;
1516}
1517
1518int
1519bnxt_admin_intr(void *xsc)
1520{
1521 struct bnxt_softc *sc = (struct bnxt_softc *)xsc;
1522 struct bnxt_cp_ring *cpr = &sc->sc_cp_ring;
1523 struct cmpl_base *cmpl;
1524 uint16_t type;
1525
1526 bnxt_write_cp_doorbell(sc, &cpr->ring, 0);
1527 cmpl = bnxt_cpr_next_cmpl(sc, cpr);
1528 while (cmpl != NULL((void *)0)) {
1529 type = le16toh(cmpl->type)((__uint16_t)(cmpl->type)) & CMPL_BASE_TYPE_MASK0x3fU;
1530 switch (type) {
1531 case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT0x2eU:
1532 bnxt_handle_async_event(sc, cmpl);
1533 break;
1534 default:
1535 printf("%s: unexpected completion type %u\n",
1536 DEVNAME(sc)((sc)->sc_dev.dv_xname), type);
1537 }
1538
1539 bnxt_cpr_commit(sc, cpr);
1540 cmpl = bnxt_cpr_next_cmpl(sc, cpr);
1541 }
1542
1543 bnxt_write_cp_doorbell_index(sc, &cpr->ring,
1544 (cpr->commit_cons+1) % cpr->ring.ring_size, 1);
1545 return (1);
1546}
1547
/*
 * bnxt_intr: per-queue interrupt handler. Masks the completion ring
 * doorbell, drains rx/tx/async completions, then rearms. bnxt_rx()
 * may return nonzero ("rollback") when a multi-part rx completion is
 * not yet fully written by the chip; in that case the ring position is
 * rewound to the last committed entry and processing stops so the
 * remainder is picked up on the next interrupt.
 *
 * Returns 1 if any completion was handled, -1 otherwise.
 */
int
bnxt_intr(void *xq)
{
	struct bnxt_queue *q = (struct bnxt_queue *)xq;
	struct bnxt_softc *sc = q->q_sc;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct bnxt_cp_ring *cpr = &q->q_cp;
	struct bnxt_rx_queue *rx = &q->q_rx;
	struct bnxt_tx_queue *tx = &q->q_tx;
	struct cmpl_base *cmpl;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	uint16_t type;
	int rxfree, txfree, agfree, rv, rollback;

	bnxt_write_cp_doorbell(sc, &cpr->ring, 0);
	rxfree = 0;
	txfree = 0;
	agfree = 0;
	rv = -1;
	cmpl = bnxt_cpr_next_cmpl(sc, cpr);
	while (cmpl != NULL) {
		type = le16toh(cmpl->type) & CMPL_BASE_TYPE_MASK;
		rollback = 0;
		switch (type) {
		case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
			bnxt_handle_async_event(sc, cmpl);
			break;
		case CMPL_BASE_TYPE_RX_L2:
			/* rx/tx work is skipped once the interface is down */
			if (ISSET(ifp->if_flags, IFF_RUNNING))
				rollback = bnxt_rx(sc, rx, cpr, &ml, &rxfree,
				    &agfree, cmpl);
			break;
		case CMPL_BASE_TYPE_TX_L2:
			if (ISSET(ifp->if_flags, IFF_RUNNING))
				bnxt_txeof(sc, tx, &txfree, cmpl);
			break;
		default:
			printf("%s: unexpected completion type %u\n",
			    DEVNAME(sc), type);
		}

		if (rollback) {
			/* partial rx completion: rewind and retry later */
			bnxt_cpr_rollback(sc, cpr);
			break;
		}
		rv = 1;
		bnxt_cpr_commit(sc, cpr);
		cmpl = bnxt_cpr_next_cmpl(sc, cpr);
	}

	/*
	 * comments in bnxtreg.h suggest we should be writing cpr->cons here,
	 * but writing cpr->cons + 1 makes it stop interrupting.
	 */
	bnxt_write_cp_doorbell_index(sc, &cpr->ring,
	    (cpr->commit_cons+1) % cpr->ring.ring_size, 1);

	if (rxfree != 0) {
		/* advance rx/ag consumers by the slots the chip consumed */
		rx->rx_cons += rxfree;
		if (rx->rx_cons >= rx->rx_ring.ring_size)
			rx->rx_cons -= rx->rx_ring.ring_size;

		rx->rx_ag_cons += agfree;
		if (rx->rx_ag_cons >= rx->rx_ag_ring.ring_size)
			rx->rx_ag_cons -= rx->rx_ag_ring.ring_size;

		if_rxr_put(&rx->rxr[0], rxfree);
		if_rxr_put(&rx->rxr[1], agfree);

		if (ifiq_input(rx->rx_ifiq, &ml)) {
			if_rxr_livelocked(&rx->rxr[0]);
			if_rxr_livelocked(&rx->rxr[1]);
		}

		bnxt_rx_fill(q);
		/* if a ring ran completely dry, retry the fill from a timeout */
		if ((rx->rx_cons == rx->rx_prod) ||
		    (rx->rx_ag_cons == rx->rx_ag_prod))
			timeout_add(&rx->rx_refill, 0);
	}
	if (txfree != 0) {
		if (ifq_is_oactive(tx->tx_ifq))
			ifq_restart(tx->tx_ifq);
	}
	return (rv);
}
1633
/* Interface watchdog callback; intentionally a no-op for this driver. */
void
bnxt_watchdog(struct ifnet *ifp)
{
}
1638
1639void
1640bnxt_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1641{
1642 struct bnxt_softc *sc = (struct bnxt_softc *)ifp->if_softc;
1643 bnxt_hwrm_port_phy_qcfg(sc, ifmr);
1644}
1645
/*
 * Map a link speed (in bits/sec, via IF_Mbps/IF_Gbps) and a firmware
 * PHY type (HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_*) to the matching
 * ifmedia subtype.  Returns 0 when no ifmedia subtype corresponds.
 */
uint64_t
bnxt_get_media_type(uint64_t speed, int phy_type)
{
	switch (phy_type) {
	/* copper / direct-attach (CR) media */
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_UNKNOWN:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASECR:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_L:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_S:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_N:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASECR4:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASECR4:
		switch (speed) {
		case IF_Gbps(1):
			return IFM_1000_T;
		case IF_Gbps(10):
			return IFM_10G_SFP_CU;
		case IF_Gbps(25):
			return IFM_25G_CR;
		case IF_Gbps(40):
			return IFM_40G_CR4;
		case IF_Gbps(50):
			return IFM_50G_CR2;
		case IF_Gbps(100):
			return IFM_100G_CR4;
		}
		break;

	/* long-reach optics (LR) */
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASELR:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASELR4:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASELR4:
		switch (speed) {
		case IF_Gbps(1):
			return IFM_1000_LX;
		case IF_Gbps(10):
			return IFM_10G_LR;
		case IF_Gbps(25):
			return IFM_25G_LR;
		case IF_Gbps(40):
			return IFM_40G_LR4;
		case IF_Gbps(100):
			return IFM_100G_LR4;
		}
		break;

	/* short-reach optics (SR/SX) */
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASESR:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASESR:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASESR4:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASESR10:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASESX:
		switch (speed) {
		case IF_Gbps(1):
			return IFM_1000_SX;
		case IF_Gbps(10):
			return IFM_10G_SR;
		case IF_Gbps(25):
			return IFM_25G_SR;
		case IF_Gbps(40):
			return IFM_40G_SR4;
		case IF_Gbps(100):
			return IFM_100G_SR4;
		}
		break;

	/* extended-reach optics (ER) */
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASEER4:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASEER4:
		switch (speed) {
		case IF_Gbps(10):
			return IFM_10G_ER;
		case IF_Gbps(25):
			return IFM_25G_ER;
		}
		/* missing IFM_40G_ER4, IFM_100G_ER4 */
		break;

	/* backplane (KR) */
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR4:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR2:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR:
		switch (speed) {
		case IF_Gbps(10):
			return IFM_10G_KR;
		case IF_Gbps(20):
			return IFM_20G_KR2;
		case IF_Gbps(25):
			return IFM_25G_KR;
		case IF_Gbps(40):
			return IFM_40G_KR4;
		case IF_Gbps(50):
			return IFM_50G_KR2;
		case IF_Gbps(100):
			return IFM_100G_KR4;
		}
		break;

	/* backplane (KX) */
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKX:
		switch (speed) {
		case IF_Gbps(1):
			return IFM_1000_KX;
		case IF_Mbps(2500):
			return IFM_2500_KX;
		case IF_Gbps(10):
			return IFM_10G_KX4;
		}
		break;

	/* twisted-pair copper (T) */
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASET:
		switch (speed) {
		case IF_Mbps(10):
			return IFM_10_T;
		case IF_Mbps(100):
			return IFM_100_TX;
		case IF_Gbps(1):
			return IFM_1000_T;
		case IF_Mbps(2500):
			return IFM_2500_T;
		case IF_Gbps(10):
			return IFM_10G_T;
		}
		break;

	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_SGMIIEXTPHY:
		switch (speed) {
		case IF_Gbps(1):
			return IFM_1000_SGMII;
		}
		break;

	/* active optical cables (AOC) */
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_ACTIVE_CABLE:
		switch (speed) {
		case IF_Gbps(10):
			return IFM_10G_AOC;
		case IF_Gbps(25):
			return IFM_25G_AOC;
		case IF_Gbps(40):
			return IFM_40G_AOC;
		case IF_Gbps(100):
			return IFM_100G_AOC;
		}
		break;
	}

	return 0;
}
1790
1791void
1792bnxt_add_media_type(struct bnxt_softc *sc, int supported_speeds, uint64_t speed, uint64_t ifmt)
1793{
1794 int speed_bit = 0;
1795 switch (speed) {
1796 case IF_Gbps(1)((((((1) * 1000ULL) * 1000ULL) * 1000ULL))):
1797 speed_bit = HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_1GB0x8U;
1798 break;
1799 case IF_Gbps(2)((((((2) * 1000ULL) * 1000ULL) * 1000ULL))):
1800 speed_bit = HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2GB0x10U;
1801 break;
1802 case IF_Mbps(2500)((((2500) * 1000ULL) * 1000ULL)):
1803 speed_bit = HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2_5GB0x20U;
1804 break;
1805 case IF_Gbps(10)((((((10) * 1000ULL) * 1000ULL) * 1000ULL))):
1806 speed_bit = HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10GB0x40U;
1807 break;
1808 case IF_Gbps(20)((((((20) * 1000ULL) * 1000ULL) * 1000ULL))):
1809 speed_bit = HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_20GB0x80U;
1810 break;
1811 case IF_Gbps(25)((((((25) * 1000ULL) * 1000ULL) * 1000ULL))):
1812 speed_bit = HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_25GB0x100U;
1813 break;
1814 case IF_Gbps(40)((((((40) * 1000ULL) * 1000ULL) * 1000ULL))):
1815 speed_bit = HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_40GB0x200U;
1816 break;
1817 case IF_Gbps(50)((((((50) * 1000ULL) * 1000ULL) * 1000ULL))):
1818 speed_bit = HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_50GB0x400U;
1819 break;
1820 case IF_Gbps(100)((((((100) * 1000ULL) * 1000ULL) * 1000ULL))):
1821 speed_bit = HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100GB0x800U;
1822 break;
1823 }
1824 if (supported_speeds & speed_bit)
1825 ifmedia_add(&sc->sc_media, IFM_ETHER0x0000000000000100ULL | ifmt, 0, NULL((void *)0));
1826}
1827
/*
 * Query the PHY configuration from firmware, rebuild the ifmedia list
 * from the supported speeds, optionally fill in ifmr, and propagate
 * link-state changes to the network stack.  Returns 0 on success or an
 * errno from the HWRM exchange.
 */
int
bnxt_hwrm_port_phy_qcfg(struct bnxt_softc *softc, struct ifmediareq *ifmr)
{
	struct ifnet *ifp = &softc->sc_ac.ac_if;
	struct hwrm_port_phy_qcfg_input req = {0};
	struct hwrm_port_phy_qcfg_output *resp =
	    BNXT_DMA_KVA(softc->sc_cmd_resp);
	int link_state = LINK_STATE_DOWN;
	uint64_t speeds[] = {
		IF_Gbps(1), IF_Gbps(2), IF_Mbps(2500), IF_Gbps(10), IF_Gbps(20),
		IF_Gbps(25), IF_Gbps(40), IF_Gbps(50), IF_Gbps(100)
	};
	uint64_t media_type;
	int duplex;
	int rc = 0;
	int i;

	/* resp aliases the shared command response buffer; hold the lock */
	BNXT_HWRM_LOCK(softc);
	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_PORT_PHY_QCFG);

	rc = _hwrm_send_message(softc, &req, sizeof(req));
	if (rc) {
		printf("%s: failed to query port phy config\n", DEVNAME(softc));
		goto exit;
	}

	/* older firmware reports duplex in a different field */
	if (softc->sc_hwrm_ver > 0x10800)
		duplex = resp->duplex_state;
	else
		duplex = resp->duplex_cfg;

	if (resp->link == HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) {
		if (duplex == HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_STATE_HALF)
			link_state = LINK_STATE_HALF_DUPLEX;
		else
			link_state = LINK_STATE_FULL_DUPLEX;

		switch (resp->link_speed) {
		case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10MB:
			ifp->if_baudrate = IF_Mbps(10);
			break;
		case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
			ifp->if_baudrate = IF_Mbps(100);
			break;
		case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
			ifp->if_baudrate = IF_Gbps(1);
			break;
		case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
			ifp->if_baudrate = IF_Gbps(2);
			break;
		case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
			ifp->if_baudrate = IF_Mbps(2500);
			break;
		case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
			ifp->if_baudrate = IF_Gbps(10);
			break;
		case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
			ifp->if_baudrate = IF_Gbps(20);
			break;
		case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
			ifp->if_baudrate = IF_Gbps(25);
			break;
		case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
			ifp->if_baudrate = IF_Gbps(40);
			break;
		case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
			ifp->if_baudrate = IF_Gbps(50);
			break;
		case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB:
			ifp->if_baudrate = IF_Gbps(100);
			break;
		}
	}

	/* rebuild the media list from the firmware's supported speeds */
	ifmedia_delete_instance(&softc->sc_media, IFM_INST_ANY);
	for (i = 0; i < nitems(speeds); i++) {
		media_type = bnxt_get_media_type(speeds[i], resp->phy_type);
		if (media_type != 0)
			bnxt_add_media_type(softc, resp->support_speeds,
			    speeds[i], media_type);
	}
	ifmedia_add(&softc->sc_media, IFM_ETHER|IFM_AUTO, 0, NULL);
	ifmedia_set(&softc->sc_media, IFM_ETHER|IFM_AUTO);

	if (ifmr != NULL) {
		ifmr->ifm_status = IFM_AVALID;
		if (LINK_STATE_IS_UP(ifp->if_link_state)) {
			ifmr->ifm_status |= IFM_ACTIVE;
			ifmr->ifm_active = IFM_ETHER | IFM_AUTO;
			if (resp->pause & HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX)
				ifmr->ifm_active |= IFM_ETH_TXPAUSE;
			if (resp->pause & HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX)
				ifmr->ifm_active |= IFM_ETH_RXPAUSE;
			if (duplex == HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_STATE_HALF)
				ifmr->ifm_active |= IFM_HDX;
			else
				ifmr->ifm_active |= IFM_FDX;

			media_type = bnxt_get_media_type(ifp->if_baudrate, resp->phy_type);
			if (media_type != 0)
				ifmr->ifm_active |= media_type;
		}
	}

exit:
	BNXT_HWRM_UNLOCK(softc);

	/* report link transitions to the stack, outside the HWRM lock */
	if (rc == 0 && (link_state != ifp->if_link_state)) {
		ifp->if_link_state = link_state;
		if_link_state_change(ifp);
	}

	return rc;
}
1942
/*
 * ifmedia change callback: translate the selected ifmedia subtype into
 * a forced HWRM link speed, or fall back to autonegotiation for
 * IFM_AUTO / unknown subtypes, and push the new PHY config to firmware.
 */
int
bnxt_media_change(struct ifnet *ifp)
{
	struct bnxt_softc *sc = (struct bnxt_softc *)ifp->if_softc;
	struct hwrm_port_phy_cfg_input req = {0};
	uint64_t link_speed;

	if (IFM_TYPE(sc->sc_media.ifm_media) != IFM_ETHER)
		return EINVAL;

	/* NIC partitions may not reconfigure the shared port */
	if (sc->sc_flags & BNXT_FLAG_NPAR)
		return ENODEV;

	bnxt_hwrm_cmd_hdr_init(sc, &req, HWRM_PORT_PHY_CFG);

	switch (IFM_SUBTYPE(sc->sc_media.ifm_media)) {
	case IFM_100G_CR4:
	case IFM_100G_SR4:
	case IFM_100G_KR4:
	case IFM_100G_LR4:
	case IFM_100G_AOC:
		link_speed = HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_100GB;
		break;

	case IFM_50G_CR2:
	case IFM_50G_KR2:
		link_speed = HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_50GB;
		break;

	case IFM_40G_CR4:
	case IFM_40G_SR4:
	case IFM_40G_LR4:
	case IFM_40G_KR4:
	case IFM_40G_AOC:
		link_speed = HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_40GB;
		break;

	case IFM_25G_CR:
	case IFM_25G_KR:
	case IFM_25G_SR:
	case IFM_25G_LR:
	case IFM_25G_ER:
	case IFM_25G_AOC:
		link_speed = HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_25GB;
		break;

	case IFM_10G_LR:
	case IFM_10G_SR:
	case IFM_10G_CX4:
	case IFM_10G_T:
	case IFM_10G_SFP_CU:
	case IFM_10G_LRM:
	case IFM_10G_KX4:
	case IFM_10G_KR:
	case IFM_10G_CR1:
	case IFM_10G_ER:
	case IFM_10G_AOC:
		link_speed = HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_10GB;
		break;

	case IFM_2500_SX:
	case IFM_2500_KX:
	case IFM_2500_T:
		link_speed = HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_2_5GB;
		break;

	case IFM_1000_T:
	case IFM_1000_LX:
	case IFM_1000_SX:
	case IFM_1000_CX:
	case IFM_1000_KX:
		link_speed = HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_1GB;
		break;

	case IFM_100_TX:
		link_speed = HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_100MB;
		break;

	default:
		/* IFM_AUTO and anything unmapped: autonegotiate below */
		link_speed = 0;
	}

	req.enables |= htole32(HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX);
	req.auto_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
	if (link_speed == 0) {
		req.auto_mode |=
		    HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS;
		req.flags |=
		    htole32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG);
		req.enables |=
		    htole32(HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE);
	} else {
		req.force_link_speed = htole16(link_speed);
		req.flags |= htole32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE);
	}
	req.flags |= htole32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY);

	return hwrm_send_message(sc, &req, sizeof(req));
}
2042
2043int
2044bnxt_media_autonegotiate(struct bnxt_softc *sc)
2045{
2046 struct hwrm_port_phy_cfg_input req = {0};
2047
2048 if (sc->sc_flags & BNXT_FLAG_NPAR0x0002)
2049 return ENODEV19;
2050
2051 bnxt_hwrm_cmd_hdr_init(sc, &req, HWRM_PORT_PHY_CFG(0x20U));
2052 req.auto_mode |= HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS0x1U;
2053 req.auto_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH0x2U;
2054 req.enables |= htole32(HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE |((__uint32_t)(0x1U | 0x2U))
2055 HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX)((__uint32_t)(0x1U | 0x2U));
2056 req.flags |= htole32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG)((__uint32_t)(0x8U));
2057 req.flags |= htole32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY)((__uint32_t)(0x1U));
2058
2059 return hwrm_send_message(sc, &req, sizeof(req));
2060}
2061
2062
2063void
2064bnxt_mark_cpr_invalid(struct bnxt_cp_ring *cpr)
2065{
2066 struct cmpl_base *cmp = (void *)cpr->ring.vaddr;
2067 int i;
2068
2069 for (i = 0; i < cpr->ring.ring_size; i++)
2070 cmp[i].info3_v = !cpr->v_bit;
2071}
2072
/*
 * Ring the completion ring doorbell without an index, enabling
 * (enable != 0) or masking (enable == 0, via CMPL_DOORBELL_MASK)
 * interrupts from the ring.  The barriers order all prior doorbell
 * writes before this one.
 */
void
bnxt_write_cp_doorbell(struct bnxt_softc *sc, struct bnxt_ring *ring,
    int enable)
{
	uint32_t val = CMPL_DOORBELL_KEY_CMPL;
	if (enable == 0)
		val |= CMPL_DOORBELL_MASK;

	bus_space_barrier(sc->sc_db_t, sc->sc_db_h, ring->doorbell, 4,
	    BUS_SPACE_BARRIER_WRITE);
	bus_space_barrier(sc->sc_db_t, sc->sc_db_h, 0, sc->sc_db_s,
	    BUS_SPACE_BARRIER_WRITE);
	bus_space_write_4(sc->sc_db_t, sc->sc_db_h, ring->doorbell,
	    htole32(val));
}
2088
/*
 * Ring the completion ring doorbell with a consumer index
 * (CMPL_DOORBELL_IDX_VALID), optionally masking interrupts when
 * enable == 0.  Barriers bracket the write to keep doorbell ordering.
 */
void
bnxt_write_cp_doorbell_index(struct bnxt_softc *sc, struct bnxt_ring *ring,
    uint32_t index, int enable)
{
	uint32_t val = CMPL_DOORBELL_KEY_CMPL | CMPL_DOORBELL_IDX_VALID |
	    (index & CMPL_DOORBELL_IDX_MASK);
	if (enable == 0)
		val |= CMPL_DOORBELL_MASK;
	bus_space_barrier(sc->sc_db_t, sc->sc_db_h, ring->doorbell, 4,
	    BUS_SPACE_BARRIER_WRITE);
	bus_space_write_4(sc->sc_db_t, sc->sc_db_h, ring->doorbell,
	    htole32(val));
	bus_space_barrier(sc->sc_db_t, sc->sc_db_h, 0, sc->sc_db_s,
	    BUS_SPACE_BARRIER_WRITE);
}
2104
/*
 * Notify the hardware of a new rx producer index.  The doorbell is
 * written twice with barriers in between as a hardware workaround
 * (see the original comment below).
 */
void
bnxt_write_rx_doorbell(struct bnxt_softc *sc, struct bnxt_ring *ring, int index)
{
	uint32_t val = RX_DOORBELL_KEY_RX | index;
	bus_space_barrier(sc->sc_db_t, sc->sc_db_h, ring->doorbell, 4,
	    BUS_SPACE_BARRIER_WRITE);
	bus_space_write_4(sc->sc_db_t, sc->sc_db_h, ring->doorbell,
	    htole32(val));

	/* second write isn't necessary on all hardware */
	bus_space_barrier(sc->sc_db_t, sc->sc_db_h, ring->doorbell, 4,
	    BUS_SPACE_BARRIER_WRITE);
	bus_space_write_4(sc->sc_db_t, sc->sc_db_h, ring->doorbell,
	    htole32(val));
}
2120
/*
 * Notify the hardware of a new tx producer index.  Mirrors
 * bnxt_write_rx_doorbell(), including the double write workaround.
 */
void
bnxt_write_tx_doorbell(struct bnxt_softc *sc, struct bnxt_ring *ring, int index)
{
	uint32_t val = TX_DOORBELL_KEY_TX | index;
	bus_space_barrier(sc->sc_db_t, sc->sc_db_h, ring->doorbell, 4,
	    BUS_SPACE_BARRIER_WRITE);
	bus_space_write_4(sc->sc_db_t, sc->sc_db_h, ring->doorbell,
	    htole32(val));

	/* second write isn't necessary on all hardware */
	bus_space_barrier(sc->sc_db_t, sc->sc_db_h, ring->doorbell, 4,
	    BUS_SPACE_BARRIER_WRITE);
	bus_space_write_4(sc->sc_db_t, sc->sc_db_h, ring->doorbell,
	    htole32(val));
}
2136
/*
 * Allocate mbuf clusters of bufsize bytes, DMA-map them and publish
 * them as rx buffer descriptors of type bdtype, starting at *prod.
 * Stops early on allocation or mapping failure.  Updates *prod to the
 * new producer index, rings the rx doorbell if anything was posted,
 * and returns the number of requested slots that were NOT filled.
 */
u_int
bnxt_rx_fill_slots(struct bnxt_softc *sc, struct bnxt_ring *ring, void *ring_mem,
    struct bnxt_slot *slots, uint *prod, int bufsize, uint16_t bdtype,
    u_int nslots)
{
	struct rx_prod_pkt_bd *rxring;
	struct bnxt_slot *bs;
	struct mbuf *m;
	uint p, fills;

	rxring = (struct rx_prod_pkt_bd *)ring_mem;
	p = *prod;
	for (fills = 0; fills < nslots; fills++) {
		bs = &slots[p];
		m = MCLGETL(NULL, M_DONTWAIT, bufsize);
		if (m == NULL)
			break;

		m->m_len = m->m_pkthdr.len = bufsize;
		if (bus_dmamap_load_mbuf(sc->sc_dmat, bs->bs_map, m,
		    BUS_DMA_NOWAIT) != 0) {
			m_freem(m);
			break;
		}
		bs->bs_m = m;

		/* opaque carries the slot index back in the completion */
		rxring[p].flags_type = htole16(bdtype);
		rxring[p].len = htole16(bufsize);
		rxring[p].opaque = p;
		rxring[p].addr = htole64(bs->bs_map->dm_segs[0].ds_addr);

		if (++p >= ring->ring_size)
			p = 0;
	}

	if (fills != 0)
		bnxt_write_rx_doorbell(sc, ring, p);
	*prod = p;

	return (nslots - fills);
}
2178
2179int
2180bnxt_rx_fill(struct bnxt_queue *q)
2181{
2182 struct bnxt_rx_queue *rx = &q->q_rx;
2183 struct bnxt_softc *sc = q->q_sc;
2184 u_int slots;
2185 int rv = 0;
2186
2187 slots = if_rxr_get(&rx->rxr[0], rx->rx_ring.ring_size);
2188 if (slots > 0) {
2189 slots = bnxt_rx_fill_slots(sc, &rx->rx_ring,
2190 BNXT_DMA_KVA(rx->rx_ring_mem)((void *)(rx->rx_ring_mem)->bdm_kva), rx->rx_slots,
2191 &rx->rx_prod, MCLBYTES(1 << 11),
2192 RX_PROD_PKT_BD_TYPE_RX_PROD_PKT0x4U, slots);
2193 if_rxr_put(&rx->rxr[0], slots)do { (&rx->rxr[0])->rxr_alive -= (slots); } while (
0)
;
2194 } else
2195 rv = 1;
2196
2197 slots = if_rxr_get(&rx->rxr[1], rx->rx_ag_ring.ring_size);
2198 if (slots > 0) {
2199 slots = bnxt_rx_fill_slots(sc, &rx->rx_ag_ring,
2200 BNXT_DMA_KVA(rx->rx_ring_mem)((void *)(rx->rx_ring_mem)->bdm_kva) + PAGE_SIZE(1 << 12),
2201 rx->rx_ag_slots, &rx->rx_ag_prod,
2202 BNXT_AG_BUFFER_SIZE8192,
2203 RX_PROD_AGG_BD_TYPE_RX_PROD_AGG0x6U, slots);
2204 if_rxr_put(&rx->rxr[1], slots)do { (&rx->rxr[1])->rxr_alive -= (slots); } while (
0)
;
2205 } else
2206 rv = 1;
2207
2208 return (rv);
2209}
2210
2211void
2212bnxt_refill(void *xq)
2213{
2214 struct bnxt_queue *q = xq;
2215 struct bnxt_rx_queue *rx = &q->q_rx;
2216
2217 bnxt_rx_fill(q);
2218
2219 if (rx->rx_cons == rx->rx_prod)
2220 timeout_add(&rx->rx_refill, 1);
2221}
2222
/*
 * Process one rx packet completion.  A packet completion spans two
 * (or, with an aggregation buffer, three) entries in the completion
 * ring; if the later parts have not been produced yet, return 1 so the
 * caller rolls the ring back and retries on the next interrupt.
 * On success the mbuf is queued on ml, *slots (and *agslots when an
 * aggregation buffer was used) is incremented, and 0 is returned.
 */
int
bnxt_rx(struct bnxt_softc *sc, struct bnxt_rx_queue *rx,
    struct bnxt_cp_ring *cpr, struct mbuf_list *ml, int *slots, int *agslots,
    struct cmpl_base *cmpl)
{
	struct mbuf *m, *am;
	struct bnxt_slot *bs;
	struct rx_pkt_cmpl *rxlo = (struct rx_pkt_cmpl *)cmpl;
	struct rx_pkt_cmpl_hi *rxhi;
	struct rx_abuf_cmpl *ag;
	uint32_t flags;
	uint16_t errors;

	/* second part of the rx completion */
	rxhi = (struct rx_pkt_cmpl_hi *)bnxt_cpr_next_cmpl(sc, cpr);
	if (rxhi == NULL) {
		return (1);
	}

	/* packets over 2k in size use an aggregation buffer completion too */
	ag = NULL;
	if ((rxlo->agg_bufs_v1 >> RX_PKT_CMPL_AGG_BUFS_SFT) != 0) {
		ag = (struct rx_abuf_cmpl *)bnxt_cpr_next_cmpl(sc, cpr);
		if (ag == NULL) {
			return (1);
		}
	}

	/* opaque is the rx slot index we posted in bnxt_rx_fill_slots() */
	bs = &rx->rx_slots[rxlo->opaque];
	bus_dmamap_sync(sc->sc_dmat, bs->bs_map, 0, bs->bs_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->sc_dmat, bs->bs_map);

	m = bs->bs_m;
	bs->bs_m = NULL;
	m->m_pkthdr.len = m->m_len = letoh16(rxlo->len);
	(*slots)++;

	/* checksum flags */
	flags = lemtoh32(&rxhi->flags2);
	errors = lemtoh16(&rxhi->errors_v2);
	if ((flags & RX_PKT_CMPL_FLAGS2_IP_CS_CALC) != 0 &&
	    (errors & RX_PKT_CMPL_ERRORS_IP_CS_ERROR) == 0)
		m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;

	if ((flags & RX_PKT_CMPL_FLAGS2_L4_CS_CALC) != 0 &&
	    (errors & RX_PKT_CMPL_ERRORS_L4_CS_ERROR) == 0)
		m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK |
		    M_UDP_CSUM_IN_OK;

#if NVLAN > 0
	/* hardware-stripped VLAN tag, if present in the metadata */
	if ((flags & RX_PKT_CMPL_FLAGS2_META_FORMAT_MASK) ==
	    RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN) {
		m->m_pkthdr.ether_vtag = lemtoh16(&rxhi->metadata);
		m->m_flags |= M_VLANTAG;
	}
#endif

	/* RSS hash for flow steering in the stack */
	if (lemtoh16(&rxlo->flags_type) & RX_PKT_CMPL_FLAGS_RSS_VALID) {
		m->m_pkthdr.ph_flowid = lemtoh32(&rxlo->rss_hash);
		m->m_pkthdr.csum_flags |= M_FLOWID;
	}

	if (ag != NULL) {
		/* chain the aggregation buffer onto the head mbuf */
		bs = &rx->rx_ag_slots[ag->opaque];
		bus_dmamap_sync(sc->sc_dmat, bs->bs_map, 0,
		    bs->bs_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, bs->bs_map);

		am = bs->bs_m;
		bs->bs_m = NULL;
		am->m_len = letoh16(ag->len);
		m->m_next = am;
		m->m_pkthdr.len += am->m_len;
		(*agslots)++;
	}

	ml_enqueue(ml, m);
	return (0);
}
2303
/*
 * Process one tx completion: unload and free every transmitted packet
 * from the current tx consumer up to and including the slot named by
 * the completion's opaque field, counting the reclaimed descriptor
 * slots into *txfree.
 */
void
bnxt_txeof(struct bnxt_softc *sc, struct bnxt_tx_queue *tx, int *txfree,
    struct cmpl_base *cmpl)
{
	struct tx_cmpl *txcmpl = (struct tx_cmpl *)cmpl;
	struct bnxt_slot *bs;
	bus_dmamap_t map;
	u_int idx, segs, last;

	idx = tx->tx_ring_cons;
	last = tx->tx_cons;
	do {
		bs = &tx->tx_slots[tx->tx_cons];
		map = bs->bs_map;

		/* descriptor slots used by this packet (segments + bd) */
		segs = BNXT_TX_SLOTS(bs);
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, map);
		m_freem(bs->bs_m);
		bs->bs_m = NULL;

		idx += segs;
		(*txfree) += segs;
		if (idx >= tx->tx_ring.ring_size)
			idx -= tx->tx_ring.ring_size;

		/* remember the slot we just reclaimed; loop ends once it
		 * matches the completion's opaque value */
		last = tx->tx_cons;
		if (++tx->tx_cons >= tx->tx_ring.ring_size)
			tx->tx_cons = 0;

	} while (last != txcmpl->opaque);
	tx->tx_ring_cons = idx;
}
2338
2339/* bnxt_hwrm.c */
2340
2341int
2342bnxt_hwrm_err_map(uint16_t err)
2343{
2344 int rc;
2345
2346 switch (err) {
2347 case HWRM_ERR_CODE_SUCCESS(0x0U):
2348 return 0;
2349 case HWRM_ERR_CODE_INVALID_PARAMS(0x2U):
2350 case HWRM_ERR_CODE_INVALID_FLAGS(0x5U):
2351 case HWRM_ERR_CODE_INVALID_ENABLES(0x6U):
2352 return EINVAL22;
2353 case HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED(0x3U):
2354 return EACCES13;
2355 case HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR(0x4U):
2356 return ENOMEM12;
2357 case HWRM_ERR_CODE_CMD_NOT_SUPPORTED(0xffffU):
2358 return ENOSYS78;
2359 case HWRM_ERR_CODE_FAIL(0x1U):
2360 return EIO5;
2361 case HWRM_ERR_CODE_HWRM_ERROR(0xfU):
2362 case HWRM_ERR_CODE_UNKNOWN_ERR(0xfffeU):
2363 default:
2364 return EIO5;
2365 }
2366
2367 return rc;
2368}
2369
2370void
2371bnxt_hwrm_cmd_hdr_init(struct bnxt_softc *softc, void *request,
2372 uint16_t req_type)
2373{
2374 struct input *req = request;
2375
2376 req->req_type = htole16(req_type)((__uint16_t)(req_type));
2377 req->cmpl_ring = 0xffff;
2378 req->target_id = 0xffff;
2379 req->resp_addr = htole64(BNXT_DMA_DVA(softc->sc_cmd_resp))((__uint64_t)(((u_int64_t)(softc->sc_cmd_resp)->bdm_map
->dm_segs[0].ds_addr)))
;
2380}
2381
2382int
2383_hwrm_send_message(struct bnxt_softc *softc, void *msg, uint32_t msg_len)
2384{
2385 struct input *req = msg;
2386 struct hwrm_err_output *resp = BNXT_DMA_KVA(softc->sc_cmd_resp)((void *)(softc->sc_cmd_resp)->bdm_kva);
2387 uint32_t *data = msg;
2388 int i;
2389 uint8_t *valid;
2390 uint16_t err;
2391 uint16_t max_req_len = HWRM_MAX_REQ_LEN(128);
2392 struct hwrm_short_input short_input = {0};
2393
2394 /* TODO: DMASYNC in here. */
2395 req->seq_id = htole16(softc->sc_cmd_seq++)((__uint16_t)(softc->sc_cmd_seq++));
2396 memset(resp, 0, PAGE_SIZE)__builtin_memset((resp), (0), ((1 << 12)));
2397
2398 if (softc->sc_flags & BNXT_FLAG_SHORT_CMD0x0008) {
1
Assuming the condition is true
2
Taking true branch
2399 void *short_cmd_req = BNXT_DMA_KVA(softc->sc_cmd_resp)((void *)(softc->sc_cmd_resp)->bdm_kva);
2400
2401 memcpy(short_cmd_req, req, msg_len)__builtin_memcpy((short_cmd_req), (req), (msg_len));
2402 memset((uint8_t *) short_cmd_req + msg_len, 0,__builtin_memset(((uint8_t *) short_cmd_req + msg_len), (0), (
softc->sc_max_req_len - msg_len))
2403 softc->sc_max_req_len - msg_len)__builtin_memset(((uint8_t *) short_cmd_req + msg_len), (0), (
softc->sc_max_req_len - msg_len))
;
2404
2405 short_input.req_type = req->req_type;
2406 short_input.signature =
2407 htole16(HWRM_SHORT_INPUT_SIGNATURE_SHORT_CMD)((__uint16_t)(0x4321U));
2408 short_input.size = htole16(msg_len)((__uint16_t)(msg_len));
2409 short_input.req_addr =
2410 htole64(BNXT_DMA_DVA(softc->sc_cmd_resp))((__uint64_t)(((u_int64_t)(softc->sc_cmd_resp)->bdm_map
->dm_segs[0].ds_addr)))
;
2411
2412 data = (uint32_t *)&short_input;
2413 msg_len = sizeof(short_input);
2414
2415 /* Sync memory write before updating doorbell */
2416 membar_sync()do { __asm volatile("mfence" ::: "memory"); } while (0);
3
Loop condition is false. Exiting loop
2417
2418 max_req_len = BNXT_HWRM_SHORT_REQ_LENsizeof(struct hwrm_short_input);
2419 }
2420
2421 /* Write request msg to hwrm channel */
2422 for (i = 0; i < msg_len; i += 4) {
4
Loop condition is true. Entering loop body
5
Loop condition is true. Entering loop body
6
Loop condition is true. Entering loop body
7
Loop condition is true. Entering loop body
2423 bus_space_write_4(softc->sc_hwrm_t,((softc->sc_hwrm_t)->write_4((softc->sc_hwrm_h), (i)
, (*data)))
8
3rd function call argument is an uninitialized value
2424 softc->sc_hwrm_h,((softc->sc_hwrm_t)->write_4((softc->sc_hwrm_h), (i)
, (*data)))
2425 i, *data)((softc->sc_hwrm_t)->write_4((softc->sc_hwrm_h), (i)
, (*data)))
;
2426 data++;
2427 }
2428
2429 /* Clear to the end of the request buffer */
2430 for (i = msg_len; i < max_req_len; i += 4)
2431 bus_space_write_4(softc->sc_hwrm_t, softc->sc_hwrm_h,((softc->sc_hwrm_t)->write_4((softc->sc_hwrm_h), (i)
, (0)))
2432 i, 0)((softc->sc_hwrm_t)->write_4((softc->sc_hwrm_h), (i)
, (0)))
;
2433
2434 /* Ring channel doorbell */
2435 bus_space_write_4(softc->sc_hwrm_t, softc->sc_hwrm_h, 0x100,((softc->sc_hwrm_t)->write_4((softc->sc_hwrm_h), (0x100
), (((__uint32_t)(1)))))
2436 htole32(1))((softc->sc_hwrm_t)->write_4((softc->sc_hwrm_h), (0x100
), (((__uint32_t)(1)))))
;
2437
2438 /* Check if response len is updated */
2439 for (i = 0; i < softc->sc_cmd_timeo; i++) {
2440 if (resp->resp_len && resp->resp_len <= 4096)
2441 break;
2442 DELAY(1000)(*delay_func)(1000);
2443 }
2444 if (i >= softc->sc_cmd_timeo) {
2445 printf("%s: timeout sending %s: (timeout: %u) seq: %d\n",
2446 DEVNAME(softc)((softc)->sc_dev.dv_xname), GET_HWRM_REQ_TYPE(req->req_type)((req->req_type) == 0x99 ? "HWRM_CFA_NTUPLE_FILTER_ALLOC":
((req->req_type) == 0x90 ? "HWRM_CFA_L2_FILTER_ALLOC": ((
req->req_type) == 0x91 ? "HWRM_CFA_L2_FILTER_FREE": ((req->
req_type) == 0x92 ? "HWRM_CFA_L2_FILTER_CFG": ((req->req_type
) == 0x93 ? "HWRM_CFA_L2_SET_RX_MASK": ((req->req_type) ==
0x94 ? "HWRM_CFA_VLAN_ANTISPOOF_CFG": ((req->req_type) ==
0x95 ? "HWRM_CFA_TUNNEL_FILTER_ALLOC": ((req->req_type) ==
0x96 ? "HWRM_CFA_TUNNEL_FILTER_FREE": ((req->req_type) ==
0x10 ? "RESERVED1": ((req->req_type) == 0x11 ? "HWRM_FUNC_RESET"
: ((req->req_type) == 0x12 ? "HWRM_FUNC_GETFID": ((req->
req_type) == 0x13 ? "HWRM_FUNC_VF_ALLOC": ((req->req_type)
== 0x14 ? "HWRM_FUNC_VF_FREE": ((req->req_type) == 0x15 ?
"HWRM_FUNC_QCAPS": ((req->req_type) == 0x16 ? "HWRM_FUNC_QCFG"
: ((req->req_type) == 0x17 ? "HWRM_FUNC_CFG": ((req->req_type
) == 0x18 ? "HWRM_FUNC_QSTATS": ((req->req_type) == 0x19 ?
"HWRM_FUNC_CLR_STATS": ((req->req_type) == 0xe0 ? "HWRM_TEMP_MONITOR_QUERY"
: ((req->req_type) == 0x1a ? "HWRM_FUNC_DRV_UNRGTR": ((req
->req_type) == 0x1b ? "HWRM_FUNC_VF_RESC_FREE": ((req->
req_type) == 0x1c ? "HWRM_FUNC_VF_VNIC_IDS_QUERY": ((req->
req_type) == 0x1d ? "HWRM_FUNC_DRV_RGTR": ((req->req_type)
== 0x1e ? "HWRM_FUNC_DRV_QVER": ((req->req_type) == 0x1f ?
"HWRM_FUNC_BUF_RGTR": ((req->req_type) == 0x9a ? "HWRM_CFA_NTUPLE_FILTER_FREE"
: ((req->req_type) == 0x9b ? "HWRM_CFA_NTUPLE_FILTER_CFG":
((req->req_type) == 0xd3 ? "HWRM_FWD_ASYNC_EVENT_CMPL": (
(req->req_type) == 0xd2 ? "HWRM_FWD_RESP": ((req->req_type
) == 0xd1 ? "HWRM_REJECT_FWD_RESP": ((req->req_type) == 0xd0
? "HWRM_EXEC_FWD_RESP": ((req->req_type) == 0xc0 ? "HWRM_FW_RESET"
: ((req->req_type) == 0xc1 ? "HWRM_FW_QSTATUS": ((req->
req_type) == 0x70 ? "HWRM_VNIC_RSS_COS_LB_CTX_ALLOC": ((req->
req_type) == 0x71 ? "HWRM_VNIC_RSS_COS_LB_CTX_FREE": ((req->
req_type) == 0xb1 ? "HWRM_STAT_CTX_FREE": ((req->req_type)
== 0xb0 ? "HWRM_STAT_CTX_ALLOC": ((req->req_type) == 0xb3
? "HWRM_STAT_CTX_CLR_STATS": ((req->req_type) == 0xb2 ? "HWRM_STAT_CTX_QUERY"
: ((req->req_type) == 0xfff6 ? "HWRM_NVM_GET_DEV_INFO": ((
req->req_type) == 0x61 ? "HWRM_RING_GRP_FREE": ((req->req_type
) == 0x60 ? "HWRM_RING_GRP_ALLOC": ((req->req_type) == 0x24
? "HWRM_PORT_LPBK_QSTATS": ((req->req_type) == 0xf3 ? "HWRM_WOL_REASON_QCFG"
: ((req->req_type) == 0xa0 ? "HWRM_TUNNEL_DST_PORT_QUERY":
((req->req_type) == 0xa1 ? "HWRM_TUNNEL_DST_PORT_ALLOC": (
(req->req_type) == 0xa2 ? "HWRM_TUNNEL_DST_PORT_FREE": ((req
->req_type) == 0xfffc ? "HWRM_NVM_RAW_DUMP": ((req->req_type
) == 0xfffb ? "HWRM_NVM_GET_DIR_INFO": ((req->req_type) ==
0xfffa ? "HWRM_NVM_GET_DIR_ENTRIES": ((req->req_type) == 0x10a
? "HWRM_CFA_VLAN_ANTISPOOF_QCFG": ((req->req_type) == 0xe
? "HWRM_FUNC_BUF_UNRGTR": ((req->req_type) == 0xf ? "HWRM_FUNC_VF_CFG"
: ((req->req_type) == 0xffff ? "HWRM_NVM_RAW_WRITE_BLK": (
(req->req_type) == 0xfffe ? "HWRM_NVM_WRITE": ((req->req_type
) == 0xfffd ? "HWRM_NVM_READ": ((req->req_type) == 0x50 ? "HWRM_RING_ALLOC"
: ((req->req_type) == 0x51 ? "HWRM_RING_FREE": ((req->req_type
) == 0x52 ? "HWRM_RING_CMPL_RING_QAGGINT_PARAMS": ((req->req_type
) == 0x53 ? "HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS": ((req->
req_type) == 0x4a ? "HWRM_VNIC_QCAPS": ((req->req_type) ==
0x49 ? "HWRM_VNIC_PLCMODES_QCFG": ((req->req_type) == 0x48
? "HWRM_VNIC_PLCMODES_CFG": ((req->req_type) == 0x47 ? "HWRM_VNIC_RSS_QCFG"
: ((req->req_type) == 0x46 ? "HWRM_VNIC_RSS_CFG": ((req->
req_type) == 0x44 ? "HWRM_VNIC_TPA_CFG": ((req->req_type) ==
0x43 ? "HWRM_VNIC_QCFG": ((req->req_type) == 0x42 ? "HWRM_VNIC_CFG"
: ((req->req_type) == 0x41 ? "HWRM_VNIC_FREE": ((req->req_type
) == 0x40 ? "HWRM_VNIC_ALLOC": ((req->req_type) == 0x0 ? "HWRM_VER_GET"
: ((req->req_type) == 0xfff9 ? "HWRM_NVM_FIND_DIR_ENTRY": (
(req->req_type) == 0xfff8 ? "HWRM_NVM_MOD_DIR_ENTRY": ((req
->req_type) == 0xfff7 ? "HWRM_NVM_ERASE_DIR_ENTRY": ((req->
req_type) == 0x5e ? "HWRM_RING_RESET": ((req->req_type) ==
0xfff5 ? "HWRM_NVM_VERIFY_UPDATE": ((req->req_type) == 0xfff4
? "HWRM_NVM_MODIFY": ((req->req_type) == 0xfff3 ? "HWRM_NVM_INSTALL_UPDATE"
: ((req->req_type) == 0xfff2 ? "HWRM_NVM_SET_VARIABLE": ((
req->req_type) == 0xfff1 ? "HWRM_NVM_GET_VARIABLE": ((req->
req_type) == 0xfff0 ? "HWRM_NVM_FLUSH": ((req->req_type) ==
0x2e ? "HWRM_PORT_LED_QCFG": ((req->req_type) == 0x2d ? "HWRM_PORT_LED_CFG"
: ((req->req_type) == 0x2f ? "HWRM_PORT_LED_QCAPS": ((req->
req_type) == 0x2a ? "HWRM_PORT_PHY_QCAPS": ((req->req_type
) == 0x38 ? "HWRM_QUEUE_PRI2COS_CFG": ((req->req_type) == 0x39
? "HWRM_QUEUE_COS2BW_QCFG": ((req->req_type) == 0x32 ? "HWRM_QUEUE_CFG"
: ((req->req_type) == 0x33 ? "HWRM_FUNC_VLAN_CFG": ((req->
req_type) == 0x30 ? "HWRM_QUEUE_QPORTCFG": ((req->req_type
) == 0x31 ? "HWRM_QUEUE_QCFG": ((req->req_type) == 0x36 ? "HWRM_QUEUE_PFCENABLE_CFG"
: ((req->req_type) == 0x37 ? "HWRM_QUEUE_PRI2COS_QCFG": ((
req->req_type) == 0x34 ? "HWRM_FUNC_VLAN_QCFG": ((req->
req_type) == 0x35 ? "HWRM_QUEUE_PFCENABLE_QCFG": ((req->req_type
) == 0xff14 ? "HWRM_DBG_DUMP": ((req->req_type) == 0xc8 ? "HWRM_FW_SET_TIME"
: ((req->req_type) == 0xc9 ? "HWRM_FW_GET_TIME": ((req->
req_type) == 0xf1 ? "HWRM_WOL_FILTER_FREE": ((req->req_type
) == 0xf0 ? "HWRM_WOL_FILTER_ALLOC": ((req->req_type) == 0x27
? "HWRM_PORT_PHY_QCFG": ((req->req_type) == 0xf2 ? "HWRM_WOL_FILTER_QCFG"
: ((req->req_type) == 0x21 ? "HWRM_PORT_MAC_CFG": ((req->
req_type) == 0x20 ? "HWRM_PORT_PHY_CFG": ((req->req_type) ==
0x23 ? "HWRM_PORT_QSTATS": ((req->req_type) == 0x28 ? "HWRM_PORT_MAC_QCFG"
: ((req->req_type) == 0xffef ? "HWRM_NVM_VALIDATE_OPTION":
((req->req_type) == 0x3a ? "HWRM_QUEUE_COS2BW_CFG": "Unknown req_type"
)))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))
)))))))))))))))))))))))))))))))))))))))))))))))
,
2447 softc->sc_cmd_timeo,
2448 le16toh(req->seq_id)((__uint16_t)(req->seq_id)));
2449 return ETIMEDOUT60;
2450 }
2451 /* Last byte of resp contains the valid key */
2452 valid = (uint8_t *)resp + resp->resp_len - 1;
2453 for (i = 0; i < softc->sc_cmd_timeo; i++) {
2454 if (*valid == HWRM_RESP_VALID_KEY1)
2455 break;
2456 DELAY(1000)(*delay_func)(1000);
2457 }
2458 if (i >= softc->sc_cmd_timeo) {
2459 printf("%s: timeout sending %s: "
2460 "(timeout: %u) msg {0x%x 0x%x} len:%d v: %d\n",
2461 DEVNAME(softc)((softc)->sc_dev.dv_xname), GET_HWRM_REQ_TYPE(req->req_type)((req->req_type) == 0x99 ? "HWRM_CFA_NTUPLE_FILTER_ALLOC":
((req->req_type) == 0x90 ? "HWRM_CFA_L2_FILTER_ALLOC": ((
req->req_type) == 0x91 ? "HWRM_CFA_L2_FILTER_FREE": ((req->
req_type) == 0x92 ? "HWRM_CFA_L2_FILTER_CFG": ((req->req_type
) == 0x93 ? "HWRM_CFA_L2_SET_RX_MASK": ((req->req_type) ==
0x94 ? "HWRM_CFA_VLAN_ANTISPOOF_CFG": ((req->req_type) ==
0x95 ? "HWRM_CFA_TUNNEL_FILTER_ALLOC": ((req->req_type) ==
0x96 ? "HWRM_CFA_TUNNEL_FILTER_FREE": ((req->req_type) ==
0x10 ? "RESERVED1": ((req->req_type) == 0x11 ? "HWRM_FUNC_RESET"
: ((req->req_type) == 0x12 ? "HWRM_FUNC_GETFID": ((req->
req_type) == 0x13 ? "HWRM_FUNC_VF_ALLOC": ((req->req_type)
== 0x14 ? "HWRM_FUNC_VF_FREE": ((req->req_type) == 0x15 ?
"HWRM_FUNC_QCAPS": ((req->req_type) == 0x16 ? "HWRM_FUNC_QCFG"
: ((req->req_type) == 0x17 ? "HWRM_FUNC_CFG": ((req->req_type
) == 0x18 ? "HWRM_FUNC_QSTATS": ((req->req_type) == 0x19 ?
"HWRM_FUNC_CLR_STATS": ((req->req_type) == 0xe0 ? "HWRM_TEMP_MONITOR_QUERY"
: ((req->req_type) == 0x1a ? "HWRM_FUNC_DRV_UNRGTR": ((req
->req_type) == 0x1b ? "HWRM_FUNC_VF_RESC_FREE": ((req->
req_type) == 0x1c ? "HWRM_FUNC_VF_VNIC_IDS_QUERY": ((req->
req_type) == 0x1d ? "HWRM_FUNC_DRV_RGTR": ((req->req_type)
== 0x1e ? "HWRM_FUNC_DRV_QVER": ((req->req_type) == 0x1f ?
"HWRM_FUNC_BUF_RGTR": ((req->req_type) == 0x9a ? "HWRM_CFA_NTUPLE_FILTER_FREE"
: ((req->req_type) == 0x9b ? "HWRM_CFA_NTUPLE_FILTER_CFG":
((req->req_type) == 0xd3 ? "HWRM_FWD_ASYNC_EVENT_CMPL": (
(req->req_type) == 0xd2 ? "HWRM_FWD_RESP": ((req->req_type
) == 0xd1 ? "HWRM_REJECT_FWD_RESP": ((req->req_type) == 0xd0
? "HWRM_EXEC_FWD_RESP": ((req->req_type) == 0xc0 ? "HWRM_FW_RESET"
: ((req->req_type) == 0xc1 ? "HWRM_FW_QSTATUS": ((req->
req_type) == 0x70 ? "HWRM_VNIC_RSS_COS_LB_CTX_ALLOC": ((req->
req_type) == 0x71 ? "HWRM_VNIC_RSS_COS_LB_CTX_FREE": ((req->
req_type) == 0xb1 ? "HWRM_STAT_CTX_FREE": ((req->req_type)
== 0xb0 ? "HWRM_STAT_CTX_ALLOC": ((req->req_type) == 0xb3
? "HWRM_STAT_CTX_CLR_STATS": ((req->req_type) == 0xb2 ? "HWRM_STAT_CTX_QUERY"
: ((req->req_type) == 0xfff6 ? "HWRM_NVM_GET_DEV_INFO": ((
req->req_type) == 0x61 ? "HWRM_RING_GRP_FREE": ((req->req_type
) == 0x60 ? "HWRM_RING_GRP_ALLOC": ((req->req_type) == 0x24
? "HWRM_PORT_LPBK_QSTATS": ((req->req_type) == 0xf3 ? "HWRM_WOL_REASON_QCFG"
: ((req->req_type) == 0xa0 ? "HWRM_TUNNEL_DST_PORT_QUERY":
((req->req_type) == 0xa1 ? "HWRM_TUNNEL_DST_PORT_ALLOC": (
(req->req_type) == 0xa2 ? "HWRM_TUNNEL_DST_PORT_FREE": ((req
->req_type) == 0xfffc ? "HWRM_NVM_RAW_DUMP": ((req->req_type
) == 0xfffb ? "HWRM_NVM_GET_DIR_INFO": ((req->req_type) ==
0xfffa ? "HWRM_NVM_GET_DIR_ENTRIES": ((req->req_type) == 0x10a
? "HWRM_CFA_VLAN_ANTISPOOF_QCFG": ((req->req_type) == 0xe
? "HWRM_FUNC_BUF_UNRGTR": ((req->req_type) == 0xf ? "HWRM_FUNC_VF_CFG"
: ((req->req_type) == 0xffff ? "HWRM_NVM_RAW_WRITE_BLK": (
(req->req_type) == 0xfffe ? "HWRM_NVM_WRITE": ((req->req_type
) == 0xfffd ? "HWRM_NVM_READ": ((req->req_type) == 0x50 ? "HWRM_RING_ALLOC"
: ((req->req_type) == 0x51 ? "HWRM_RING_FREE": ((req->req_type
) == 0x52 ? "HWRM_RING_CMPL_RING_QAGGINT_PARAMS": ((req->req_type
) == 0x53 ? "HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS": ((req->
req_type) == 0x4a ? "HWRM_VNIC_QCAPS": ((req->req_type) ==
0x49 ? "HWRM_VNIC_PLCMODES_QCFG": ((req->req_type) == 0x48
? "HWRM_VNIC_PLCMODES_CFG": ((req->req_type) == 0x47 ? "HWRM_VNIC_RSS_QCFG"
: ((req->req_type) == 0x46 ? "HWRM_VNIC_RSS_CFG": ((req->
req_type) == 0x44 ? "HWRM_VNIC_TPA_CFG": ((req->req_type) ==
0x43 ? "HWRM_VNIC_QCFG": ((req->req_type) == 0x42 ? "HWRM_VNIC_CFG"
: ((req->req_type) == 0x41 ? "HWRM_VNIC_FREE": ((req->req_type
) == 0x40 ? "HWRM_VNIC_ALLOC": ((req->req_type) == 0x0 ? "HWRM_VER_GET"
: ((req->req_type) == 0xfff9 ? "HWRM_NVM_FIND_DIR_ENTRY": (
(req->req_type) == 0xfff8 ? "HWRM_NVM_MOD_DIR_ENTRY": ((req
->req_type) == 0xfff7 ? "HWRM_NVM_ERASE_DIR_ENTRY": ((req->
req_type) == 0x5e ? "HWRM_RING_RESET": ((req->req_type) ==
0xfff5 ? "HWRM_NVM_VERIFY_UPDATE": ((req->req_type) == 0xfff4
? "HWRM_NVM_MODIFY": ((req->req_type) == 0xfff3 ? "HWRM_NVM_INSTALL_UPDATE"
: ((req->req_type) == 0xfff2 ? "HWRM_NVM_SET_VARIABLE": ((
req->req_type) == 0xfff1 ? "HWRM_NVM_GET_VARIABLE": ((req->
req_type) == 0xfff0 ? "HWRM_NVM_FLUSH": ((req->req_type) ==
0x2e ? "HWRM_PORT_LED_QCFG": ((req->req_type) == 0x2d ? "HWRM_PORT_LED_CFG"
: ((req->req_type) == 0x2f ? "HWRM_PORT_LED_QCAPS": ((req->
req_type) == 0x2a ? "HWRM_PORT_PHY_QCAPS": ((req->req_type
) == 0x38 ? "HWRM_QUEUE_PRI2COS_CFG": ((req->req_type) == 0x39
? "HWRM_QUEUE_COS2BW_QCFG": ((req->req_type) == 0x32 ? "HWRM_QUEUE_CFG"
: ((req->req_type) == 0x33 ? "HWRM_FUNC_VLAN_CFG": ((req->
req_type) == 0x30 ? "HWRM_QUEUE_QPORTCFG": ((req->req_type
) == 0x31 ? "HWRM_QUEUE_QCFG": ((req->req_type) == 0x36 ? "HWRM_QUEUE_PFCENABLE_CFG"
: ((req->req_type) == 0x37 ? "HWRM_QUEUE_PRI2COS_QCFG": ((
req->req_type) == 0x34 ? "HWRM_FUNC_VLAN_QCFG": ((req->
req_type) == 0x35 ? "HWRM_QUEUE_PFCENABLE_QCFG": ((req->req_type
) == 0xff14 ? "HWRM_DBG_DUMP": ((req->req_type) == 0xc8 ? "HWRM_FW_SET_TIME"
: ((req->req_type) == 0xc9 ? "HWRM_FW_GET_TIME": ((req->
req_type) == 0xf1 ? "HWRM_WOL_FILTER_FREE": ((req->req_type
) == 0xf0 ? "HWRM_WOL_FILTER_ALLOC": ((req->req_type) == 0x27
? "HWRM_PORT_PHY_QCFG": ((req->req_type) == 0xf2 ? "HWRM_WOL_FILTER_QCFG"
: ((req->req_type) == 0x21 ? "HWRM_PORT_MAC_CFG": ((req->
req_type) == 0x20 ? "HWRM_PORT_PHY_CFG": ((req->req_type) ==
0x23 ? "HWRM_PORT_QSTATS": ((req->req_type) == 0x28 ? "HWRM_PORT_MAC_QCFG"
: ((req->req_type) == 0xffef ? "HWRM_NVM_VALIDATE_OPTION":
((req->req_type) == 0x3a ? "HWRM_QUEUE_COS2BW_CFG": "Unknown req_type"
)))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))
)))))))))))))))))))))))))))))))))))))))))))))))
,
2462 softc->sc_cmd_timeo, le16toh(req->req_type)((__uint16_t)(req->req_type)),
2463 le16toh(req->seq_id)((__uint16_t)(req->seq_id)), msg_len,
2464 *valid);
2465 return ETIMEDOUT60;
2466 }
2467
2468 err = le16toh(resp->error_code)((__uint16_t)(resp->error_code));
2469 if (err) {
2470 /* HWRM_ERR_CODE_FAIL is a "normal" error, don't log */
2471 if (err != HWRM_ERR_CODE_FAIL(0x1U)) {
2472 printf("%s: %s command returned %s error.\n",
2473 DEVNAME(softc)((softc)->sc_dev.dv_xname),
2474 GET_HWRM_REQ_TYPE(req->req_type)((req->req_type) == 0x99 ? "HWRM_CFA_NTUPLE_FILTER_ALLOC":
((req->req_type) == 0x90 ? "HWRM_CFA_L2_FILTER_ALLOC": ((
req->req_type) == 0x91 ? "HWRM_CFA_L2_FILTER_FREE": ((req->
req_type) == 0x92 ? "HWRM_CFA_L2_FILTER_CFG": ((req->req_type
) == 0x93 ? "HWRM_CFA_L2_SET_RX_MASK": ((req->req_type) ==
0x94 ? "HWRM_CFA_VLAN_ANTISPOOF_CFG": ((req->req_type) ==
0x95 ? "HWRM_CFA_TUNNEL_FILTER_ALLOC": ((req->req_type) ==
0x96 ? "HWRM_CFA_TUNNEL_FILTER_FREE": ((req->req_type) ==
0x10 ? "RESERVED1": ((req->req_type) == 0x11 ? "HWRM_FUNC_RESET"
: ((req->req_type) == 0x12 ? "HWRM_FUNC_GETFID": ((req->
req_type) == 0x13 ? "HWRM_FUNC_VF_ALLOC": ((req->req_type)
== 0x14 ? "HWRM_FUNC_VF_FREE": ((req->req_type) == 0x15 ?
"HWRM_FUNC_QCAPS": ((req->req_type) == 0x16 ? "HWRM_FUNC_QCFG"
: ((req->req_type) == 0x17 ? "HWRM_FUNC_CFG": ((req->req_type
) == 0x18 ? "HWRM_FUNC_QSTATS": ((req->req_type) == 0x19 ?
"HWRM_FUNC_CLR_STATS": ((req->req_type) == 0xe0 ? "HWRM_TEMP_MONITOR_QUERY"
: ((req->req_type) == 0x1a ? "HWRM_FUNC_DRV_UNRGTR": ((req
->req_type) == 0x1b ? "HWRM_FUNC_VF_RESC_FREE": ((req->
req_type) == 0x1c ? "HWRM_FUNC_VF_VNIC_IDS_QUERY": ((req->
req_type) == 0x1d ? "HWRM_FUNC_DRV_RGTR": ((req->req_type)
== 0x1e ? "HWRM_FUNC_DRV_QVER": ((req->req_type) == 0x1f ?
"HWRM_FUNC_BUF_RGTR": ((req->req_type) == 0x9a ? "HWRM_CFA_NTUPLE_FILTER_FREE"
: ((req->req_type) == 0x9b ? "HWRM_CFA_NTUPLE_FILTER_CFG":
((req->req_type) == 0xd3 ? "HWRM_FWD_ASYNC_EVENT_CMPL": (
(req->req_type) == 0xd2 ? "HWRM_FWD_RESP": ((req->req_type
) == 0xd1 ? "HWRM_REJECT_FWD_RESP": ((req->req_type) == 0xd0
? "HWRM_EXEC_FWD_RESP": ((req->req_type) == 0xc0 ? "HWRM_FW_RESET"
: ((req->req_type) == 0xc1 ? "HWRM_FW_QSTATUS": ((req->
req_type) == 0x70 ? "HWRM_VNIC_RSS_COS_LB_CTX_ALLOC": ((req->
req_type) == 0x71 ? "HWRM_VNIC_RSS_COS_LB_CTX_FREE": ((req->
req_type) == 0xb1 ? "HWRM_STAT_CTX_FREE": ((req->req_type)
== 0xb0 ? "HWRM_STAT_CTX_ALLOC": ((req->req_type) == 0xb3
? "HWRM_STAT_CTX_CLR_STATS": ((req->req_type) == 0xb2 ? "HWRM_STAT_CTX_QUERY"
: ((req->req_type) == 0xfff6 ? "HWRM_NVM_GET_DEV_INFO": ((
req->req_type) == 0x61 ? "HWRM_RING_GRP_FREE": ((req->req_type
) == 0x60 ? "HWRM_RING_GRP_ALLOC": ((req->req_type) == 0x24
? "HWRM_PORT_LPBK_QSTATS": ((req->req_type) == 0xf3 ? "HWRM_WOL_REASON_QCFG"
: ((req->req_type) == 0xa0 ? "HWRM_TUNNEL_DST_PORT_QUERY":
((req->req_type) == 0xa1 ? "HWRM_TUNNEL_DST_PORT_ALLOC": (
(req->req_type) == 0xa2 ? "HWRM_TUNNEL_DST_PORT_FREE": ((req
->req_type) == 0xfffc ? "HWRM_NVM_RAW_DUMP": ((req->req_type
) == 0xfffb ? "HWRM_NVM_GET_DIR_INFO": ((req->req_type) ==
0xfffa ? "HWRM_NVM_GET_DIR_ENTRIES": ((req->req_type) == 0x10a
? "HWRM_CFA_VLAN_ANTISPOOF_QCFG": ((req->req_type) == 0xe
? "HWRM_FUNC_BUF_UNRGTR": ((req->req_type) == 0xf ? "HWRM_FUNC_VF_CFG"
: ((req->req_type) == 0xffff ? "HWRM_NVM_RAW_WRITE_BLK": (
(req->req_type) == 0xfffe ? "HWRM_NVM_WRITE": ((req->req_type
) == 0xfffd ? "HWRM_NVM_READ": ((req->req_type) == 0x50 ? "HWRM_RING_ALLOC"
: ((req->req_type) == 0x51 ? "HWRM_RING_FREE": ((req->req_type
) == 0x52 ? "HWRM_RING_CMPL_RING_QAGGINT_PARAMS": ((req->req_type
) == 0x53 ? "HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS": ((req->
req_type) == 0x4a ? "HWRM_VNIC_QCAPS": ((req->req_type) ==
0x49 ? "HWRM_VNIC_PLCMODES_QCFG": ((req->req_type) == 0x48
? "HWRM_VNIC_PLCMODES_CFG": ((req->req_type) == 0x47 ? "HWRM_VNIC_RSS_QCFG"
: ((req->req_type) == 0x46 ? "HWRM_VNIC_RSS_CFG": ((req->
req_type) == 0x44 ? "HWRM_VNIC_TPA_CFG": ((req->req_type) ==
0x43 ? "HWRM_VNIC_QCFG": ((req->req_type) == 0x42 ? "HWRM_VNIC_CFG"
: ((req->req_type) == 0x41 ? "HWRM_VNIC_FREE": ((req->req_type
) == 0x40 ? "HWRM_VNIC_ALLOC": ((req->req_type) == 0x0 ? "HWRM_VER_GET"
: ((req->req_type) == 0xfff9 ? "HWRM_NVM_FIND_DIR_ENTRY": (
(req->req_type) == 0xfff8 ? "HWRM_NVM_MOD_DIR_ENTRY": ((req
->req_type) == 0xfff7 ? "HWRM_NVM_ERASE_DIR_ENTRY": ((req->
req_type) == 0x5e ? "HWRM_RING_RESET": ((req->req_type) ==
0xfff5 ? "HWRM_NVM_VERIFY_UPDATE": ((req->req_type) == 0xfff4
? "HWRM_NVM_MODIFY": ((req->req_type) == 0xfff3 ? "HWRM_NVM_INSTALL_UPDATE"
: ((req->req_type) == 0xfff2 ? "HWRM_NVM_SET_VARIABLE": ((
req->req_type) == 0xfff1 ? "HWRM_NVM_GET_VARIABLE": ((req->
req_type) == 0xfff0 ? "HWRM_NVM_FLUSH": ((req->req_type) ==
0x2e ? "HWRM_PORT_LED_QCFG": ((req->req_type) == 0x2d ? "HWRM_PORT_LED_CFG"
: ((req->req_type) == 0x2f ? "HWRM_PORT_LED_QCAPS": ((req->
req_type) == 0x2a ? "HWRM_PORT_PHY_QCAPS": ((req->req_type
) == 0x38 ? "HWRM_QUEUE_PRI2COS_CFG": ((req->req_type) == 0x39
? "HWRM_QUEUE_COS2BW_QCFG": ((req->req_type) == 0x32 ? "HWRM_QUEUE_CFG"
: ((req->req_type) == 0x33 ? "HWRM_FUNC_VLAN_CFG": ((req->
req_type) == 0x30 ? "HWRM_QUEUE_QPORTCFG": ((req->req_type
) == 0x31 ? "HWRM_QUEUE_QCFG": ((req->req_type) == 0x36 ? "HWRM_QUEUE_PFCENABLE_CFG"
: ((req->req_type) == 0x37 ? "HWRM_QUEUE_PRI2COS_QCFG": ((
req->req_type) == 0x34 ? "HWRM_FUNC_VLAN_QCFG": ((req->
req_type) == 0x35 ? "HWRM_QUEUE_PFCENABLE_QCFG": ((req->req_type
) == 0xff14 ? "HWRM_DBG_DUMP": ((req->req_type) == 0xc8 ? "HWRM_FW_SET_TIME"
: ((req->req_type) == 0xc9 ? "HWRM_FW_GET_TIME": ((req->
req_type) == 0xf1 ? "HWRM_WOL_FILTER_FREE": ((req->req_type
) == 0xf0 ? "HWRM_WOL_FILTER_ALLOC": ((req->req_type) == 0x27
? "HWRM_PORT_PHY_QCFG": ((req->req_type) == 0xf2 ? "HWRM_WOL_FILTER_QCFG"
: ((req->req_type) == 0x21 ? "HWRM_PORT_MAC_CFG": ((req->
req_type) == 0x20 ? "HWRM_PORT_PHY_CFG": ((req->req_type) ==
0x23 ? "HWRM_PORT_QSTATS": ((req->req_type) == 0x28 ? "HWRM_PORT_MAC_QCFG"
: ((req->req_type) == 0xffef ? "HWRM_NVM_VALIDATE_OPTION":
((req->req_type) == 0x3a ? "HWRM_QUEUE_COS2BW_CFG": "Unknown req_type"
)))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))
)))))))))))))))))))))))))))))))))))))))))))))))
,
2475 GET_HWRM_ERROR_CODE(err)((err) == 0xf ? "HWRM_ERROR": ((err) == 0xffff ? "CMD_NOT_SUPPORTED"
: ((err) == 0xfffe ? "UNKNOWN_ERR": ((err) == 0x4 ? "RESOURCE_ALLOC_ERROR"
: ((err) == 0x5 ? "INVALID_FLAGS": ((err) == 0x6 ? "INVALID_ENABLES"
: ((err) == 0x0 ? "SUCCESS": ((err) == 0x1 ? "FAIL": ((err) ==
0x2 ? "INVALID_PARAMS": ((err) == 0x3 ? "RESOURCE_ACCESS_DENIED"
: "Unknown error_code"))))))))))
);
2476 }
2477 return bnxt_hwrm_err_map(err);
2478 }
2479
2480 return 0;
2481}
2482
2483
/*
 * Locked wrapper around _hwrm_send_message(): holds the HWRM mutex for
 * the duration of one request/response exchange with the firmware.
 */
int
hwrm_send_message(struct bnxt_softc *softc, void *msg, uint32_t msg_len)
{
	int error;

	BNXT_HWRM_LOCK(softc);
	error = _hwrm_send_message(softc, msg, msg_len);
	BNXT_HWRM_UNLOCK(softc);

	return (error);
}
2494
2495
2496int
2497bnxt_hwrm_queue_qportcfg(struct bnxt_softc *softc)
2498{
2499 struct hwrm_queue_qportcfg_input req = {0};
2500 struct hwrm_queue_qportcfg_output *resp =
2501 BNXT_DMA_KVA(softc->sc_cmd_resp)((void *)(softc->sc_cmd_resp)->bdm_kva);
2502 int rc = 0;
2503
2504 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_QUEUE_QPORTCFG(0x30U));
2505
2506 BNXT_HWRM_LOCK(softc)mtx_enter(&softc->sc_lock);
2507 rc = _hwrm_send_message(softc, &req, sizeof(req));
2508 if (rc)
2509 goto qportcfg_exit;
2510
2511 if (!resp->max_configurable_queues) {
2512 rc = -EINVAL22;
2513 goto qportcfg_exit;
2514 }
2515
2516 softc->sc_tx_queue_id = resp->queue_id0;
2517
2518qportcfg_exit:
2519 BNXT_HWRM_UNLOCK(softc)mtx_leave(&softc->sc_lock);
2520 return rc;
2521}
2522
2523int
2524bnxt_hwrm_ver_get(struct bnxt_softc *softc)
2525{
2526 struct hwrm_ver_get_input req = {0};
2527 struct hwrm_ver_get_output *resp =
2528 BNXT_DMA_KVA(softc->sc_cmd_resp)((void *)(softc->sc_cmd_resp)->bdm_kva);
2529 int rc;
2530#if 0
2531 const char nastr[] = "<not installed>";
2532 const char naver[] = "<N/A>";
2533#endif
2534 uint32_t dev_caps_cfg;
2535
2536 softc->sc_max_req_len = HWRM_MAX_REQ_LEN(128);
2537 softc->sc_cmd_timeo = 1000;
2538 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VER_GET(0x0U));
2539
2540 req.hwrm_intf_maj = HWRM_VERSION_MAJOR1;
2541 req.hwrm_intf_min = HWRM_VERSION_MINOR8;
2542 req.hwrm_intf_upd = HWRM_VERSION_UPDATE1;
2543
2544 BNXT_HWRM_LOCK(softc)mtx_enter(&softc->sc_lock);
2545 rc = _hwrm_send_message(softc, &req, sizeof(req));
2546 if (rc)
2547 goto fail;
2548
2549 printf(": fw ver %d.%d.%d, ", resp->hwrm_fw_maj, resp->hwrm_fw_min,
2550 resp->hwrm_fw_bld);
2551
2552 softc->sc_hwrm_ver = (resp->hwrm_intf_maj << 16) |
2553 (resp->hwrm_intf_min << 8) | resp->hwrm_intf_upd;
2554#if 0
2555 snprintf(softc->ver_info->hwrm_if_ver, BNXT_VERSTR_SIZE, "%d.%d.%d",
2556 resp->hwrm_intf_maj, resp->hwrm_intf_min, resp->hwrm_intf_upd);
2557 softc->ver_info->hwrm_if_major = resp->hwrm_intf_maj;
2558 softc->ver_info->hwrm_if_minor = resp->hwrm_intf_min;
2559 softc->ver_info->hwrm_if_update = resp->hwrm_intf_upd;
2560 snprintf(softc->ver_info->hwrm_fw_ver, BNXT_VERSTR_SIZE, "%d.%d.%d",
2561 resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld);
2562 strlcpy(softc->ver_info->driver_hwrm_if_ver, HWRM_VERSION_STR"1.8.1.7",
2563 BNXT_VERSTR_SIZE);
2564 strlcpy(softc->ver_info->hwrm_fw_name, resp->hwrm_fw_name,
2565 BNXT_NAME_SIZE);
2566
2567 if (resp->mgmt_fw_maj == 0 && resp->mgmt_fw_min == 0 &&
2568 resp->mgmt_fw_bld == 0) {
2569 strlcpy(softc->ver_info->mgmt_fw_ver, naver, BNXT_VERSTR_SIZE);
2570 strlcpy(softc->ver_info->mgmt_fw_name, nastr, BNXT_NAME_SIZE);
2571 }
2572 else {
2573 snprintf(softc->ver_info->mgmt_fw_ver, BNXT_VERSTR_SIZE,
2574 "%d.%d.%d", resp->mgmt_fw_maj, resp->mgmt_fw_min,
2575 resp->mgmt_fw_bld);
2576 strlcpy(softc->ver_info->mgmt_fw_name, resp->mgmt_fw_name,
2577 BNXT_NAME_SIZE);
2578 }
2579 if (resp->netctrl_fw_maj == 0 && resp->netctrl_fw_min == 0 &&
2580 resp->netctrl_fw_bld == 0) {
2581 strlcpy(softc->ver_info->netctrl_fw_ver, naver,
2582 BNXT_VERSTR_SIZE);
2583 strlcpy(softc->ver_info->netctrl_fw_name, nastr,
2584 BNXT_NAME_SIZE);
2585 }
2586 else {
2587 snprintf(softc->ver_info->netctrl_fw_ver, BNXT_VERSTR_SIZE,
2588 "%d.%d.%d", resp->netctrl_fw_maj, resp->netctrl_fw_min,
2589 resp->netctrl_fw_bld);
2590 strlcpy(softc->ver_info->netctrl_fw_name, resp->netctrl_fw_name,
2591 BNXT_NAME_SIZE);
2592 }
2593 if (resp->roce_fw_maj == 0 && resp->roce_fw_min == 0 &&
2594 resp->roce_fw_bld == 0) {
2595 strlcpy(softc->ver_info->roce_fw_ver, naver, BNXT_VERSTR_SIZE);
2596 strlcpy(softc->ver_info->roce_fw_name, nastr, BNXT_NAME_SIZE);
2597 }
2598 else {
2599 snprintf(softc->ver_info->roce_fw_ver, BNXT_VERSTR_SIZE,
2600 "%d.%d.%d", resp->roce_fw_maj, resp->roce_fw_min,
2601 resp->roce_fw_bld);
2602 strlcpy(softc->ver_info->roce_fw_name, resp->roce_fw_name,
2603 BNXT_NAME_SIZE);
2604 }
2605 softc->ver_info->chip_num = le16toh(resp->chip_num)((__uint16_t)(resp->chip_num));
2606 softc->ver_info->chip_rev = resp->chip_rev;
2607 softc->ver_info->chip_metal = resp->chip_metal;
2608 softc->ver_info->chip_bond_id = resp->chip_bond_id;
2609 softc->ver_info->chip_type = resp->chip_platform_type;
2610#endif
2611
2612 if (resp->max_req_win_len)
2613 softc->sc_max_req_len = le16toh(resp->max_req_win_len)((__uint16_t)(resp->max_req_win_len));
2614 if (resp->def_req_timeout)
2615 softc->sc_cmd_timeo = le16toh(resp->def_req_timeout)((__uint16_t)(resp->def_req_timeout));
2616
2617 dev_caps_cfg = le32toh(resp->dev_caps_cfg)((__uint32_t)(resp->dev_caps_cfg));
2618 if ((dev_caps_cfg & HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED0x4U) &&
2619 (dev_caps_cfg & HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED0x8U))
2620 softc->sc_flags |= BNXT_FLAG_SHORT_CMD0x0008;
2621
2622fail:
2623 BNXT_HWRM_UNLOCK(softc)mtx_leave(&softc->sc_lock);
2624 return rc;
2625}
2626
2627
2628int
2629bnxt_hwrm_func_drv_rgtr(struct bnxt_softc *softc)
2630{
2631 struct hwrm_func_drv_rgtr_input req = {0};
2632
2633 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_DRV_RGTR(0x1dU));
2634
2635 req.enables = htole32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |((__uint32_t)(0x2U | 0x1U))
2636 HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_OS_TYPE)((__uint32_t)(0x2U | 0x1U));
2637 req.os_type = htole16(HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_FREEBSD)((__uint16_t)(0x2aU));
2638
2639 req.ver_maj = 6;
2640 req.ver_min = 4;
2641 req.ver_upd = 0;
2642
2643 return hwrm_send_message(softc, &req, sizeof(req));
2644}
2645
2646#if 0
2647
2648int
2649bnxt_hwrm_func_drv_unrgtr(struct bnxt_softc *softc, bool_Bool shutdown)
2650{
2651 struct hwrm_func_drv_unrgtr_input req = {0};
2652
2653 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_DRV_UNRGTR(0x1aU));
2654 if (shutdown == true1)
2655 req.flags |=
2656 HWRM_FUNC_DRV_UNRGTR_INPUT_FLAGS_PREPARE_FOR_SHUTDOWN0x1U;
2657 return hwrm_send_message(softc, &req, sizeof(req));
2658}
2659
2660#endif
2661
2662int
2663bnxt_hwrm_func_qcaps(struct bnxt_softc *softc)
2664{
2665 int rc = 0;
2666 struct hwrm_func_qcaps_input req = {0};
2667 struct hwrm_func_qcaps_output *resp =
2668 BNXT_DMA_KVA(softc->sc_cmd_resp)((void *)(softc->sc_cmd_resp)->bdm_kva);
2669 /* struct bnxt_func_info *func = &softc->func; */
2670
2671 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_QCAPS(0x15U));
2672 req.fid = htole16(0xffff)((__uint16_t)(0xffff));
2673
2674 BNXT_HWRM_LOCK(softc)mtx_enter(&softc->sc_lock);
2675 rc = _hwrm_send_message(softc, &req, sizeof(req));
2676 if (rc)
2677 goto fail;
2678
2679 if (resp->flags &
2680 htole32(HWRM_FUNC_QCAPS_OUTPUT_FLAGS_WOL_MAGICPKT_SUPPORTED)((__uint32_t)(0x20U)))
2681 softc->sc_flags |= BNXT_FLAG_WOL_CAP0x0004;
2682
2683 memcpy(softc->sc_ac.ac_enaddr, resp->mac_address, 6)__builtin_memcpy((softc->sc_ac.ac_enaddr), (resp->mac_address
), (6))
;
2684 /*
2685 func->fw_fid = le16toh(resp->fid);
2686 memcpy(func->mac_addr, resp->mac_address, ETHER_ADDR_LEN);
2687 func->max_rsscos_ctxs = le16toh(resp->max_rsscos_ctx);
2688 func->max_cp_rings = le16toh(resp->max_cmpl_rings);
2689 func->max_tx_rings = le16toh(resp->max_tx_rings);
2690 func->max_rx_rings = le16toh(resp->max_rx_rings);
2691 func->max_hw_ring_grps = le32toh(resp->max_hw_ring_grps);
2692 if (!func->max_hw_ring_grps)
2693 func->max_hw_ring_grps = func->max_tx_rings;
2694 func->max_l2_ctxs = le16toh(resp->max_l2_ctxs);
2695 func->max_vnics = le16toh(resp->max_vnics);
2696 func->max_stat_ctxs = le16toh(resp->max_stat_ctx);
2697 if (BNXT_PF(softc)) {
2698 struct bnxt_pf_info *pf = &softc->pf;
2699
2700 pf->port_id = le16toh(resp->port_id);
2701 pf->first_vf_id = le16toh(resp->first_vf_id);
2702 pf->max_vfs = le16toh(resp->max_vfs);
2703 pf->max_encap_records = le32toh(resp->max_encap_records);
2704 pf->max_decap_records = le32toh(resp->max_decap_records);
2705 pf->max_tx_em_flows = le32toh(resp->max_tx_em_flows);
2706 pf->max_tx_wm_flows = le32toh(resp->max_tx_wm_flows);
2707 pf->max_rx_em_flows = le32toh(resp->max_rx_em_flows);
2708 pf->max_rx_wm_flows = le32toh(resp->max_rx_wm_flows);
2709 }
2710 if (!_is_valid_ether_addr(func->mac_addr)) {
2711 device_printf(softc->dev, "Invalid ethernet address, generating random locally administered address\n");
2712 get_random_ether_addr(func->mac_addr);
2713 }
2714 */
2715
2716fail:
2717 BNXT_HWRM_UNLOCK(softc)mtx_leave(&softc->sc_lock);
2718 return rc;
2719}
2720
2721
2722int
2723bnxt_hwrm_func_qcfg(struct bnxt_softc *softc)
2724{
2725 struct hwrm_func_qcfg_input req = {0};
2726 /* struct hwrm_func_qcfg_output *resp =
2727 BNXT_DMA_KVA(softc->sc_cmd_resp);
2728 struct bnxt_func_qcfg *fn_qcfg = &softc->fn_qcfg; */
2729 int rc;
2730
2731 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_QCFG(0x16U));
2732 req.fid = htole16(0xffff)((__uint16_t)(0xffff));
2733 BNXT_HWRM_LOCK(softc)mtx_enter(&softc->sc_lock);
2734 rc = _hwrm_send_message(softc, &req, sizeof(req));
2735 if (rc)
2736 goto fail;
2737
2738 /*
2739 fn_qcfg->alloc_completion_rings = le16toh(resp->alloc_cmpl_rings);
2740 fn_qcfg->alloc_tx_rings = le16toh(resp->alloc_tx_rings);
2741 fn_qcfg->alloc_rx_rings = le16toh(resp->alloc_rx_rings);
2742 fn_qcfg->alloc_vnics = le16toh(resp->alloc_vnics);
2743 */
2744fail:
2745 BNXT_HWRM_UNLOCK(softc)mtx_leave(&softc->sc_lock);
2746 return rc;
2747}
2748
2749
2750int
2751bnxt_hwrm_func_reset(struct bnxt_softc *softc)
2752{
2753 struct hwrm_func_reset_input req = {0};
2754
2755 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_RESET(0x11U));
2756 req.enables = 0;
2757
2758 return hwrm_send_message(softc, &req, sizeof(req));
2759}
2760
2761int
2762bnxt_hwrm_vnic_cfg_placement(struct bnxt_softc *softc,
2763 struct bnxt_vnic_info *vnic)
2764{
2765 struct hwrm_vnic_plcmodes_cfg_input req = {0};
2766
2767 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_PLCMODES_CFG(0x48U));
2768
2769 req.flags = htole32(((__uint32_t)(0x2U))
2770 HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT)((__uint32_t)(0x2U));
2771 req.enables = htole32(((__uint32_t)(0x1U))
2772 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID)((__uint32_t)(0x1U));
2773 req.vnic_id = htole16(vnic->id)((__uint16_t)(vnic->id));
2774 req.jumbo_thresh = htole16(MCLBYTES)((__uint16_t)((1 << 11)));
2775
2776 return hwrm_send_message(softc, &req, sizeof(req));
2777}
2778
2779int
2780bnxt_hwrm_vnic_cfg(struct bnxt_softc *softc, struct bnxt_vnic_info *vnic)
2781{
2782 struct hwrm_vnic_cfg_input req = {0};
2783
2784 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_CFG(0x42U));
2785
2786 if (vnic->flags & BNXT_VNIC_FLAG_DEFAULT0x01)
2787 req.flags |= htole32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT)((__uint32_t)(0x1U));
2788 if (vnic->flags & BNXT_VNIC_FLAG_BD_STALL0x02)
2789 req.flags |= htole32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE)((__uint32_t)(0x4U));
2790 if (vnic->flags & BNXT_VNIC_FLAG_VLAN_STRIP0x04)
2791 req.flags |= htole32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE)((__uint32_t)(0x2U));
2792 req.enables = htole32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP |((__uint32_t)(0x1U | 0x2U | 0x10U))
2793 HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE |((__uint32_t)(0x1U | 0x2U | 0x10U))
2794 HWRM_VNIC_CFG_INPUT_ENABLES_MRU)((__uint32_t)(0x1U | 0x2U | 0x10U));
2795 req.vnic_id = htole16(vnic->id)((__uint16_t)(vnic->id));
2796 req.dflt_ring_grp = htole16(vnic->def_ring_grp)((__uint16_t)(vnic->def_ring_grp));
2797 req.rss_rule = htole16(vnic->rss_id)((__uint16_t)(vnic->rss_id));
2798 req.cos_rule = htole16(vnic->cos_rule)((__uint16_t)(vnic->cos_rule));
2799 req.lb_rule = htole16(vnic->lb_rule)((__uint16_t)(vnic->lb_rule));
2800 req.mru = htole16(vnic->mru)((__uint16_t)(vnic->mru));
2801
2802 return hwrm_send_message(softc, &req, sizeof(req));
2803}
2804
2805int
2806bnxt_hwrm_vnic_alloc(struct bnxt_softc *softc, struct bnxt_vnic_info *vnic)
2807{
2808 struct hwrm_vnic_alloc_input req = {0};
2809 struct hwrm_vnic_alloc_output *resp =
2810 BNXT_DMA_KVA(softc->sc_cmd_resp)((void *)(softc->sc_cmd_resp)->bdm_kva);
2811 int rc;
2812
2813 if (vnic->id != (uint16_t)HWRM_NA_SIGNATURE((uint32_t)(-1))) {
2814 printf("%s: attempt to re-allocate vnic %04x\n",
2815 DEVNAME(softc)((softc)->sc_dev.dv_xname), vnic->id);
2816 return EINVAL22;
2817 }
2818
2819 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_ALLOC(0x40U));
2820
2821 if (vnic->flags & BNXT_VNIC_FLAG_DEFAULT0x01)
2822 req.flags = htole32(HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT)((__uint32_t)(0x1U));
2823
2824 BNXT_HWRM_LOCK(softc)mtx_enter(&softc->sc_lock);
2825 rc = _hwrm_send_message(softc, &req, sizeof(req));
2826 if (rc)
2827 goto fail;
2828
2829 vnic->id = le32toh(resp->vnic_id)((__uint32_t)(resp->vnic_id));
2830
2831fail:
2832 BNXT_HWRM_UNLOCK(softc)mtx_leave(&softc->sc_lock);
2833 return rc;
2834}
2835
2836int
2837bnxt_hwrm_vnic_free(struct bnxt_softc *softc, struct bnxt_vnic_info *vnic)
2838{
2839 struct hwrm_vnic_free_input req = {0};
2840 int rc;
2841
2842 if (vnic->id == (uint16_t)HWRM_NA_SIGNATURE((uint32_t)(-1))) {
2843 printf("%s: attempt to deallocate vnic %04x\n",
2844 DEVNAME(softc)((softc)->sc_dev.dv_xname), vnic->id);
2845 return (EINVAL22);
2846 }
2847
2848 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_FREE(0x41U));
2849 req.vnic_id = htole16(vnic->id)((__uint16_t)(vnic->id));
2850
2851 BNXT_HWRM_LOCK(softc)mtx_enter(&softc->sc_lock);
2852 rc = _hwrm_send_message(softc, &req, sizeof(req));
2853 if (rc == 0)
2854 vnic->id = (uint16_t)HWRM_NA_SIGNATURE((uint32_t)(-1));
2855 BNXT_HWRM_UNLOCK(softc)mtx_leave(&softc->sc_lock);
2856
2857 return (rc);
2858}
2859
2860int
2861bnxt_hwrm_vnic_ctx_alloc(struct bnxt_softc *softc, uint16_t *ctx_id)
2862{
2863 struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0};
2864 struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
2865 BNXT_DMA_KVA(softc->sc_cmd_resp)((void *)(softc->sc_cmd_resp)->bdm_kva);
2866 int rc;
2867
2868 if (*ctx_id != (uint16_t)HWRM_NA_SIGNATURE((uint32_t)(-1))) {
2869 printf("%s: attempt to re-allocate vnic ctx %04x\n",
2870 DEVNAME(softc)((softc)->sc_dev.dv_xname), *ctx_id);
2871 return EINVAL22;
2872 }
2873
2874 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC(0x70U));
2875
2876 BNXT_HWRM_LOCK(softc)mtx_enter(&softc->sc_lock);
2877 rc = _hwrm_send_message(softc, &req, sizeof(req));
2878 if (rc)
2879 goto fail;
2880
2881 *ctx_id = letoh16(resp->rss_cos_lb_ctx_id)((__uint16_t)(resp->rss_cos_lb_ctx_id));
2882
2883fail:
2884 BNXT_HWRM_UNLOCK(softc)mtx_leave(&softc->sc_lock);
2885 return (rc);
2886}
2887
2888int
2889bnxt_hwrm_vnic_ctx_free(struct bnxt_softc *softc, uint16_t *ctx_id)
2890{
2891 struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0};
2892 int rc;
2893
2894 if (*ctx_id == (uint16_t)HWRM_NA_SIGNATURE((uint32_t)(-1))) {
2895 printf("%s: attempt to deallocate vnic ctx %04x\n",
2896 DEVNAME(softc)((softc)->sc_dev.dv_xname), *ctx_id);
2897 return (EINVAL22);
2898 }
2899
2900 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE(0x71U));
2901 req.rss_cos_lb_ctx_id = htole32(*ctx_id)((__uint32_t)(*ctx_id));
2902
2903 BNXT_HWRM_LOCK(softc)mtx_enter(&softc->sc_lock);
2904 rc = _hwrm_send_message(softc, &req, sizeof(req));
2905 if (rc == 0)
2906 *ctx_id = (uint16_t)HWRM_NA_SIGNATURE((uint32_t)(-1));
2907 BNXT_HWRM_UNLOCK(softc)mtx_leave(&softc->sc_lock);
2908 return (rc);
2909}
2910
2911int
2912bnxt_hwrm_ring_grp_alloc(struct bnxt_softc *softc, struct bnxt_grp_info *grp)
2913{
2914 struct hwrm_ring_grp_alloc_input req = {0};
2915 struct hwrm_ring_grp_alloc_output *resp;
2916 int rc = 0;
2917
2918 if (grp->grp_id != HWRM_NA_SIGNATURE((uint32_t)(-1))) {
2919 printf("%s: attempt to re-allocate ring group %04x\n",
2920 DEVNAME(softc)((softc)->sc_dev.dv_xname), grp->grp_id);
2921 return EINVAL22;
2922 }
2923
2924 resp = BNXT_DMA_KVA(softc->sc_cmd_resp)((void *)(softc->sc_cmd_resp)->bdm_kva);
2925 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_RING_GRP_ALLOC(0x60U));
2926 req.cr = htole16(grp->cp_ring_id)((__uint16_t)(grp->cp_ring_id));
2927 req.rr = htole16(grp->rx_ring_id)((__uint16_t)(grp->rx_ring_id));
2928 req.ar = htole16(grp->ag_ring_id)((__uint16_t)(grp->ag_ring_id));
2929 req.sc = htole16(grp->stats_ctx)((__uint16_t)(grp->stats_ctx));
2930
2931 BNXT_HWRM_LOCK(softc)mtx_enter(&softc->sc_lock);
2932 rc = _hwrm_send_message(softc, &req, sizeof(req));
2933 if (rc)
2934 goto fail;
2935
2936 grp->grp_id = letoh32(resp->ring_group_id)((__uint32_t)(resp->ring_group_id));
2937
2938fail:
2939 BNXT_HWRM_UNLOCK(softc)mtx_leave(&softc->sc_lock);
2940 return rc;
2941}
2942
2943int
2944bnxt_hwrm_ring_grp_free(struct bnxt_softc *softc, struct bnxt_grp_info *grp)
2945{
2946 struct hwrm_ring_grp_free_input req = {0};
2947 int rc = 0;
2948
2949 if (grp->grp_id == HWRM_NA_SIGNATURE((uint32_t)(-1))) {
2950 printf("%s: attempt to free ring group %04x\n",
2951 DEVNAME(softc)((softc)->sc_dev.dv_xname), grp->grp_id);
2952 return EINVAL22;
2953 }
2954
2955 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_RING_GRP_FREE(0x61U));
2956 req.ring_group_id = htole32(grp->grp_id)((__uint32_t)(grp->grp_id));
2957
2958 BNXT_HWRM_LOCK(softc)mtx_enter(&softc->sc_lock);
2959 rc = _hwrm_send_message(softc, &req, sizeof(req));
2960 if (rc == 0)
2961 grp->grp_id = HWRM_NA_SIGNATURE((uint32_t)(-1));
2962
2963 BNXT_HWRM_UNLOCK(softc)mtx_leave(&softc->sc_lock);
2964 return (rc);
2965}
2966
2967/*
2968 * Ring allocation message to the firmware
2969 */
2970int
2971bnxt_hwrm_ring_alloc(struct bnxt_softc *softc, uint8_t type,
2972 struct bnxt_ring *ring, uint16_t cmpl_ring_id, uint32_t stat_ctx_id,
2973 int irq)
2974{
2975 struct hwrm_ring_alloc_input req = {0};
2976 struct hwrm_ring_alloc_output *resp;
2977 int rc;
2978
2979 if (ring->phys_id != (uint16_t)HWRM_NA_SIGNATURE((uint32_t)(-1))) {
2980 printf("%s: attempt to re-allocate ring %04x\n",
2981 DEVNAME(softc)((softc)->sc_dev.dv_xname), ring->phys_id);
2982 return EINVAL22;
2983 }
2984
2985 resp = BNXT_DMA_KVA(softc->sc_cmd_resp)((void *)(softc->sc_cmd_resp)->bdm_kva);
2986 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_RING_ALLOC(0x50U));
2987 req.enables = htole32(0)((__uint32_t)(0));
2988 req.fbo = htole32(0)((__uint32_t)(0));
2989
2990 if (stat_ctx_id != HWRM_NA_SIGNATURE((uint32_t)(-1))) {
2991 req.enables |= htole32(((__uint32_t)(0x8U))
2992 HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID)((__uint32_t)(0x8U));
2993 req.stat_ctx_id = htole32(stat_ctx_id)((__uint32_t)(stat_ctx_id));
2994 }
2995 req.ring_type = type;
2996 req.page_tbl_addr = htole64(ring->paddr)((__uint64_t)(ring->paddr));
2997 req.length = htole32(ring->ring_size)((__uint32_t)(ring->ring_size));
2998 req.logical_id = htole16(ring->id)((__uint16_t)(ring->id));
2999 req.cmpl_ring_id = htole16(cmpl_ring_id)((__uint16_t)(cmpl_ring_id));
3000 req.queue_id = htole16(softc->sc_tx_queue_id)((__uint16_t)(softc->sc_tx_queue_id));
3001 req.int_mode = (softc->sc_flags & BNXT_FLAG_MSIX0x0010) ?
3002 HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX0x2U :
3003 HWRM_RING_ALLOC_INPUT_INT_MODE_LEGACY0x0U;
3004 BNXT_HWRM_LOCK(softc)mtx_enter(&softc->sc_lock);
3005 rc = _hwrm_send_message(softc, &req, sizeof(req));
3006 if (rc)
3007 goto fail;
3008
3009 ring->phys_id = le16toh(resp->ring_id)((__uint16_t)(resp->ring_id));
3010
3011fail:
3012 BNXT_HWRM_UNLOCK(softc)mtx_leave(&softc->sc_lock);
3013 return rc;
3014}
3015
3016int
3017bnxt_hwrm_ring_free(struct bnxt_softc *softc, uint8_t type, struct bnxt_ring *ring)
3018{
3019 struct hwrm_ring_free_input req = {0};
3020 int rc;
3021
3022 if (ring->phys_id == (uint16_t)HWRM_NA_SIGNATURE((uint32_t)(-1))) {
3023 printf("%s: attempt to deallocate ring %04x\n",
3024 DEVNAME(softc)((softc)->sc_dev.dv_xname), ring->phys_id);
3025 return (EINVAL22);
3026 }
3027
3028 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_RING_FREE(0x51U));
3029 req.ring_type = type;
3030 req.ring_id = htole16(ring->phys_id)((__uint16_t)(ring->phys_id));
3031 BNXT_HWRM_LOCK(softc)mtx_enter(&softc->sc_lock);
3032 rc = _hwrm_send_message(softc, &req, sizeof(req));
3033 if (rc)
3034 goto fail;
3035
3036 ring->phys_id = (uint16_t)HWRM_NA_SIGNATURE((uint32_t)(-1));
3037fail:
3038 BNXT_HWRM_UNLOCK(softc)mtx_leave(&softc->sc_lock);
3039 return (rc);
3040}
3041
3042
3043int
3044bnxt_hwrm_stat_ctx_alloc(struct bnxt_softc *softc, struct bnxt_cp_ring *cpr,
3045 uint64_t paddr)
3046{
3047 struct hwrm_stat_ctx_alloc_input req = {0};
3048 struct hwrm_stat_ctx_alloc_output *resp;
3049 int rc = 0;
3050
3051 if (cpr->stats_ctx_id != HWRM_NA_SIGNATURE((uint32_t)(-1))) {
3052 printf("%s: attempt to re-allocate stats ctx %08x\n",
3053 DEVNAME(softc)((softc)->sc_dev.dv_xname), cpr->stats_ctx_id);
3054 return EINVAL22;
3055 }
3056
3057 resp = BNXT_DMA_KVA(softc->sc_cmd_resp)((void *)(softc->sc_cmd_resp)->bdm_kva);
3058 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_STAT_CTX_ALLOC(0xb0U));
3059
3060 req.update_period_ms = htole32(1000)((__uint32_t)(1000));
3061 req.stats_dma_addr = htole64(paddr)((__uint64_t)(paddr));
3062
3063 BNXT_HWRM_LOCK(softc)mtx_enter(&softc->sc_lock);
3064 rc = _hwrm_send_message(softc, &req, sizeof(req));
3065 if (rc)
3066 goto fail;
3067
3068 cpr->stats_ctx_id = le32toh(resp->stat_ctx_id)((__uint32_t)(resp->stat_ctx_id));
3069
3070fail:
3071 BNXT_HWRM_UNLOCK(softc)mtx_leave(&softc->sc_lock);
3072
3073 return rc;
3074}
3075
3076int
3077bnxt_hwrm_stat_ctx_free(struct bnxt_softc *softc, struct bnxt_cp_ring *cpr)
3078{
3079 struct hwrm_stat_ctx_free_input req = {0};
3080 int rc = 0;
3081
3082 if (cpr->stats_ctx_id == HWRM_NA_SIGNATURE((uint32_t)(-1))) {
3083 printf("%s: attempt to free stats ctx %08x\n",
3084 DEVNAME(softc)((softc)->sc_dev.dv_xname), cpr->stats_ctx_id);
3085 return EINVAL22;
3086 }
3087
3088 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_STAT_CTX_FREE(0xb1U));
3089 req.stat_ctx_id = htole32(cpr->stats_ctx_id)((__uint32_t)(cpr->stats_ctx_id));
3090
3091 BNXT_HWRM_LOCK(softc)mtx_enter(&softc->sc_lock);
3092 rc = _hwrm_send_message(softc, &req, sizeof(req));
3093 BNXT_HWRM_UNLOCK(softc)mtx_leave(&softc->sc_lock);
3094
3095 if (rc == 0)
3096 cpr->stats_ctx_id = HWRM_NA_SIGNATURE((uint32_t)(-1));
3097
3098 return (rc);
3099}
3100
3101#if 0
3102
3103int
3104bnxt_hwrm_port_qstats(struct bnxt_softc *softc)
3105{
3106 struct hwrm_port_qstats_input req = {0};
3107 int rc = 0;
3108
3109 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_PORT_QSTATS(0x23U));
3110
3111 req.port_id = htole16(softc->pf.port_id)((__uint16_t)(softc->pf.port_id));
3112 req.rx_stat_host_addr = htole64(softc->hw_rx_port_stats.idi_paddr)((__uint64_t)(softc->hw_rx_port_stats.idi_paddr));
3113 req.tx_stat_host_addr = htole64(softc->hw_tx_port_stats.idi_paddr)((__uint64_t)(softc->hw_tx_port_stats.idi_paddr));
3114
3115 BNXT_HWRM_LOCK(softc)mtx_enter(&softc->sc_lock);
3116 rc = _hwrm_send_message(softc, &req, sizeof(req));
3117 BNXT_HWRM_UNLOCK(softc)mtx_leave(&softc->sc_lock);
3118
3119 return rc;
3120}
3121
3122#endif
3123
3124int
3125bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt_softc *softc,
3126 uint32_t vnic_id, uint32_t rx_mask, uint64_t mc_addr, uint32_t mc_count)
3127{
3128 struct hwrm_cfa_l2_set_rx_mask_input req = {0};
3129
3130 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_CFA_L2_SET_RX_MASK(0x93U));
3131
3132 req.vnic_id = htole32(vnic_id)((__uint32_t)(vnic_id));
3133 req.mask = htole32(rx_mask)((__uint32_t)(rx_mask));
3134 req.mc_tbl_addr = htole64(mc_addr)((__uint64_t)(mc_addr));
3135 req.num_mc_entries = htole32(mc_count)((__uint32_t)(mc_count));
3136 return hwrm_send_message(softc, &req, sizeof(req));
3137}
3138
3139int
3140bnxt_hwrm_set_filter(struct bnxt_softc *softc, struct bnxt_vnic_info *vnic)
3141{
3142 struct hwrm_cfa_l2_filter_alloc_input req = {0};
3143 struct hwrm_cfa_l2_filter_alloc_output *resp;
3144 uint32_t enables = 0;
3145 int rc = 0;
3146
3147 if (vnic->filter_id != -1) {
3148 printf("%s: attempt to re-allocate l2 ctx filter\n",
3149 DEVNAME(softc)((softc)->sc_dev.dv_xname));
3150 return EINVAL22;
3151 }
3152
3153 resp = BNXT_DMA_KVA(softc->sc_cmd_resp)((void *)(softc->sc_cmd_resp)->bdm_kva);
3154 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_CFA_L2_FILTER_ALLOC(0x90U));
3155
3156 req.flags = htole32(HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX((__uint32_t)((0x1U << 0) | 0x8U))
3157 | HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST)((__uint32_t)((0x1U << 0) | 0x8U));
3158 enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR0x1U
3159 | HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK0x2U
3160 | HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID0x8000U;
3161 req.enables = htole32(enables)((__uint32_t)(enables));
3162 req.dst_id = htole16(vnic->id)((__uint16_t)(vnic->id));
3163 memcpy(req.l2_addr, softc->sc_ac.ac_enaddr, ETHER_ADDR_LEN)__builtin_memcpy((req.l2_addr), (softc->sc_ac.ac_enaddr), (
6))
;
3164 memset(&req.l2_addr_mask, 0xff, sizeof(req.l2_addr_mask))__builtin_memset((&req.l2_addr_mask), (0xff), (sizeof(req
.l2_addr_mask)))
;
3165
3166 BNXT_HWRM_LOCK(softc)mtx_enter(&softc->sc_lock);
3167 rc = _hwrm_send_message(softc, &req, sizeof(req));
3168 if (rc)
3169 goto fail;
3170
3171 vnic->filter_id = le64toh(resp->l2_filter_id)((__uint64_t)(resp->l2_filter_id));
3172 vnic->flow_id = le64toh(resp->flow_id)((__uint64_t)(resp->flow_id));
3173
3174fail:
3175 BNXT_HWRM_UNLOCK(softc)mtx_leave(&softc->sc_lock);
3176 return (rc);
3177}
3178
3179int
3180bnxt_hwrm_free_filter(struct bnxt_softc *softc, struct bnxt_vnic_info *vnic)
3181{
3182 struct hwrm_cfa_l2_filter_free_input req = {0};
3183 int rc = 0;
3184
3185 if (vnic->filter_id == -1) {
3186 printf("%s: attempt to deallocate filter %llx\n",
3187 DEVNAME(softc)((softc)->sc_dev.dv_xname), vnic->filter_id);
3188 return (EINVAL22);
3189 }
3190
3191 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_CFA_L2_FILTER_FREE(0x91U));
3192 req.l2_filter_id = htole64(vnic->filter_id)((__uint64_t)(vnic->filter_id));
3193
3194 BNXT_HWRM_LOCK(softc)mtx_enter(&softc->sc_lock);
3195 rc = _hwrm_send_message(softc, &req, sizeof(req));
3196 if (rc == 0)
3197 vnic->filter_id = -1;
3198 BNXT_HWRM_UNLOCK(softc)mtx_leave(&softc->sc_lock);
3199
3200 return (rc);
3201}
3202
3203
3204int
3205bnxt_hwrm_vnic_rss_cfg(struct bnxt_softc *softc, struct bnxt_vnic_info *vnic,
3206 uint32_t hash_type, daddr_t rss_table, daddr_t rss_key)
3207{
3208 struct hwrm_vnic_rss_cfg_input req = {0};
3209
3210 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_RSS_CFG(0x46U));
3211
3212 req.hash_type = htole32(hash_type)((__uint32_t)(hash_type));
3213 req.ring_grp_tbl_addr = htole64(rss_table)((__uint64_t)(rss_table));
3214 req.hash_key_tbl_addr = htole64(rss_key)((__uint64_t)(rss_key));
3215 req.rss_ctx_idx = htole16(vnic->rss_id)((__uint16_t)(vnic->rss_id));
3216
3217 return hwrm_send_message(softc, &req, sizeof(req));
3218}
3219
3220int
3221bnxt_cfg_async_cr(struct bnxt_softc *softc, struct bnxt_cp_ring *cpr)
3222{
3223 int rc = 0;
3224
3225 if (1 /* BNXT_PF(softc) */) {
3226 struct hwrm_func_cfg_input req = {0};
3227
3228 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_CFG(0x17U));
3229
3230 req.fid = htole16(0xffff)((__uint16_t)(0xffff));
3231 req.enables = htole32(HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR)((__uint32_t)(0x4000U));
3232 req.async_event_cr = htole16(cpr->ring.phys_id)((__uint16_t)(cpr->ring.phys_id));
3233
3234 rc = hwrm_send_message(softc, &req, sizeof(req));
3235 } else {
3236 struct hwrm_func_vf_cfg_input req = {0};
3237
3238 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_VF_CFG(0xfU));
3239
3240 req.enables = htole32(HWRM_FUNC_VF_CFG_INPUT_ENABLES_ASYNC_EVENT_CR)((__uint32_t)(0x4U));
3241 req.async_event_cr = htole16(cpr->ring.phys_id)((__uint16_t)(cpr->ring.phys_id));
3242
3243 rc = hwrm_send_message(softc, &req, sizeof(req));
3244 }
3245 return rc;
3246}
3247
3248#if 0
3249
3250void
3251bnxt_validate_hw_lro_settings(struct bnxt_softc *softc)
3252{
3253 softc->hw_lro.enable = min(softc->hw_lro.enable, 1);
3254
3255 softc->hw_lro.is_mode_gro = min(softc->hw_lro.is_mode_gro, 1);
3256
3257 softc->hw_lro.max_agg_segs = min(softc->hw_lro.max_agg_segs,
3258 HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_MAX0x1fU);
3259
3260 softc->hw_lro.max_aggs = min(softc->hw_lro.max_aggs,
3261 HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX0x7U);
3262
3263 softc->hw_lro.min_agg_len = min(softc->hw_lro.min_agg_len, BNXT_MAX_MTU9500);
3264}
3265
3266int
3267bnxt_hwrm_vnic_tpa_cfg(struct bnxt_softc *softc)
3268{
3269 struct hwrm_vnic_tpa_cfg_input req = {0};
3270 uint32_t flags;
3271
3272 if (softc->vnic_info.id == (uint16_t) HWRM_NA_SIGNATURE((uint32_t)(-1))) {
3273 return 0;
3274 }
3275
3276 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_TPA_CFG(0x44U));
3277
3278 if (softc->hw_lro.enable) {
3279 flags = HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA0x1U |
3280 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA0x2U |
3281 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN0x10U |
3282 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ0x20U;
3283
3284 if (softc->hw_lro.is_mode_gro)
3285 flags |= HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO0x8U;
3286 else
3287 flags |= HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE0x4U;
3288
3289 req.flags = htole32(flags)((__uint32_t)(flags));
3290
3291 req.enables = htole32(HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |((__uint32_t)(0x1U | 0x2U | 0x8U))
3292 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS |((__uint32_t)(0x1U | 0x2U | 0x8U))
3293 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN)((__uint32_t)(0x1U | 0x2U | 0x8U));
3294
3295 req.max_agg_segs = htole16(softc->hw_lro.max_agg_segs)((__uint16_t)(softc->hw_lro.max_agg_segs));
3296 req.max_aggs = htole16(softc->hw_lro.max_aggs)((__uint16_t)(softc->hw_lro.max_aggs));
3297 req.min_agg_len = htole32(softc->hw_lro.min_agg_len)((__uint32_t)(softc->hw_lro.min_agg_len));
3298 }
3299
3300 req.vnic_id = htole16(softc->vnic_info.id)((__uint16_t)(softc->vnic_info.id));
3301
3302 return hwrm_send_message(softc, &req, sizeof(req));
3303}
3304
3305
3306int
3307bnxt_hwrm_fw_reset(struct bnxt_softc *softc, uint8_t processor,
3308 uint8_t *selfreset)
3309{
3310 struct hwrm_fw_reset_input req = {0};
3311 struct hwrm_fw_reset_output *resp =
3312 (void *)softc->hwrm_cmd_resp.idi_vaddr;
3313 int rc;
3314
3315 MPASS(selfreset);
3316
3317 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FW_RESET(0xc0U));
3318 req.embedded_proc_type = processor;
3319 req.selfrst_status = *selfreset;
3320
3321 BNXT_HWRM_LOCK(softc)mtx_enter(&softc->sc_lock);
3322 rc = _hwrm_send_message(softc, &req, sizeof(req));
3323 if (rc)
3324 goto exit;
3325 *selfreset = resp->selfrst_status;
3326
3327exit:
3328 BNXT_HWRM_UNLOCK(softc)mtx_leave(&softc->sc_lock);
3329 return rc;
3330}
3331
3332int
3333bnxt_hwrm_fw_qstatus(struct bnxt_softc *softc, uint8_t type, uint8_t *selfreset)
3334{
3335 struct hwrm_fw_qstatus_input req = {0};
3336 struct hwrm_fw_qstatus_output *resp =
3337 (void *)softc->hwrm_cmd_resp.idi_vaddr;
3338 int rc;
3339
3340 MPASS(selfreset);
3341
3342 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FW_QSTATUS(0xc1U));
3343 req.embedded_proc_type = type;
3344
3345 BNXT_HWRM_LOCK(softc)mtx_enter(&softc->sc_lock);
3346 rc = _hwrm_send_message(softc, &req, sizeof(req));
3347 if (rc)
3348 goto exit;
3349 *selfreset = resp->selfrst_status;
3350
3351exit:
3352 BNXT_HWRM_UNLOCK(softc)mtx_leave(&softc->sc_lock);
3353 return rc;
3354}
3355
3356#endif
3357
3358int
3359bnxt_hwrm_nvm_get_dev_info(struct bnxt_softc *softc, uint16_t *mfg_id,
3360 uint16_t *device_id, uint32_t *sector_size, uint32_t *nvram_size,
3361 uint32_t *reserved_size, uint32_t *available_size)
3362{
3363 struct hwrm_nvm_get_dev_info_input req = {0};
3364 struct hwrm_nvm_get_dev_info_output *resp =
3365 BNXT_DMA_KVA(softc->sc_cmd_resp)((void *)(softc->sc_cmd_resp)->bdm_kva);
3366 int rc;
3367 uint32_t old_timeo;
3368
3369 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_NVM_GET_DEV_INFO(0xfff6U));
3370
3371 BNXT_HWRM_LOCK(softc)mtx_enter(&softc->sc_lock);
3372 old_timeo = softc->sc_cmd_timeo;
3373 softc->sc_cmd_timeo = BNXT_NVM_TIMEO(5 * 60 * 1000);
3374 rc = _hwrm_send_message(softc, &req, sizeof(req));
3375 softc->sc_cmd_timeo = old_timeo;
3376 if (rc)
3377 goto exit;
3378
3379 if (mfg_id)
3380 *mfg_id = le16toh(resp->manufacturer_id)((__uint16_t)(resp->manufacturer_id));
3381 if (device_id)
3382 *device_id = le16toh(resp->device_id)((__uint16_t)(resp->device_id));
3383 if (sector_size)
3384 *sector_size = le32toh(resp->sector_size)((__uint32_t)(resp->sector_size));
3385 if (nvram_size)
3386 *nvram_size = le32toh(resp->nvram_size)((__uint32_t)(resp->nvram_size));
3387 if (reserved_size)
3388 *reserved_size = le32toh(resp->reserved_size)((__uint32_t)(resp->reserved_size));
3389 if (available_size)
3390 *available_size = le32toh(resp->available_size)((__uint32_t)(resp->available_size));
3391
3392exit:
3393 BNXT_HWRM_UNLOCK(softc)mtx_leave(&softc->sc_lock);
3394 return rc;
3395}
3396
3397#if 0
3398
3399int
3400bnxt_hwrm_fw_get_time(struct bnxt_softc *softc, uint16_t *year, uint8_t *month,
3401 uint8_t *day, uint8_t *hour, uint8_t *minute, uint8_t *second,
3402 uint16_t *millisecond, uint16_t *zone)
3403{
3404 struct hwrm_fw_get_time_input req = {0};
3405 struct hwrm_fw_get_time_output *resp =
3406 (void *)softc->hwrm_cmd_resp.idi_vaddr;
3407 int rc;
3408
3409 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FW_GET_TIME(0xc9U));
3410
3411 BNXT_HWRM_LOCK(softc)mtx_enter(&softc->sc_lock);
3412 rc = _hwrm_send_message(softc, &req, sizeof(req));
3413 if (rc)
3414 goto exit;
3415
3416 if (year)
3417 *year = le16toh(resp->year)((__uint16_t)(resp->year));
3418 if (month)
3419 *month = resp->month;
3420 if (day)
3421 *day = resp->day;
3422 if (hour)
3423 *hour = resp->hour;
3424 if (minute)
3425 *minute = resp->minute;
3426 if (second)
3427 *second = resp->second;
3428 if (millisecond)
3429 *millisecond = le16toh(resp->millisecond)((__uint16_t)(resp->millisecond));
3430 if (zone)
3431 *zone = le16toh(resp->zone)((__uint16_t)(resp->zone));
3432
3433exit:
3434 BNXT_HWRM_UNLOCK(softc)mtx_leave(&softc->sc_lock);
3435 return rc;
3436}
3437
3438int
3439bnxt_hwrm_fw_set_time(struct bnxt_softc *softc, uint16_t year, uint8_t month,
3440 uint8_t day, uint8_t hour, uint8_t minute, uint8_t second,
3441 uint16_t millisecond, uint16_t zone)
3442{
3443 struct hwrm_fw_set_time_input req = {0};
3444
3445 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FW_SET_TIME(0xc8U));
3446
3447 req.year = htole16(year)((__uint16_t)(year));
3448 req.month = month;
3449 req.day = day;
3450 req.hour = hour;
3451 req.minute = minute;
3452 req.second = second;
3453 req.millisecond = htole16(millisecond)((__uint16_t)(millisecond));
3454 req.zone = htole16(zone)((__uint16_t)(zone));
3455 return hwrm_send_message(softc, &req, sizeof(req));
3456}
3457
3458#endif
3459
3460void
3461_bnxt_hwrm_set_async_event_bit(struct hwrm_func_drv_rgtr_input *req, int bit)
3462{
3463 req->async_event_fwd[bit/32] |= (1 << (bit % 32));
3464}
3465
3466int bnxt_hwrm_func_rgtr_async_events(struct bnxt_softc *softc)
3467{
3468 struct hwrm_func_drv_rgtr_input req = {0};
3469 int events[] = {
3470 HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE0x0U,
3471 HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD0x20U,
3472 HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED0x4U,
3473 HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE0x33U,
3474 HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE0x6U
3475 };
3476 int i;
3477
3478 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_DRV_RGTR(0x1dU));
3479
3480 req.enables =
3481 htole32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD)((__uint32_t)(0x10U));
3482
3483 for (i = 0; i < nitems(events)(sizeof((events)) / sizeof((events)[0])); i++)
3484 _bnxt_hwrm_set_async_event_bit(&req, events[i]);
3485
3486 return hwrm_send_message(softc, &req, sizeof(req));
3487}
3488
3489int
3490bnxt_get_sffpage(struct bnxt_softc *softc, struct if_sffpage *sff)
3491{
3492 struct hwrm_port_phy_i2c_read_input req;
3493 struct hwrm_port_phy_i2c_read_output *out;
3494 int offset;
3495
3496 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_PORT_PHY_I2C_READ(0x2cU));
3497 req.i2c_slave_addr = sff->sff_addr;
3498 req.page_number = htole16(sff->sff_page)((__uint16_t)(sff->sff_page));
3499
3500 for (offset = 0; offset < 256; offset += sizeof(out->data)) {
3501 req.page_offset = htole16(offset)((__uint16_t)(offset));
3502 req.data_length = sizeof(out->data);
3503 req.enables = htole32(HWRM_PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET)((__uint32_t)(0x1UL));
3504
3505 if (hwrm_send_message(softc, &req, sizeof(req))) {
3506 printf("%s: failed to read i2c data\n", DEVNAME(softc)((softc)->sc_dev.dv_xname));
3507 return 1;
3508 }
3509
3510 out = (struct hwrm_port_phy_i2c_read_output *)
3511 BNXT_DMA_KVA(softc->sc_cmd_resp)((void *)(softc->sc_cmd_resp)->bdm_kva);
3512 memcpy(sff->sff_data + offset, out->data, sizeof(out->data))__builtin_memcpy((sff->sff_data + offset), (out->data),
(sizeof(out->data)))
;
3513 }
3514
3515 return 0;
3516}