Bug Summary

File: dev/pci/if_iavf.c
Warning: line 1742, column 7
The left expression of the compound assignment is an uninitialized value. The computed value will also be garbage
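For context, a minimal standalone sketch of the pattern this checker flags (illustrative C, not taken from the driver): a compound assignment such as |= reads its left-hand side before writing it, so if no path has initialized that variable the computed value is garbage.

	#include <stdint.h>

	uint32_t
	example(int masked)
	{
		uint32_t reg;		/* no initializer */

		if (masked)
			reg = 0x1;	/* only initialized on this path */

		/*
		 * "reg |=" reads reg first; when masked == 0 the read is of
		 * an uninitialized value and the result is garbage, which is
		 * what the analyzer reports.
		 */
		reg |= 0x2;

		return (reg);
	}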

Annotated Source Code


clang -cc1 -cc1 -triple amd64-unknown-openbsd7.4 -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name if_iavf.c -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -ffp-contract=on -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -target-feature +retpoline-external-thunk -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/llvm16/lib/clang/16 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/legacy-dpm -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu13 -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/inc -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc/pmfw_if -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D SUSPEND -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant 
-fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fcf-protection=branch -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /home/ben/Projects/scan/2024-01-11-110808-61670-1 -x c /usr/src/sys/dev/pci/if_iavf.c
1 /* $OpenBSD: if_iavf.c,v 1.12 2023/11/10 15:51:20 bluhm Exp $ */
2
3 /*
4 * Copyright (c) 2013-2015, Intel Corporation
5 * All rights reserved.
6
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions are met:
9 *
10 * 1. Redistributions of source code must retain the above copyright notice,
11 * this list of conditions and the following disclaimer.
12 *
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * 3. Neither the name of the Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived from
19 * this software without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
25 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE.
32 */
33
34 /*
35 * Copyright (c) 2016,2017 David Gwynne <dlg@openbsd.org>
36 * Copyright (c) 2019 Jonathan Matthew <jmatthew@openbsd.org>
37 *
38 * Permission to use, copy, modify, and distribute this software for any
39 * purpose with or without fee is hereby granted, provided that the above
40 * copyright notice and this permission notice appear in all copies.
41 *
42 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
43 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
44 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
45 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
46 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
47 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
48 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
49 */
50
51 #include "bpfilter.h"
52
53 #include <sys/param.h>
54 #include <sys/systm.h>
55 #include <sys/proc.h>
56 #include <sys/sockio.h>
57 #include <sys/mbuf.h>
58 #include <sys/kernel.h>
59 #include <sys/socket.h>
60 #include <sys/device.h>
61 #include <sys/pool.h>
62 #include <sys/queue.h>
63 #include <sys/timeout.h>
64 #include <sys/task.h>
65 #include <sys/syslog.h>
66
67 #include <machine/bus.h>
68 #include <machine/intr.h>
69
70 #include <net/if.h>
71 #include <net/if_dl.h>
72 #include <net/if_media.h>
73
74 #if NBPFILTER > 0
75 #include <net/bpf.h>
76 #endif
77
78 #include <netinet/in.h>
79 #include <netinet/if_ether.h>
80
81 #include <dev/pci/pcireg.h>
82 #include <dev/pci/pcivar.h>
83 #include <dev/pci/pcidevs.h>
84
85 #define I40E_MASK(mask, shift) ((mask) << (shift))
86 #define I40E_AQ_LARGE_BUF 512
87
88 #define IAVF_REG_VFR 0xdeadbeef
89
90 #define IAVF_VFR_INPROGRESS 0
91 #define IAVF_VFR_COMPLETED 1
92 #define IAVF_VFR_VFACTIVE 2
93
94 #include <dev/pci/if_ixlreg.h>
95
96 struct iavf_aq_desc {
97 uint16_t iaq_flags;
98 #define IAVF_AQ_DD (1U << 0)
99 #define IAVF_AQ_CMP (1U << 1)
100 #define IAVF_AQ_ERR (1U << 2)
101 #define IAVF_AQ_VFE (1U << 3)
102 #define IAVF_AQ_LB (1U << 9)
103 #define IAVF_AQ_RD (1U << 10)
104 #define IAVF_AQ_VFC (1U << 11)
105 #define IAVF_AQ_BUF (1U << 12)
106 #define IAVF_AQ_SI (1U << 13)
107 #define IAVF_AQ_EI (1U << 14)
108 #define IAVF_AQ_FE (1U << 15)
109
110 #define IAVF_AQ_FLAGS_FMT "\020" "\020FE" "\017EI" "\016SI" "\015BUF" \
111     "\014VFC" "\013DB" "\012LB" "\004VFE" \
112     "\003ERR" "\002CMP" "\001DD"
113
114 uint16_t iaq_opcode;
115
116 uint16_t iaq_datalen;
117 uint16_t iaq_retval;
118
119 uint32_t iaq_vc_opcode;
120 uint32_t iaq_vc_retval;
121
122 uint32_t iaq_param[4];
123 /* iaq_vfid iaq_param[0] */
124 /* iaq_data_hi iaq_param[2] */
125 /* iaq_data_lo iaq_param[3] */
126 } __packed __aligned(8);
127
128 /* aq commands */
129 #define IAVF_AQ_OP_SEND_TO_PF 0x0801
130 #define IAVF_AQ_OP_MSG_FROM_PF 0x0802
131 #define IAVF_AQ_OP_SHUTDOWN 0x0803
132
133 /* virt channel messages */
134 #define IAVF_VC_OP_VERSION 1
135 #define IAVF_VC_OP_RESET_VF 2
136 #define IAVF_VC_OP_GET_VF_RESOURCES 3
137 #define IAVF_VC_OP_CONFIG_TX_QUEUE 4
138 #define IAVF_VC_OP_CONFIG_RX_QUEUE 5
139 #define IAVF_VC_OP_CONFIG_VSI_QUEUES 6
140 #define IAVF_VC_OP_CONFIG_IRQ_MAP 7
141 #define IAVF_VC_OP_ENABLE_QUEUES 8
142 #define IAVF_VC_OP_DISABLE_QUEUES 9
143 #define IAVF_VC_OP_ADD_ETH_ADDR 10
144 #define IAVF_VC_OP_DEL_ETH_ADDR 11
145 #define IAVF_VC_OP_ADD_VLAN 12
146 #define IAVF_VC_OP_DEL_VLAN 13
147 #define IAVF_VC_OP_CONFIG_PROMISC 14
148 #define IAVF_VC_OP_GET_STATS 15
149 #define IAVF_VC_OP_EVENT 17
150 #define IAVF_VC_OP_GET_RSS_HENA_CAPS 25
151 #define IAVF_VC_OP_SET_RSS_HENA 26
152
153 /* virt channel response codes */
154 #define IAVF_VC_RC_SUCCESS 0
155 #define IAVF_VC_RC_ERR_PARAM -5
156 #define IAVF_VC_RC_ERR_OPCODE -38
157 #define IAVF_VC_RC_ERR_CQP_COMPL -39
158 #define IAVF_VC_RC_ERR_VF_ID -40
159 #define IAVF_VC_RC_ERR_NOT_SUP -64
160
161 /* virt channel events */
162 #define IAVF_VC_EVENT_LINK_CHANGE 1
163 #define IAVF_VC_EVENT_RESET_IMPENDING 2
164 #define IAVF_VC_EVENT_PF_DRIVER_CLOSE 3
165
166 /* virt channel offloads */
167 #define IAVF_VC_OFFLOAD_L2 0x00000001
168 #define IAVF_VC_OFFLOAD_IWARP 0x00000002
169 #define IAVF_VC_OFFLOAD_RSVD 0x00000004
170 #define IAVF_VC_OFFLOAD_RSS_AQ 0x00000008
171 #define IAVF_VC_OFFLOAD_RSS_REG 0x00000010
172 #define IAVF_VC_OFFLOAD_WB_ON_ITR 0x00000020
173 #define IAVF_VC_OFFLOAD_VLAN 0x00010000
174 #define IAVF_VC_OFFLOAD_RX_POLLING 0x00020000
175 #define IAVF_VC_OFFLOAD_RSS_PCTYPE_V2 0x00040000
176 #define IAVF_VC_OFFLOAD_RSS_PF 0x00080000
177 #define IAVF_VC_OFFLOAD_ENCAP 0x00100000
178 #define IAVF_VC_OFFLOAD_ENCAP_CSUM 0x00200000
179 #define IAVF_VC_OFFLOAD_RX_ENCAP_CSUM 0x00400000
180
181 /* link speeds */
182 #define IAVF_VC_LINK_SPEED_100MB 0x1
183 #define IAVC_VC_LINK_SPEED_1000MB 0x2
184 #define IAVC_VC_LINK_SPEED_10GB 0x3
185 #define IAVC_VC_LINK_SPEED_40GB 0x4
186 #define IAVC_VC_LINK_SPEED_20GB 0x5
187 #define IAVC_VC_LINK_SPEED_25GB 0x6
188
189 struct iavf_link_speed {
190 uint64_t baudrate;
191 uint64_t media;
192 };
193
194 static const struct iavf_link_speed iavf_link_speeds[] = {
195 { 0, 0 },
196 { IF_Mbps(100), IFM_100_TX },
197 { IF_Mbps(1000), IFM_1000_T },
198 { IF_Gbps(10), IFM_10G_T },
199 { IF_Gbps(40), IFM_40G_CR4 },
200 { IF_Gbps(20), IFM_20G_KR2 },
201 { IF_Gbps(25), IFM_25G_CR }
202 };
203
204
205 struct iavf_vc_version_info {
206 uint32_t major;
207 uint32_t minor;
208 } __packed;
209
210 struct iavf_vc_txq_info {
211 uint16_t vsi_id;
212 uint16_t queue_id;
213 uint16_t ring_len;
214 uint16_t headwb_ena; /* deprecated */
215 uint64_t dma_ring_addr;
216 uint64_t dma_headwb_addr; /* deprecated */
217 } __packed;
218
219 struct iavf_vc_rxq_info {
220 uint16_t vsi_id;
221 uint16_t queue_id;
222 uint32_t ring_len;
223 uint16_t hdr_size;
224 uint16_t splithdr_ena;
225 uint32_t databuf_size;
226 uint32_t max_pkt_size;
227 uint32_t pad1;
228 uint64_t dma_ring_addr;
229 uint32_t rx_split_pos;
230 uint32_t pad2;
231 } __packed;
232
233 struct iavf_vc_queue_pair_info {
234 struct iavf_vc_txq_info txq;
235 struct iavf_vc_rxq_info rxq;
236 } __packed;
237
238 struct iavf_vc_queue_config_info {
239 uint16_t vsi_id;
240 uint16_t num_queue_pairs;
241 uint32_t pad;
242 struct iavf_vc_queue_pair_info qpair[1];
243 } __packed;
244
245 struct iavf_vc_vector_map {
246 uint16_t vsi_id;
247 uint16_t vector_id;
248 uint16_t rxq_map;
249 uint16_t txq_map;
250 uint16_t rxitr_idx;
251 uint16_t txitr_idx;
252 } __packed;
253
254 struct iavf_vc_irq_map_info {
255 uint16_t num_vectors;
256 struct iavf_vc_vector_map vecmap[1];
257 } __packed;
258
259 struct iavf_vc_queue_select {
260 uint16_t vsi_id;
261 uint16_t pad;
262 uint32_t rx_queues;
263 uint32_t tx_queues;
264 } __packed;
265
266 struct iavf_vc_vsi_resource {
267 uint16_t vsi_id;
268 uint16_t num_queue_pairs;
269 uint32_t vsi_type;
270 uint16_t qset_handle;
271 uint8_t default_mac[ETHER_ADDR_LEN];
272 } __packed;
273
274 struct iavf_vc_vf_resource {
275 uint16_t num_vsis;
276 uint16_t num_qp;
277 uint16_t max_vectors;
278 uint16_t max_mtu;
279 uint32_t offload_flags;
280 uint32_t rss_key_size;
281 uint32_t rss_lut_size;
282 struct iavf_vc_vsi_resource vsi_res[1];
283 } __packed;
284
285 struct iavf_vc_eth_addr {
286 uint8_t addr[ETHER_ADDR_LEN];
287 uint8_t pad[2];
288 } __packed;
289
290 struct iavf_vc_eth_addr_list {
291 uint16_t vsi_id;
292 uint16_t num_elements;
293 struct iavf_vc_eth_addr list[1];
294 } __packed;
295
296 struct iavf_vc_vlan_list {
297 uint16_t vsi_id;
298 uint16_t num_elements;
299 uint16_t vlan_id[1];
300 } __packed;
301
302 struct iavf_vc_promisc_info {
303 uint16_t vsi_id;
304 uint16_t flags;
305 #define IAVF_FLAG_VF_UNICAST_PROMISC 0x0001
306 #define IAVF_FLAG_VF_MULTICAST_PROMISC 0x0002
307 } __packed;
308
309 struct iavf_vc_pf_event {
310 uint32_t event;
311 uint32_t link_speed;
312 uint8_t link_status;
313 uint8_t pad[3];
314 uint32_t severity;
315 } __packed;
316
317 /* aq response codes */
318 #define IAVF_AQ_RC_OK 0 /* success */
319 #define IAVF_AQ_RC_EPERM 1 /* Operation not permitted */
320 #define IAVF_AQ_RC_ENOENT 2 /* No such element */
321 #define IAVF_AQ_RC_ESRCH 3 /* Bad opcode */
322 #define IAVF_AQ_RC_EINTR 4 /* operation interrupted */
323 #define IAVF_AQ_RC_EIO 5 /* I/O error */
324 #define IAVF_AQ_RC_ENXIO 6 /* No such resource */
325 #define IAVF_AQ_RC_E2BIG 7 /* Arg too long */
326 #define IAVF_AQ_RC_EAGAIN 8 /* Try again */
327 #define IAVF_AQ_RC_ENOMEM 9 /* Out of memory */
328 #define IAVF_AQ_RC_EACCES 10 /* Permission denied */
329 #define IAVF_AQ_RC_EFAULT 11 /* Bad address */
330 #define IAVF_AQ_RC_EBUSY 12 /* Device or resource busy */
331 #define IAVF_AQ_RC_EEXIST 13 /* object already exists */
332 #define IAVF_AQ_RC_EINVAL 14 /* invalid argument */
333 #define IAVF_AQ_RC_ENOTTY 15 /* not a typewriter */
334 #define IAVF_AQ_RC_ENOSPC 16 /* No space or alloc failure */
335 #define IAVF_AQ_RC_ENOSYS 17 /* function not implemented */
336 #define IAVF_AQ_RC_ERANGE 18 /* parameter out of range */
337 #define IAVF_AQ_RC_EFLUSHED 19 /* cmd flushed due to prev error */
338 #define IAVF_AQ_RC_BAD_ADDR 20 /* contains a bad pointer */
339 #define IAVF_AQ_RC_EMODE 21 /* not allowed in current mode */
340 #define IAVF_AQ_RC_EFBIG 22 /* file too large */
341
342 struct iavf_tx_desc {
343 uint64_t addr;
344 uint64_t cmd;
345 #define IAVF_TX_DESC_DTYPE_SHIFT 0
346 #define IAVF_TX_DESC_DTYPE_MASK (0xfULL << IAVF_TX_DESC_DTYPE_SHIFT)
347 #define IAVF_TX_DESC_DTYPE_DATA (0x0ULL << IAVF_TX_DESC_DTYPE_SHIFT)
348 #define IAVF_TX_DESC_DTYPE_NOP (0x1ULL << IAVF_TX_DESC_DTYPE_SHIFT)
349 #define IAVF_TX_DESC_DTYPE_CONTEXT (0x1ULL << IAVF_TX_DESC_DTYPE_SHIFT)
350 #define IAVF_TX_DESC_DTYPE_FCOE_CTX (0x2ULL << IAVF_TX_DESC_DTYPE_SHIFT)
351 #define IAVF_TX_DESC_DTYPE_FD (0x8ULL << IAVF_TX_DESC_DTYPE_SHIFT)
352 #define IAVF_TX_DESC_DTYPE_DDP_CTX (0x9ULL << IAVF_TX_DESC_DTYPE_SHIFT)
353 #define IAVF_TX_DESC_DTYPE_FLEX_DATA (0xbULL << IAVF_TX_DESC_DTYPE_SHIFT)
354 #define IAVF_TX_DESC_DTYPE_FLEX_CTX_1 (0xcULL << IAVF_TX_DESC_DTYPE_SHIFT)
355 #define IAVF_TX_DESC_DTYPE_FLEX_CTX_2 (0xdULL << IAVF_TX_DESC_DTYPE_SHIFT)
356 #define IAVF_TX_DESC_DTYPE_DONE (0xfULL << IAVF_TX_DESC_DTYPE_SHIFT)
357
358 #define IAVF_TX_DESC_CMD_SHIFT 4
359 #define IAVF_TX_DESC_CMD_MASK (0x3ffULL << IAVF_TX_DESC_CMD_SHIFT)
360 #define IAVF_TX_DESC_CMD_EOP (0x001 << IAVF_TX_DESC_CMD_SHIFT)
361 #define IAVF_TX_DESC_CMD_RS (0x002 << IAVF_TX_DESC_CMD_SHIFT)
362 #define IAVF_TX_DESC_CMD_ICRC (0x004 << IAVF_TX_DESC_CMD_SHIFT)
363 #define IAVF_TX_DESC_CMD_IL2TAG1 (0x008 << IAVF_TX_DESC_CMD_SHIFT)
364 #define IAVF_TX_DESC_CMD_DUMMY (0x010 << IAVF_TX_DESC_CMD_SHIFT)
365 #define IAVF_TX_DESC_CMD_IIPT_MASK (0x060 << IAVF_TX_DESC_CMD_SHIFT)
366 #define IAVF_TX_DESC_CMD_IIPT_NONIP (0x000 << IAVF_TX_DESC_CMD_SHIFT)
367 #define IAVF_TX_DESC_CMD_IIPT_IPV6 (0x020 << IAVF_TX_DESC_CMD_SHIFT)
368 #define IAVF_TX_DESC_CMD_IIPT_IPV4 (0x040 << IAVF_TX_DESC_CMD_SHIFT)
369 #define IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM (0x060 << IAVF_TX_DESC_CMD_SHIFT)
370 #define IAVF_TX_DESC_CMD_FCOET (0x080 << IAVF_TX_DESC_CMD_SHIFT)
371 #define IAVF_TX_DESC_CMD_L4T_EOFT_MASK (0x300 << IAVF_TX_DESC_CMD_SHIFT)
372 #define IAVF_TX_DESC_CMD_L4T_EOFT_UNK (0x000 << IAVF_TX_DESC_CMD_SHIFT)
373 #define IAVF_TX_DESC_CMD_L4T_EOFT_TCP (0x100 << IAVF_TX_DESC_CMD_SHIFT)
374 #define IAVF_TX_DESC_CMD_L4T_EOFT_SCTP (0x200 << IAVF_TX_DESC_CMD_SHIFT)
375 #define IAVF_TX_DESC_CMD_L4T_EOFT_UDP (0x300 << IAVF_TX_DESC_CMD_SHIFT)
376
377 #define IAVF_TX_DESC_MACLEN_SHIFT 16
378 #define IAVF_TX_DESC_MACLEN_MASK (0x7fULL << IAVF_TX_DESC_MACLEN_SHIFT)
379 #define IAVF_TX_DESC_IPLEN_SHIFT 23
380 #define IAVF_TX_DESC_IPLEN_MASK (0x7fULL << IAVF_TX_DESC_IPLEN_SHIFT)
381 #define IAVF_TX_DESC_L4LEN_SHIFT 30
382 #define IAVF_TX_DESC_L4LEN_MASK (0xfULL << IAVF_TX_DESC_L4LEN_SHIFT)
383 #define IAVF_TX_DESC_FCLEN_SHIFT 30
384 #define IAVF_TX_DESC_FCLEN_MASK (0xfULL << IAVF_TX_DESC_FCLEN_SHIFT)
385
386 #define IAVF_TX_DESC_BSIZE_SHIFT 34
387 #define IAVF_TX_DESC_BSIZE_MAX 0x3fffULL
388 #define IAVF_TX_DESC_BSIZE_MASK \
389     (IAVF_TX_DESC_BSIZE_MAX << IAVF_TX_DESC_BSIZE_SHIFT)
390
391 #define IAVF_TX_DESC_L2TAG1_SHIFT 48
392 #define IAVF_TX_DESC_L2TAG1_MASK (0xffff << IAVF_TX_DESC_L2TAG1_SHIFT)
393 } __packed __aligned(16);
394
395 struct iavf_rx_rd_desc_16 {
396 uint64_t paddr; /* packet addr */
397 uint64_t haddr; /* header addr */
398 } __packed __aligned(16);
399
400 struct iavf_rx_rd_desc_32 {
401 uint64_t paddr; /* packet addr */
402 uint64_t haddr; /* header addr */
403 uint64_t _reserved1;
404 uint64_t _reserved2;
405 } __packed __aligned(16);
406
407 struct iavf_rx_wb_desc_16 {
408 uint64_t qword0;
409 #define IAVF_RX_DESC_L2TAG1_SHIFT 16
410 #define IAVF_RX_DESC_L2TAG1_MASK (0xffff << IAVF_RX_DESC_L2TAG1_SHIFT)
411 uint64_t qword1;
412 #define IAVF_RX_DESC_DD (1 << 0)
413 #define IAVF_RX_DESC_EOP (1 << 1)
414 #define IAVF_RX_DESC_L2TAG1P (1 << 2)
415 #define IAVF_RX_DESC_L3L4P (1 << 3)
416 #define IAVF_RX_DESC_CRCP (1 << 4)
417 #define IAVF_RX_DESC_TSYNINDX_SHIFT 5 /* TSYNINDX */
418 #define IAVF_RX_DESC_TSYNINDX_MASK (7 << IAVF_RX_DESC_TSYNINDX_SHIFT)
419 #define IAVF_RX_DESC_UMB_SHIFT 9
420 #define IAVF_RX_DESC_UMB_MASK (0x3 << IAVF_RX_DESC_UMB_SHIFT)
421 #define IAVF_RX_DESC_UMB_UCAST (0x0 << IAVF_RX_DESC_UMB_SHIFT)
422 #define IAVF_RX_DESC_UMB_MCAST (0x1 << IAVF_RX_DESC_UMB_SHIFT)
423 #define IAVF_RX_DESC_UMB_BCAST (0x2 << IAVF_RX_DESC_UMB_SHIFT)
424 #define IAVF_RX_DESC_UMB_MIRROR (0x3 << IAVF_RX_DESC_UMB_SHIFT)
425 #define IAVF_RX_DESC_FLM (1 << 11)
426 #define IAVF_RX_DESC_FLTSTAT_SHIFT 12
427 #define IAVF_RX_DESC_FLTSTAT_MASK (0x3 << IAVF_RX_DESC_FLTSTAT_SHIFT)
428 #define IAVF_RX_DESC_FLTSTAT_NODATA (0x0 << IAVF_RX_DESC_FLTSTAT_SHIFT)
429 #define IAVF_RX_DESC_FLTSTAT_FDFILTID (0x1 << IAVF_RX_DESC_FLTSTAT_SHIFT)
430 #define IAVF_RX_DESC_FLTSTAT_RSS (0x3 << IAVF_RX_DESC_FLTSTAT_SHIFT)
431 #define IAVF_RX_DESC_LPBK (1 << 14)
432 #define IAVF_RX_DESC_IPV6EXTADD (1 << 15)
433 #define IAVF_RX_DESC_INT_UDP_0 (1 << 18)
434
435 #define IAVF_RX_DESC_RXE (1 << 19)
436 #define IAVF_RX_DESC_HBO (1 << 21)
437 #define IAVF_RX_DESC_IPE (1 << 22)
438 #define IAVF_RX_DESC_L4E (1 << 23)
439 #define IAVF_RX_DESC_EIPE (1 << 24)
440 #define IAVF_RX_DESC_OVERSIZE (1 << 25)
441
442 #define IAVF_RX_DESC_PTYPE_SHIFT 30
443 #define IAVF_RX_DESC_PTYPE_MASK (0xffULL << IAVF_RX_DESC_PTYPE_SHIFT)
444
445 #define IAVF_RX_DESC_PLEN_SHIFT 38
446 #define IAVF_RX_DESC_PLEN_MASK (0x3fffULL << IAVF_RX_DESC_PLEN_SHIFT)
447 #define IAVF_RX_DESC_HLEN_SHIFT 42
448 #define IAVF_RX_DESC_HLEN_MASK (0x7ffULL << IAVF_RX_DESC_HLEN_SHIFT)
449 } __packed __aligned(16);
450
451 struct iavf_rx_wb_desc_32 {
452 uint64_t qword0;
453 uint64_t qword1;
454 uint64_t qword2;
455 uint64_t qword3;
456 } __packed __aligned(16);
457
458
459 #define IAVF_VF_MAJOR 1
460 #define IAVF_VF_MINOR 1
461
462 #define IAVF_TX_PKT_DESCS 8
463 #define IAVF_TX_QUEUE_ALIGN 128
464 #define IAVF_RX_QUEUE_ALIGN 128
465
466 #define IAVF_HARDMTU 9712 /* 9726 - ETHER_HDR_LEN */
467
468 #define IAVF_PCIREG PCI_MAPREG_START
469
470 #define IAVF_ITR0 0x0
471 #define IAVF_ITR1 0x1
472 #define IAVF_ITR2 0x2
473 #define IAVF_NOITR 0x3
474
475 #define IAVF_AQ_NUM 256
476 #define IAVF_AQ_MASK (IAVF_AQ_NUM - 1)
477 #define IAVF_AQ_ALIGN 64 /* lol */
478 #define IAVF_AQ_BUFLEN 4096
479
480 struct iavf_aq_regs {
481 bus_size_t atq_tail;
482 bus_size_t atq_head;
483 bus_size_t atq_len;
484 bus_size_t atq_bal;
485 bus_size_t atq_bah;
486
487 bus_size_t arq_tail;
488 bus_size_t arq_head;
489 bus_size_t arq_len;
490 bus_size_t arq_bal;
491 bus_size_t arq_bah;
492
493 uint32_t atq_len_enable;
494 uint32_t atq_tail_mask;
495 uint32_t atq_head_mask;
496
497 uint32_t arq_len_enable;
498 uint32_t arq_tail_mask;
499 uint32_t arq_head_mask;
500 };
501
502 struct iavf_aq_buf {
503 SIMPLEQ_ENTRY(iavf_aq_buf)
504 aqb_entry;
505 void *aqb_data;
506 bus_dmamap_t aqb_map;
507 };
508 SIMPLEQ_HEAD(iavf_aq_bufs, iavf_aq_buf);
509
510 struct iavf_dmamem {
511 bus_dmamap_t ixm_map;
512 bus_dma_segment_t ixm_seg;
513 int ixm_nsegs;
514 size_t ixm_size;
515 caddr_t ixm_kva;
516 };
517 #define IAVF_DMA_MAP(_ixm) ((_ixm)->ixm_map)
518 #define IAVF_DMA_DVA(_ixm) ((_ixm)->ixm_map->dm_segs[0].ds_addr)
519 #define IAVF_DMA_KVA(_ixm) ((void *)(_ixm)->ixm_kva)
520 #define IAVF_DMA_LEN(_ixm) ((_ixm)->ixm_size)
521
522 struct iavf_tx_map {
523 struct mbuf *txm_m;
524 bus_dmamap_t txm_map;
525 unsigned int txm_eop;
526 };
527
528 struct iavf_tx_ring {
529 unsigned int txr_prod;
530 unsigned int txr_cons;
531
532 struct iavf_tx_map *txr_maps;
533 struct iavf_dmamem txr_mem;
534
535 bus_size_t txr_tail;
536 unsigned int txr_qid;
537 };
538
539 struct iavf_rx_map {
540 struct mbuf *rxm_m;
541 bus_dmamap_t rxm_map;
542 };
543
544 struct iavf_rx_ring {
545 struct iavf_softc *rxr_sc;
546
547 struct if_rxring rxr_acct;
548 struct timeout rxr_refill;
549
550 unsigned int rxr_prod;
551 unsigned int rxr_cons;
552
553 struct iavf_rx_map *rxr_maps;
554 struct iavf_dmamem rxr_mem;
555
556 struct mbuf *rxr_m_head;
557 struct mbuf **rxr_m_tail;
558
559 bus_size_t rxr_tail;
560 unsigned int rxr_qid;
561 };
562
563 struct iavf_softc {
564 struct device sc_dev;
565 struct arpcom sc_ac;
566 struct ifmedia sc_media;
567 uint64_t sc_media_status;
568 uint64_t sc_media_active;
569
570 pci_chipset_tag_t sc_pc;
571 pci_intr_handle_t sc_ih;
572 void *sc_ihc;
573 pcitag_t sc_tag;
574
575 bus_dma_tag_t sc_dmat;
576 bus_space_tag_t sc_memt;
577 bus_space_handle_t sc_memh;
578 bus_size_t sc_mems;
579
580 uint32_t sc_major_ver;
581 uint32_t sc_minor_ver;
582
583 int sc_got_vf_resources;
584 int sc_got_irq_map;
585 uint32_t sc_vf_id;
586 uint16_t sc_vsi_id;
587 uint16_t sc_qset_handle;
588 unsigned int sc_base_queue;
589
590 struct cond sc_admin_cond;
591 int sc_admin_result;
592 struct timeout sc_admin_timeout;
593
594 struct iavf_dmamem sc_scratch;
595
596 const struct iavf_aq_regs *
597 sc_aq_regs;
598
599 struct mutex sc_atq_mtx;
600 struct iavf_dmamem sc_atq;
601 unsigned int sc_atq_prod;
602 unsigned int sc_atq_cons;
603
604 struct iavf_dmamem sc_arq;
605 struct iavf_aq_bufs sc_arq_idle;
606 struct iavf_aq_bufs sc_arq_live;
607 struct if_rxring sc_arq_ring;
608 unsigned int sc_arq_prod;
609 unsigned int sc_arq_cons;
610
611 struct task sc_reset_task;
612 int sc_resetting;
613
614 unsigned int sc_tx_ring_ndescs;
615 unsigned int sc_rx_ring_ndescs;
616 unsigned int sc_nqueues; /* 1 << sc_nqueues */
617
618 struct rwlock sc_cfg_lock;
619 unsigned int sc_dead;
620
621 uint8_t sc_enaddr[ETHER_ADDR_LEN];
622 };
623 #define DEVNAME(_sc) ((_sc)->sc_dev.dv_xname)
624
625 #define delaymsec(_ms) delay(1000 * (_ms))
626
627 static int iavf_dmamem_alloc(struct iavf_softc *, struct iavf_dmamem *,
628     bus_size_t, u_int);
629 static void iavf_dmamem_free(struct iavf_softc *, struct iavf_dmamem *);
630
631 static int iavf_arq_fill(struct iavf_softc *, int);
632 static void iavf_arq_unfill(struct iavf_softc *);
633 static void iavf_arq_timeout(void *);
634 static int iavf_arq_wait(struct iavf_softc *, int);
635
636 static int iavf_atq_post(struct iavf_softc *, struct iavf_aq_desc *);
637 static void iavf_atq_done(struct iavf_softc *);
638
639 static void iavf_init_admin_queue(struct iavf_softc *);
640
641 static int iavf_get_version(struct iavf_softc *);
642 static int iavf_get_vf_resources(struct iavf_softc *);
643 static int iavf_config_irq_map(struct iavf_softc *);
644
645 static int iavf_add_del_addr(struct iavf_softc *, uint8_t *, int);
646 static int iavf_process_arq(struct iavf_softc *, int);
647
648 static int iavf_match(struct device *, void *, void *);
649 static void iavf_attach(struct device *, struct device *, void *);
650
651 static int iavf_media_change(struct ifnet *);
652 static void iavf_media_status(struct ifnet *, struct ifmediareq *);
653 static void iavf_watchdog(struct ifnet *);
654 static int iavf_ioctl(struct ifnet *, u_long, caddr_t);
655 static void iavf_start(struct ifqueue *);
656 static int iavf_intr(void *);
657 static int iavf_up(struct iavf_softc *);
658 static int iavf_down(struct iavf_softc *);
659 static int iavf_iff(struct iavf_softc *);
660 static void iavf_reset(void *);
661
662 static struct iavf_tx_ring *
663     iavf_txr_alloc(struct iavf_softc *, unsigned int);
664 static void iavf_txr_clean(struct iavf_softc *, struct iavf_tx_ring *);
665 static void iavf_txr_free(struct iavf_softc *, struct iavf_tx_ring *);
666 static int iavf_txeof(struct iavf_softc *, struct ifqueue *);
667
668 static struct iavf_rx_ring *
669     iavf_rxr_alloc(struct iavf_softc *, unsigned int);
670 static void iavf_rxr_clean(struct iavf_softc *, struct iavf_rx_ring *);
671 static void iavf_rxr_free(struct iavf_softc *, struct iavf_rx_ring *);
672 static int iavf_rxeof(struct iavf_softc *, struct ifiqueue *);
673 static void iavf_rxfill(struct iavf_softc *, struct iavf_rx_ring *);
674 static void iavf_rxrefill(void *);
675 static int iavf_rxrinfo(struct iavf_softc *, struct if_rxrinfo *);
676
677 struct cfdriver iavf_cd = {
678 NULL,
679 "iavf",
680 DV_IFNET,
681 };
682
683 const struct cfattach iavf_ca = {
684 sizeof(struct iavf_softc),
685 iavf_match,
686 iavf_attach,
687 };
688
689 static const struct iavf_aq_regs iavf_aq_regs = {
690 .atq_tail = I40E_VF_ATQT1,
691 .atq_tail_mask = I40E_VF_ATQT1_ATQT_MASK,
692 .atq_head = I40E_VF_ATQH1,
693 .atq_head_mask = I40E_VF_ARQH1_ARQH_MASK,
694 .atq_len = I40E_VF_ATQLEN1,
695 .atq_bal = I40E_VF_ATQBAL1,
696 .atq_bah = I40E_VF_ATQBAH1,
697 .atq_len_enable = I40E_VF_ATQLEN1_ATQENABLE_MASK,
698
699 .arq_tail = I40E_VF_ARQT1,
700 .arq_tail_mask = I40E_VF_ARQT1_ARQT_MASK,
701 .arq_head = I40E_VF_ARQH1,
702 .arq_head_mask = I40E_VF_ARQH1_ARQH_MASK,
703 .arq_len = I40E_VF_ARQLEN1,
704 .arq_bal = I40E_VF_ARQBAL1,
705 .arq_bah = I40E_VF_ARQBAH1,
706 .arq_len_enable = I40E_VF_ARQLEN1_ARQENABLE_MASK,
707 };
708
709 #define iavf_rd(_s, _r) \
710     bus_space_read_4((_s)->sc_memt, (_s)->sc_memh, (_r))
711 #define iavf_wr(_s, _r, _v) \
712     bus_space_write_4((_s)->sc_memt, (_s)->sc_memh, (_r), (_v))
713 #define iavf_barrier(_s, _r, _l, _o) \
714     bus_space_barrier((_s)->sc_memt, (_s)->sc_memh, (_r), (_l), (_o))
715 #define iavf_intr_enable(_s) \
716     iavf_wr((_s), I40E_VFINT_DYN_CTL01, I40E_VFINT_DYN_CTL0_INTENA_MASK | \
717     I40E_VFINT_DYN_CTL0_CLEARPBA_MASK | \
718     (IAVF_NOITR << I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT)); \
719     iavf_wr((_s), I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA1_ADMINQ_MASK)
720
721 #define iavf_nqueues(_sc) (1 << (_sc)->sc_nqueues)
722 #define iavf_allqueues(_sc) ((1 << ((_sc)->sc_nqueues+1)) - 1)
723
724 #ifdef __LP64__
725 #define iavf_dmamem_hi(_ixm) (uint32_t)(IAVF_DMA_DVA(_ixm) >> 32)
726 #else
727 #define iavf_dmamem_hi(_ixm) 0
728 #endif
729
730 #define iavf_dmamem_lo(_ixm) (uint32_t)IAVF_DMA_DVA(_ixm)
731
732 static inline void
733 iavf_aq_dva(struct iavf_aq_desc *iaq, bus_addr_t addr)
734 {
735 #ifdef __LP64__
736 htolem32(&iaq->iaq_param[2], addr >> 32);
737 #else
738 iaq->iaq_param[2] = htole32(0);
739 #endif
740 htolem32(&iaq->iaq_param[3], addr);
741 }
742
743 #if _BYTE_ORDER == _BIG_ENDIAN
744 #define HTOLE16(_x) (uint16_t)(((_x) & 0xff) << 8 | ((_x) & 0xff00) >> 8)
745 #else
746 #define HTOLE16(_x) (_x)
747 #endif
748
749 static const struct pci_matchid iavf_devices[] = {
750 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_VF },
751 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_VF_HV },
752 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_VF },
753 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ADAPTIVE_VF },
754 };
755
756 static int
757 iavf_match(struct device *parent, void *match, void *aux)
758 {
759 return (pci_matchbyid(aux, iavf_devices, nitems(iavf_devices)));
760 }
761
762 void
763 iavf_attach(struct device *parent, struct device *self, void *aux)
764 {
765 struct iavf_softc *sc = (struct iavf_softc *)self;
766 struct ifnet *ifp = &sc->sc_ac.ac_if;
767 struct pci_attach_args *pa = aux;
768 pcireg_t memtype;
769 int tries;
770
771 rw_init(&sc->sc_cfg_lock, "iavfcfg");
772
773 sc->sc_pc = pa->pa_pc;
774 sc->sc_tag = pa->pa_tag;
775 sc->sc_dmat = pa->pa_dmat;
776 sc->sc_aq_regs = &iavf_aq_regs;
777
778 sc->sc_nqueues = 0; /* 1 << 0 is 1 queue */
779 sc->sc_tx_ring_ndescs = 1024;
780 sc->sc_rx_ring_ndescs = 1024;
781
782 memtype = pci_mapreg_type(sc->sc_pc, sc->sc_tag, IAVF_PCIREG);
783 if (pci_mapreg_map(pa, IAVF_PCIREG, memtype, 0,
784 &sc->sc_memt, &sc->sc_memh, NULL, &sc->sc_mems, 0)) {
785 printf(": unable to map registers\n");
786 return;
787 }
788
789 for (tries = 0; tries < 100; tries++) {
790 uint32_t reg;
791 reg = iavf_rd(sc, I40E_VFGEN_RSTAT) &
792 I40E_VFGEN_RSTAT_VFR_STATE_MASK;
793 if (reg == IAVF_VFR_VFACTIVE ||
794 reg == IAVF_VFR_COMPLETED)
795 break;
796
797 delay(10000);
798 }
799 if (tries == 100) {
800 printf(": VF reset timed out\n");
801 return;
802 }
803 task_set(&sc->sc_reset_task, iavf_reset, sc);
804
805 mtx_init(&sc->sc_atq_mtx, IPL_NET);
806
807 if (iavf_dmamem_alloc(sc, &sc->sc_atq,
808 sizeof(struct iavf_aq_desc) * IAVF_AQ_NUM, IAVF_AQ_ALIGN) != 0) {
809 printf("\n" "%s: unable to allocate atq\n", DEVNAME(sc));
810 goto unmap;
811 }
812
813 SIMPLEQ_INIT(&sc->sc_arq_idle);
814 SIMPLEQ_INIT(&sc->sc_arq_live);
815 if_rxr_init(&sc->sc_arq_ring, 2, IAVF_AQ_NUM - 1);
816 sc->sc_arq_cons = 0;
817 sc->sc_arq_prod = 0;
818
819 if (iavf_dmamem_alloc(sc, &sc->sc_arq,
820 sizeof(struct iavf_aq_desc) * IAVF_AQ_NUM, IAVF_AQ_ALIGN) != 0) {
821 printf("\n" "%s: unable to allocate arq\n", DEVNAME(sc));
822 goto free_atq;
823 }
824
825 if (!iavf_arq_fill(sc, 0)) {
826 printf("\n" "%s: unable to fill arq descriptors\n",
827 DEVNAME(sc));
828 goto free_arq;
829 }
830 timeout_set(&sc->sc_admin_timeout, iavf_arq_timeout, sc);
831
832 if (iavf_dmamem_alloc(sc, &sc->sc_scratch, PAGE_SIZE, IAVF_AQ_ALIGN) != 0) {
833 printf("\n" "%s: unable to allocate scratch\n", DEVNAME(sc));
834 goto shutdown;
835 }
836
837 bus_dmamap_sync(sc->sc_dmat, IAVF_DMA_MAP(&sc->sc_atq),
838 0, IAVF_DMA_LEN(&sc->sc_atq),
839 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
840
841 bus_dmamap_sync(sc->sc_dmat, IAVF_DMA_MAP(&sc->sc_arq),
842 0, IAVF_DMA_LEN(&sc->sc_arq),
843 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
844
845 iavf_init_admin_queue(sc);
846
847 if (iavf_get_version(sc) != 0) {
848 printf(", unable to get VF interface version\n");
849 goto free_scratch;
850 }
851
852 if (iavf_get_vf_resources(sc) != 0) {
853 printf(", timed out waiting for VF resources\n");
854 goto free_scratch;
855 }
856
857 if (iavf_config_irq_map(sc) != 0) {
858 printf(", timeout waiting for IRQ map response");
859 goto free_scratch;
860 }
861
862 /* msix only? */
863 if (pci_intr_map_msix(pa, 0, &sc->sc_ih) != 0) {
864 printf(", unable to map interrupt\n");
865 goto free_scratch;
866 }
867
868 /* generate an address if the pf didn't give us one */
869 memcpy(sc->sc_enaddr, sc->sc_ac.ac_enaddr, ETHER_ADDR_LEN);
870 if (memcmp(sc->sc_ac.ac_enaddr, etheranyaddr, ETHER_ADDR_LEN) == 0)
871 ether_fakeaddr(ifp);
872
873 printf(", %s, address %s\n", pci_intr_string(sc->sc_pc, sc->sc_ih),
874 ether_sprintf(sc->sc_ac.ac_enaddr));
875
876 sc->sc_ihc = pci_intr_establish(sc->sc_pc, sc->sc_ih,
877 IPL_NET | IPL_MPSAFE, iavf_intr, sc, DEVNAME(sc));
878 if (sc->sc_ihc == NULL) {
879 printf("%s: unable to establish interrupt handler\n",
880 DEVNAME(sc));
881 goto free_scratch;
882 }
883
884 ifp->if_softc = sc;
885 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
886 ifp->if_xflags = IFXF_MPSAFE;
887 ifp->if_ioctl = iavf_ioctl;
888 ifp->if_qstart = iavf_start;
889 ifp->if_watchdog = iavf_watchdog;
890 if (ifp->if_hardmtu == 0)
891 ifp->if_hardmtu = IAVF_HARDMTU;
892 strlcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
893 ifq_init_maxlen(&ifp->if_snd, sc->sc_tx_ring_ndescs);
894
895 ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING;
896 #if 0
897 ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
898 IFCAP_CSUM_UDPv4;
899 #endif
900
901 ifmedia_init(&sc->sc_media, 0, iavf_media_change, iavf_media_status);
902
903 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
904 ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
905
906 if_attach(ifp);
907 ether_ifattach(ifp);
908
909 if_attach_queues(ifp, iavf_nqueues(sc));
910 if_attach_iqueues(ifp, iavf_nqueues(sc));
911
912 iavf_intr_enable(sc);
913
914 return;
915 free_scratch:
916 iavf_dmamem_free(sc, &sc->sc_scratch);
917 shutdown:
918 iavf_wr(sc, sc->sc_aq_regs->atq_head, 0);
919 iavf_wr(sc, sc->sc_aq_regs->arq_head, 0);
920 iavf_wr(sc, sc->sc_aq_regs->atq_tail, 0);
921 iavf_wr(sc, sc->sc_aq_regs->arq_tail, 0);
922
923 iavf_wr(sc, sc->sc_aq_regs->atq_bal, 0);
924 iavf_wr(sc, sc->sc_aq_regs->atq_bah, 0);
925 iavf_wr(sc, sc->sc_aq_regs->atq_len, 0);
926
927 iavf_wr(sc, sc->sc_aq_regs->arq_bal, 0);
928 iavf_wr(sc, sc->sc_aq_regs->arq_bah, 0);
929 iavf_wr(sc, sc->sc_aq_regs->arq_len, 0);
930
931 bus_dmamap_sync(sc->sc_dmat, IAVF_DMA_MAP(&sc->sc_arq),
932 0, IAVF_DMA_LEN(&sc->sc_arq),
933 BUS_DMASYNC_POSTREAD);
934 bus_dmamap_sync(sc->sc_dmat, IAVF_DMA_MAP(&sc->sc_atq),
935 0, IAVF_DMA_LEN(&sc->sc_atq),
936 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
937
938 iavf_arq_unfill(sc);
939 free_arq:
940 iavf_dmamem_free(sc, &sc->sc_arq);
941 free_atq:
942 iavf_dmamem_free(sc, &sc->sc_atq);
943 unmap:
944 bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
945 sc->sc_mems = 0;
946 }
947
948 static int
949 iavf_media_change(struct ifnet *ifp)
950 {
951 return (EOPNOTSUPP);
952 }
953
954 static void
955 iavf_media_status(struct ifnet *ifp, struct ifmediareq *ifm)
956 {
957 struct iavf_softc *sc = ifp->if_softc;
958
959 NET_ASSERT_LOCKED();
960
961 ifm->ifm_status = sc->sc_media_status;
962 ifm->ifm_active = sc->sc_media_active;
963 }
964
965 static void
966 iavf_watchdog(struct ifnet *ifp)
967 {
968
969 }
970
971 int
972 iavf_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
973 {
974 struct iavf_softc *sc = (struct iavf_softc *)ifp->if_softc;
975 struct ifreq *ifr = (struct ifreq *)data;
976 uint8_t addrhi[ETHER_ADDR_LEN], addrlo[ETHER_ADDR_LEN];
977 int /*aqerror,*/ error = 0;
978
979 switch (cmd) {
980 case SIOCSIFADDR:
981 ifp->if_flags |= IFF_UP;
982 /* FALLTHROUGH */
983
984 case SIOCSIFFLAGS:
985 if (ISSET(ifp->if_flags, IFF_UP)) {
986 if (ISSET(ifp->if_flags, IFF_RUNNING))
987 error = ENETRESET;
988 else
989 error = iavf_up(sc);
990 } else {
991 if (ISSET(ifp->if_flags, IFF_RUNNING))
992 error = iavf_down(sc);
993 }
994 break;
995
996 case SIOCGIFMEDIA:
997 case SIOCSIFMEDIA:
998 error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
999 break;
1000
1001 case SIOCGIFRXR:
1002 error = iavf_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_data);
1003 break;
1004
1005 case SIOCADDMULTI:
1006 if (ether_addmulti(ifr, &sc->sc_ac) == ENETRESET) {
1007 error = ether_multiaddr(&ifr->ifr_addr, addrlo, addrhi);
1008 if (error != 0)
1009 return (error);
1010
1011 iavf_add_del_addr(sc, addrlo, 1);
1012 /* check result i guess? */
1013
1014 if (sc->sc_ac.ac_multirangecnt > 0) {
1015 SET(ifp->if_flags, IFF_ALLMULTI);
1016 error = ENETRESET;
1017 }
1018 }
1019 break;
1020
1021 case SIOCDELMULTI:
1022 if (ether_delmulti(ifr, &sc->sc_ac) == ENETRESET) {
1023 error = ether_multiaddr(&ifr->ifr_addr, addrlo, addrhi);
1024 if (error != 0)
1025 return (error);
1026
1027 iavf_add_del_addr(sc, addrlo, 0);
1028
1029 if (ISSET(ifp->if_flags, IFF_ALLMULTI) &&
1030 sc->sc_ac.ac_multirangecnt == 0) {
1031 CLR(ifp->if_flags, IFF_ALLMULTI);
1032 error = ENETRESET;
1033 }
1034 }
1035 break;
1036
1037 default:
1038 error = ether_ioctl(ifp, &sc->sc_ac, cmd, data);
1039 break;
1040 }
1041
1042 if (error == ENETRESET)
1043 error = iavf_iff(sc);
1044
1045 return (error);
1046 }
1047
1048 static int
1049 iavf_config_vsi_queues(struct iavf_softc *sc)
1050 {
1051 struct ifnet *ifp = &sc->sc_ac.ac_if;
1052 struct iavf_aq_desc iaq;
1053 struct iavf_vc_queue_config_info *config;
1054 struct iavf_vc_txq_info *txq;
1055 struct iavf_vc_rxq_info *rxq;
1056 struct iavf_rx_ring *rxr;
1057 struct iavf_tx_ring *txr;
1058 int rv, i;
1059
1060 memset(&iaq, 0, sizeof(iaq));
1061 iaq.iaq_flags = htole16(IAVF_AQ_BUF | IAVF_AQ_RD);
1062 iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
1063 iaq.iaq_vc_opcode = htole32(IAVF_VC_OP_CONFIG_VSI_QUEUES);
1064 iaq.iaq_datalen = htole16(sizeof(*config) +
1065 iavf_nqueues(sc) * sizeof(struct iavf_vc_queue_pair_info));
1066 iavf_aq_dva(&iaq, IAVF_DMA_DVA(&sc->sc_scratch));
1067
1068 config = IAVF_DMA_KVA(&sc->sc_scratch);
1069 config->vsi_id = htole16(sc->sc_vsi_id);
1070 config->num_queue_pairs = htole16(iavf_nqueues(sc));
1071
1072 for (i = 0; i < iavf_nqueues(sc); i++) {
1073 rxr = ifp->if_iqs[i]->ifiq_softc;
1074 txr = ifp->if_ifqs[i]->ifq_softc;
1075
1076 txq = &config->qpair[i].txq;
1077 txq->vsi_id = htole16(sc->sc_vsi_id);
1078 txq->queue_id = htole16(i);
1079 txq->ring_len = sc->sc_tx_ring_ndescs;
1080 txq->headwb_ena = 0;
1081 htolem64(&txq->dma_ring_addr, IAVF_DMA_DVA(&txr->txr_mem));
1082 txq->dma_headwb_addr = 0;
1083
1084 rxq = &config->qpair[i].rxq;
1085 rxq->vsi_id = htole16(sc->sc_vsi_id);
1086 rxq->queue_id = htole16(i);
1087 rxq->ring_len = sc->sc_rx_ring_ndescs;
1088 rxq->splithdr_ena = 0;
1089 rxq->databuf_size = htole32(MCLBYTES);
1090 rxq->max_pkt_size = htole32(IAVF_HARDMTU);
1091 htolem64(&rxq->dma_ring_addr, IAVF_DMA_DVA(&rxr->rxr_mem));
1092 rxq->rx_split_pos = 0;
1093 }
1094
1095 bus_dmamap_sync(sc->sc_dmat, IAVF_DMA_MAP(&sc->sc_scratch), 0,
1096 IAVF_DMA_LEN(&sc->sc_scratch),
1097 BUS_DMASYNC_PREREAD);
1098
1099 iavf_atq_post(sc, &iaq);
1100 rv = iavf_arq_wait(sc, 250);
1101 if (rv != IAVF_VC_RC_SUCCESS) {
1102 printf("%s: CONFIG_VSI_QUEUES failed: %d\n", DEVNAME(sc), rv);
1103 return (1);
1104 }
1105
1106 return (0);
1107 }
1108
1109 static int
1110 iavf_config_hena(struct iavf_softc *sc)
1111 {
1112 struct iavf_aq_desc iaq;
1113 uint64_t *caps;
1114 int rv;
1115
1116 memset(&iaq, 0, sizeof(iaq));
1117 iaq.iaq_flags = htole16(IAVF_AQ_BUF | IAVF_AQ_RD);
1118 iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
1119 iaq.iaq_vc_opcode = htole32(IAVF_VC_OP_SET_RSS_HENA);
1120 iaq.iaq_datalen = htole32(sizeof(*caps));
1121 iavf_aq_dva(&iaq, IAVF_DMA_DVA(&sc->sc_scratch));
1122
1123 caps = IAVF_DMA_KVA(&sc->sc_scratch);
1124 *caps = 0;
1125
1126 iavf_atq_post(sc, &iaq);
1127 rv = iavf_arq_wait(sc, 250);
1128 if (rv != IAVF_VC_RC_SUCCESS) {
1129 printf("%s: SET_RSS_HENA failed: %d\n", DEVNAME(sc), rv);
1130 return (1);
1131 }
1132
1133 caps = IAVF_DMA_KVA(&sc->sc_scratch);
1134
1135 return (0);
1136 }
1137
1138 static int
1139 iavf_queue_select(struct iavf_softc *sc, int opcode)
1140 {
1141 struct iavf_aq_desc iaq;
1142 struct iavf_vc_queue_select *qsel;
1143 int rv;
1144
1145 memset(&iaq, 0, sizeof(iaq));
1146 iaq.iaq_flags = htole16(IAVF_AQ_BUF | IAVF_AQ_RD);
1147 iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
1148 iaq.iaq_vc_opcode = htole32(opcode);
1149 iaq.iaq_datalen = htole16(sizeof(*qsel));
1150 iavf_aq_dva(&iaq, IAVF_DMA_DVA(&sc->sc_scratch));
1151
1152 qsel = IAVF_DMA_KVA(&sc->sc_scratch);
1153 qsel->vsi_id = htole16(sc->sc_vsi_id);
1154 qsel->rx_queues = htole32(iavf_allqueues(sc));
1155 qsel->tx_queues = htole32(iavf_allqueues(sc));
1156
1157 bus_dmamap_sync(sc->sc_dmat, IAVF_DMA_MAP(&sc->sc_scratch), 0,
1158 IAVF_DMA_LEN(&sc->sc_scratch),
1159 BUS_DMASYNC_PREREAD);
1160
1161 iavf_atq_post(sc, &iaq);
1162 rv = iavf_arq_wait(sc, 250);
1163 if (rv != IAVF_VC_RC_SUCCESS) {
1164 printf("%s: queue op %d failed: %d\n", DEVNAME(sc), opcode, rv);
1165 return (1);
1166 }
1167
1168 return (0);
1169 }
1170
1171 static int
1172 iavf_up(struct iavf_softc *sc)
1173 {
1174 struct ifnet *ifp = &sc->sc_ac.ac_if;
1175 struct iavf_rx_ring *rxr;
1176 struct iavf_tx_ring *txr;
1177 unsigned int nqueues, i;
1178 int rv = ENOMEM;
1179
1180 nqueues = iavf_nqueues(sc);
1181 KASSERT(nqueues == 1); /* XXX */
1182
1183 rw_enter_write(&sc->sc_cfg_lock);
1184 if (sc->sc_dead) {
1185 rw_exit_write(&sc->sc_cfg_lock);
1186 return (ENXIO);
1187 }
1188
1189 for (i = 0; i < nqueues; i++) {
1190 rxr = iavf_rxr_alloc(sc, i);
1191 if (rxr == NULL)
1192 goto free;
1193
1194 txr = iavf_txr_alloc(sc, i);
1195 if (txr == NULL) {
1196 iavf_rxr_free(sc, rxr);
1197 goto free;
1198 }
1199
1200 ifp->if_iqs[i]->ifiq_softc = rxr;
1201 ifp->if_ifqs[i]->ifq_softc = txr;
1202
1203 iavf_rxfill(sc, rxr);
1204 }
1205
1206 if (iavf_config_vsi_queues(sc) != 0)
1207 goto down;
1208
1209 if (iavf_config_hena(sc) != 0)
1210 goto down;
1211
1212 if (iavf_queue_select(sc, IAVF_VC_OP_ENABLE_QUEUES) != 0)
1213 goto down;
1214
1215 SET(ifp->if_flags, IFF_RUNNING);
1216
1217 iavf_wr(sc, I40E_VFINT_ITR01(0), 0x7a);
1218 iavf_wr(sc, I40E_VFINT_ITR01(1), 0x7a);
1219 iavf_wr(sc, I40E_VFINT_ITR01(2), 0);
1220
1221 rw_exit_write(&sc->sc_cfg_lock);
1222
1223 return (ENETRESET);
1224
1225 free:
1226 for (i = 0; i < nqueues; i++) {
1227 rxr = ifp->if_iqs[i]->ifiq_softc;
1228 txr = ifp->if_ifqs[i]->ifq_softc;
1229
1230 if (rxr == NULL) {
1231 /*
1232 * tx and rx get set at the same time, so if one
1233 * is NULL, the other is too.
1234 */
1235 continue;
1236 }
1237
1238 iavf_txr_free(sc, txr);
1239 iavf_rxr_free(sc, rxr);
1240 }
1241 rw_exit_write(&sc->sc_cfg_lock);
1242 return (rv);
1243 down:
1244 rw_exit_write(&sc->sc_cfg_lock);
1245 iavf_down(sc);
1246 return (ETIMEDOUT);
1247 }
1248
1249 static int
1250 iavf_config_promisc_mode(struct iavf_softc *sc, int unicast, int multicast)
1251 {
1252 struct iavf_aq_desc iaq;
1253 struct iavf_vc_promisc_info *promisc;
1254 int rv, flags;
1255
1256 memset(&iaq, 0, sizeof(iaq));
1257 iaq.iaq_flags = htole16(IAVF_AQ_BUF | IAVF_AQ_RD);
1258 iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
1259 iaq.iaq_vc_opcode = htole32(IAVF_VC_OP_CONFIG_PROMISC);
1260 iaq.iaq_datalen = htole16(sizeof(*promisc));
1261 iavf_aq_dva(&iaq, IAVF_DMA_DVA(&sc->sc_scratch));
1262
1263 flags = 0;
1264 if (unicast)
1265 flags |= IAVF_FLAG_VF_UNICAST_PROMISC;
1266 if (multicast)
1267 flags |= IAVF_FLAG_VF_MULTICAST_PROMISC;
1268
1269 promisc = IAVF_DMA_KVA(&sc->sc_scratch);
1270 promisc->vsi_id = htole16(sc->sc_vsi_id);
1271 promisc->flags = htole16(flags);
1272
1273 bus_dmamap_sync(sc->sc_dmat, IAVF_DMA_MAP(&sc->sc_scratch), 0,
1274 IAVF_DMA_LEN(&sc->sc_scratch),
1275 BUS_DMASYNC_PREREAD);
1276
1277 iavf_atq_post(sc, &iaq);
1278 rv = iavf_arq_wait(sc, 250);
1279 if (rv != IAVF_VC_RC_SUCCESS) {
1280 printf("%s: CONFIG_PROMISC_MODE failed: %d\n", DEVNAME(sc), rv);
1281 return (1);
1282 }
1283
1284 return (0);
1285 }
1286
1287 static int
1288 iavf_add_del_addr(struct iavf_softc *sc, uint8_t *addr, int add)
1289 {
1290 struct iavf_aq_desc iaq;
1291 struct iavf_vc_eth_addr_list *addrs;
1292 struct iavf_vc_eth_addr *vcaddr;
1293 int rv;
1294
1295 memset(&iaq, 0, sizeof(iaq));
1296 iaq.iaq_flags = htole16(IAVF_AQ_BUF | IAVF_AQ_RD);
1297 iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
1298 if (add)
1299 iaq.iaq_vc_opcode = htole32(IAVF_VC_OP_ADD_ETH_ADDR);
1300 else
1301 iaq.iaq_vc_opcode = htole32(IAVF_VC_OP_DEL_ETH_ADDR);
1302 iaq.iaq_datalen = htole16(sizeof(*addrs) + sizeof(*vcaddr));
1303 iavf_aq_dva(&iaq, IAVF_DMA_DVA(&sc->sc_scratch));
1304
1305 addrs = IAVF_DMA_KVA(&sc->sc_scratch);
1306 addrs->vsi_id = htole16(sc->sc_vsi_id);
1307 addrs->num_elements = htole16(1);
1308
1309 vcaddr = addrs->list;
1310 memcpy(vcaddr->addr, addr, ETHER_ADDR_LEN);
1311
1312 bus_dmamap_sync(sc->sc_dmat, IAVF_DMA_MAP(&sc->sc_scratch), 0,
1313 IAVF_DMA_LEN(&sc->sc_scratch),
1314 BUS_DMASYNC_PREREAD);
1315
1316 iavf_atq_post(sc, &iaq);
1317 rv = iavf_arq_wait(sc, 250);
1318 if (rv != IAVF_VC_RC_SUCCESS) {
1319 printf("%s: ADD/DEL_ETH_ADDR failed: %d\n", DEVNAME(sc), rv);
1320 return (1);
1321 }
1322
1323 return (0);
1324 }
1325
1326 static int
1327 iavf_iff(struct iavf_softc *sc)
1328 {
1329 	struct ifnet *ifp = &sc->sc_ac.ac_if;
1330 	int unicast, multicast;
1331
1332 	if (!ISSET(ifp->if_flags, IFF_RUNNING))
1333 		return (0);
1334
1335 	rw_enter_write(&sc->sc_cfg_lock);
1336
1337 	unicast = 0;
1338 	multicast = 0;
1339 	if (ISSET(ifp->if_flags, IFF_PROMISC)) {
1340 		unicast = 1;
1341 		multicast = 1;
1342 	} else if (ISSET(ifp->if_flags, IFF_ALLMULTI)) {
1343 		multicast = 1;
1344 	}
1345 	iavf_config_promisc_mode(sc, unicast, multicast);
1346
1347 	if (memcmp(sc->sc_enaddr, sc->sc_ac.ac_enaddr, ETHER_ADDR_LEN) != 0) {
1348 		if (memcmp(sc->sc_enaddr, etheranyaddr, ETHER_ADDR_LEN) != 0)
1349 			iavf_add_del_addr(sc, sc->sc_enaddr, 0);
1350 		memcpy(sc->sc_enaddr, sc->sc_ac.ac_enaddr, ETHER_ADDR_LEN);
1351 		iavf_add_del_addr(sc, sc->sc_enaddr, 1);
1352 	}
1353
1354 	rw_exit_write(&sc->sc_cfg_lock);
1355 	return (0);
1356 }
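
The flag mapping in iavf_iff() is worth spelling out: IFF_PROMISC requests both unicast and multicast promiscuous mode, while IFF_ALLMULTI requests only multicast. A runnable illustration of just that decision (the X_* flag values are illustrative, not the kernel's):

#include <assert.h>

#define X_PROMISC	0x1
#define X_ALLMULTI	0x2

static void
promisc_mode(int if_flags, int *unicast, int *multicast)
{
	*unicast = *multicast = 0;
	if (if_flags & X_PROMISC)
		*unicast = *multicast = 1;
	else if (if_flags & X_ALLMULTI)
		*multicast = 1;
}

int
main(void)
{
	int u, m;

	promisc_mode(X_ALLMULTI, &u, &m);
	assert(u == 0 && m == 1);
	promisc_mode(X_PROMISC, &u, &m);
	assert(u == 1 && m == 1);
	return (0);
}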
1357
1358 static int
1359 iavf_down(struct iavf_softc *sc)
1360 {
1361 	struct ifnet *ifp = &sc->sc_ac.ac_if;
1362 	struct iavf_rx_ring *rxr;
1363 	struct iavf_tx_ring *txr;
1364 	unsigned int nqueues, i;
1365 	uint32_t reg;
1366 	int error = 0;
1367
1368 	nqueues = iavf_nqueues(sc);
1369
1370 	rw_enter_write(&sc->sc_cfg_lock);
1371
1372 	CLR(ifp->if_flags, IFF_RUNNING);
1373
1374 	NET_UNLOCK();
1375
1376 	if (sc->sc_resetting == 0) {
1377 		/* disable queues */
1378 		if (iavf_queue_select(sc, IAVF_VC_OP_DISABLE_QUEUES) != 0)
1379 			goto die;
1380 	}
1381
1382 	/* mask interrupts */
1383 	reg = iavf_rd(sc, I40E_VFINT_DYN_CTL01);
1384 	reg |= I40E_VFINT_DYN_CTL0_INTENA_MSK_MASK |
1385 	    (IAVF_NOITR << I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT);
1386 	iavf_wr(sc, I40E_VFINT_DYN_CTL01, reg);
1387
1388 	/* make sure no hw generated work is still in flight */
1389 	intr_barrier(sc->sc_ihc);
1390 	for (i = 0; i < nqueues; i++) {
1391 		rxr = ifp->if_iqs[i]->ifiq_softc;
1392 		txr = ifp->if_ifqs[i]->ifq_softc;
1393
1394 		ifq_barrier(ifp->if_ifqs[i]);
1395
1396 		timeout_del_barrier(&rxr->rxr_refill);
1397 	}
1398
1399 	for (i = 0; i < nqueues; i++) {
1400 		rxr = ifp->if_iqs[i]->ifiq_softc;
1401 		txr = ifp->if_ifqs[i]->ifq_softc;
1402
1403 		iavf_txr_clean(sc, txr);
1404 		iavf_rxr_clean(sc, rxr);
1405
1406 		iavf_txr_free(sc, txr);
1407 		iavf_rxr_free(sc, rxr);
1408
1409 		ifp->if_iqs[i]->ifiq_softc = NULL;
1410 		ifp->if_ifqs[i]->ifq_softc = NULL;
1411 	}
1412
1413 	/* unmask */
1414 	reg = iavf_rd(sc, I40E_VFINT_DYN_CTL01);
1415 	reg |= (IAVF_NOITR << I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT);
1416 	iavf_wr(sc, I40E_VFINT_DYN_CTL01, reg);
1417
1418 out:
1419 	rw_exit_write(&sc->sc_cfg_lock);
1420 	NET_LOCK();
1421 	return (error);
1422 die:
1423 	sc->sc_dead = 1;
1424 	log(LOG_CRIT, "%s: failed to shut down rings", DEVNAME(sc));
1425 	error = ETIMEDOUT;
1426 	goto out;
1427 }
1428
1429 static void
1430 iavf_reset(void *xsc)
1431 {
1432 	struct iavf_softc *sc = xsc;
1433 	struct ifnet *ifp = &sc->sc_ac.ac_if;
1434 	int tries, up, link_state;
1435
1436 	NET_LOCK();
1437
1438 	/* treat the reset as a loss of link */
1439 	link_state = ifp->if_link_state;
1440 	if (ifp->if_link_state != LINK_STATE_DOWN) {
1441 		ifp->if_link_state = LINK_STATE_DOWN;
1442 		if_link_state_change(ifp);
1443 	}
1444
1445 	up = 0;
1446 	if (ISSET(ifp->if_flags, IFF_RUNNING)) {
1447 		iavf_down(sc);
1448 		up = 1;
1449 	}
1450
1451 	rw_enter_write(&sc->sc_cfg_lock);
1452
1453 	sc->sc_major_ver = UINT_MAX;
1454 	sc->sc_minor_ver = UINT_MAX;
1455 	sc->sc_got_vf_resources = 0;
1456 	sc->sc_got_irq_map = 0;
1457
1458 	for (tries = 0; tries < 100; tries++) {
1459 		uint32_t reg;
1460 		reg = iavf_rd(sc, I40E_VFGEN_RSTAT) &
1461 		    I40E_VFGEN_RSTAT_VFR_STATE_MASK;
1462 		if (reg == IAVF_VFR_VFACTIVE ||
1463 		    reg == IAVF_VFR_COMPLETED)
1464 			break;
1465
1466 		delay(10000);
1467 	}
1468 	if (tries == 100) {
1469 		printf("%s: VF reset timed out\n", DEVNAME(sc));
1470 		goto failed;
1471 	}
1472
1473 	iavf_arq_unfill(sc);
1474 	sc->sc_arq_cons = 0;
1475 	sc->sc_arq_prod = 0;
1476 	if (!iavf_arq_fill(sc, 0)) {
1477 		printf("\n" "%s: unable to fill arq descriptors\n",
1478 		    DEVNAME(sc));
1479 		goto failed;
1480 	}
1481
1482 	iavf_init_admin_queue(sc);
1483
1484 	if (iavf_get_version(sc) != 0) {
1485 		printf("%s: unable to get VF interface version\n",
1486 		    DEVNAME(sc));
1487 		goto failed;
1488 	}
1489
1490 	if (iavf_get_vf_resources(sc) != 0) {
1491 		printf("%s: timed out waiting for VF resources\n",
1492 		    DEVNAME(sc));
1493 		goto failed;
1494 	}
1495
1496 	if (iavf_config_irq_map(sc) != 0) {
1497 		printf("%s: timed out configuring IRQ map\n", DEVNAME(sc));
1498 		goto failed;
1499 	}
1500
1501 	/* do we need to re-add mac addresses here? */
1502
1503 	sc->sc_resetting = 0;
1504 	iavf_intr_enable(sc);
1505 	rw_exit_write(&sc->sc_cfg_lock);
1506
1507 	/* the PF-assigned MAC address might have changed */
1508 	if ((memcmp(sc->sc_ac.ac_enaddr, etheranyaddr, ETHER_ADDR_LEN) != 0) &&
1509 	    (memcmp(sc->sc_ac.ac_enaddr, sc->sc_enaddr, ETHER_ADDR_LEN) != 0)) {
1510 		memcpy(sc->sc_enaddr, sc->sc_ac.ac_enaddr, ETHER_ADDR_LEN);
1511 		if_setlladdr(ifp, sc->sc_ac.ac_enaddr);
1512 		ifnewlladdr(ifp);
1513 	}
1514
1515 	/* restore link state */
1516 	if (link_state != LINK_STATE_DOWN) {
1517 		ifp->if_link_state = link_state;
1518 		if_link_state_change(ifp);
1519 	}
1520
1521 	if (up) {
1522 		int i;
1523
1524 		iavf_up(sc);
1525
1526 		for (i = 0; i < iavf_nqueues(sc); i++) {
1527 			if (ifq_is_oactive(ifp->if_ifqs[i]))
1528 				ifq_restart(ifp->if_ifqs[i]);
1529 		}
1530 	}
1531
1532 	NET_UNLOCK();
1533 	return;
1534 failed:
1535 	sc->sc_dead = 1;
1536 	sc->sc_resetting = 0;
1537 	rw_exit_write(&sc->sc_cfg_lock);
1538 	NET_UNLOCK();
1539 }
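
The reset path above spins on I40E_VFGEN_RSTAT until the PF reports the VF usable again. The same poll, factored into a hypothetical helper for clarity; iavf_rd(), delay() and the IAVF_VFR_* states are the ones used in iavf_reset(), and the 100 x 10ms budget matches the loop above:

/*
 * Hedged sketch: poll the VF reset state until the PF reports the VF
 * active or reset-complete; ETIMEDOUT after roughly one second.
 */
static int
iavf_poll_vfr_state(struct iavf_softc *sc)
{
	uint32_t reg;
	int tries;

	for (tries = 0; tries < 100; tries++) {
		reg = iavf_rd(sc, I40E_VFGEN_RSTAT) &
		    I40E_VFGEN_RSTAT_VFR_STATE_MASK;
		if (reg == IAVF_VFR_VFACTIVE || reg == IAVF_VFR_COMPLETED)
			return (0);
		delay(10000);	/* 10ms per attempt */
	}
	return (ETIMEDOUT);
}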
1540
1541 static struct iavf_tx_ring *
1542 iavf_txr_alloc(struct iavf_softc *sc, unsigned int qid)
1543 {
1544 	struct iavf_tx_ring *txr;
1545 	struct iavf_tx_map *maps, *txm;
1546 	unsigned int i;
1547
1548 	txr = malloc(sizeof(*txr), M_DEVBUF, M_WAITOK|M_CANFAIL);
1549 	if (txr == NULL)
1550 		return (NULL);
1551
1552 	maps = mallocarray(sizeof(*maps),
1553 	    sc->sc_tx_ring_ndescs, M_DEVBUF, M_WAITOK|M_CANFAIL|M_ZERO);
1554 	if (maps == NULL)
1555 		goto free;
1556
1557 	if (iavf_dmamem_alloc(sc, &txr->txr_mem,
1558 	    sizeof(struct iavf_tx_desc) * sc->sc_tx_ring_ndescs,
1559 	    IAVF_TX_QUEUE_ALIGN) != 0)
1560 		goto freemap;
1561
1562 	for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
1563 		txm = &maps[i];
1564
1565 		if (bus_dmamap_create(sc->sc_dmat,
1566 		    IAVF_HARDMTU, IAVF_TX_PKT_DESCS, IAVF_HARDMTU, 0,
1567 		    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
1568 		    &txm->txm_map) != 0)
1569 			goto uncreate;
1570
1571 		txm->txm_eop = -1;
1572 		txm->txm_m = NULL;
1573 	}
1574
1575 	txr->txr_cons = txr->txr_prod = 0;
1576 	txr->txr_maps = maps;
1577
1578 	txr->txr_tail = I40E_QTX_TAIL1(qid);
1579 	txr->txr_qid = qid;
1580
1581 	return (txr);
1582
1583 uncreate:
1584 	for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
1585 		txm = &maps[i];
1586
1587 		if (txm->txm_map == NULL)
1588 			continue;
1589
1590 		bus_dmamap_destroy(sc->sc_dmat, txm->txm_map);
1591 	}
1592
1593 	iavf_dmamem_free(sc, &txr->txr_mem);
1594 freemap:
1595 	free(maps, M_DEVBUF, sizeof(*maps) * sc->sc_tx_ring_ndescs);
1596 free:
1597 	free(txr, M_DEVBUF, sizeof(*txr));
1598 	return (NULL);
1599 }
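
txr_cons/txr_prod indexes are wrapped with a bitmask rather than a modulo; this works because the ring sizes are powers of two, so ndescs - 1 is an all-ones mask. A runnable illustration:

#include <assert.h>

int
main(void)
{
	unsigned int ndescs = 1024;		/* must be a power of two */
	unsigned int mask = ndescs - 1;		/* 0x3ff */
	unsigned int prod = 1023;

	prod++;
	prod &= mask;				/* wraps to 0 */
	assert(prod == (1024 % ndescs));
	return (0);
}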
1600
1601 static void
1602 iavf_txr_clean(struct iavf_softc *sc, struct iavf_tx_ring *txr)
1603 {
1604 	struct iavf_tx_map *maps, *txm;
1605 	bus_dmamap_t map;
1606 	unsigned int i;
1607
1608 	maps = txr->txr_maps;
1609 	for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
1610 		txm = &maps[i];
1611
1612 		if (txm->txm_m == NULL)
1613 			continue;
1614
1615 		map = txm->txm_map;
1616 		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1617 		    BUS_DMASYNC_POSTWRITE);
1618 		bus_dmamap_unload(sc->sc_dmat, map);
1619
1620 		m_freem(txm->txm_m);
1621 		txm->txm_m = NULL;
1622 	}
1623 }
1624
1625 static void
1626 iavf_txr_free(struct iavf_softc *sc, struct iavf_tx_ring *txr)
1627 {
1628 	struct iavf_tx_map *maps, *txm;
1629 	unsigned int i;
1630
1631 	maps = txr->txr_maps;
1632 	for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
1633 		txm = &maps[i];
1634
1635 		bus_dmamap_destroy(sc->sc_dmat, txm->txm_map);
1636 	}
1637
1638 	iavf_dmamem_free(sc, &txr->txr_mem);
1639 	free(maps, M_DEVBUF, sizeof(*maps) * sc->sc_tx_ring_ndescs);
1640 	free(txr, M_DEVBUF, sizeof(*txr));
1641 }
1642
1643 static inline int
1644 iavf_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m)
1645 {
1646 	int error;
1647
1648 	error = bus_dmamap_load_mbuf(dmat, map, m,
1649 	    BUS_DMA_STREAMING | BUS_DMA_NOWAIT);
1650 	if (error != EFBIG)
1651 		return (error);
1652
1653 	error = m_defrag(m, M_DONTWAIT);
1654 	if (error != 0)
1655 		return (error);
1656
1657 	return (bus_dmamap_load_mbuf(dmat, map, m,
1658 	    BUS_DMA_STREAMING | BUS_DMA_NOWAIT));
1659 }
1660
1661 static void
1662 iavf_start(struct ifqueue *ifq)
1663 {
1664 	struct ifnet *ifp = ifq->ifq_if;
1665 	struct iavf_softc *sc = ifp->if_softc;
1666 	struct iavf_tx_ring *txr = ifq->ifq_softc;
1667 	struct iavf_tx_desc *ring, *txd;
1668 	struct iavf_tx_map *txm;
1669 	bus_dmamap_t map;
1670 	struct mbuf *m;
1671 	uint64_t cmd;
     [1] 'cmd' declared without an initial value
1672 	uint64_t vlan_cmd;
1673 	unsigned int prod, free, last, i;
1674 	unsigned int mask;
1675 	int post = 0;
1676 #if NBPFILTER > 0
1677 	caddr_t if_bpf;
1678 #endif
1679
1680 	if (!LINK_STATE_IS_UP(ifp->if_link_state)) {
     [2] Assuming field 'ifi_link_state' is >= 4
     [3] Taking false branch
1681 		ifq_purge(ifq);
1682 		return;
1683 	}
1684
1685 	prod = txr->txr_prod;
1686 	free = txr->txr_cons;
1687 	if (free <= prod)
     [4] Assuming 'free' is > 'prod'
     [5] Taking false branch
1688 		free += sc->sc_tx_ring_ndescs;
1689 	free -= prod;
1690
1691 	bus_dmamap_sync(sc->sc_dmat, IAVF_DMA_MAP(&txr->txr_mem),
1692 	    0, IAVF_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_POSTWRITE);
1693
1694 	ring = IAVF_DMA_KVA(&txr->txr_mem);
1695 	mask = sc->sc_tx_ring_ndescs - 1;
1696
1697 	for (;;) {
     [6] Loop condition is true. Entering loop body
1698 		if (free <= IAVF_TX_PKT_DESCS) {
     [7] Assuming 'free' is > IAVF_TX_PKT_DESCS
     [8] Taking false branch
1699 			ifq_set_oactive(ifq);
1700 			break;
1701 		}
1702
1703 		m = ifq_dequeue(ifq);
1704 		if (m == NULL)
     [9] Assuming 'm' is not equal to NULL
     [10] Taking false branch
1705 			break;
1706
1707 		txm = &txr->txr_maps[prod];
1708 		map = txm->txm_map;
1709
1710 		if (iavf_load_mbuf(sc->sc_dmat, map, m) != 0) {
     [11] Assuming the condition is false
     [12] Taking false branch
1711 			ifq->ifq_errors++;
1712 			m_freem(m);
1713 			continue;
1714 		}
1715
1716 		bus_dmamap_sync(sc->sc_dmat, map, 0,
1717 		    map->dm_mapsize, BUS_DMASYNC_PREWRITE);
1718
1719 		vlan_cmd = 0;
1720 		if (m->m_flags & M_VLANTAG) {
     [13] Assuming the condition is false
     [14] Taking false branch
1721 			vlan_cmd = IAVF_TX_DESC_CMD_IL2TAG1 |
1722 			    (((uint64_t)m->m_pkthdr.ether_vtag) <<
1723 			    IAVF_TX_DESC_L2TAG1_SHIFT);
1724 		}
1725
1726 		for (i = 0; i < map->dm_nsegs; i++) {
     [15] Assuming 'i' is >= field 'dm_nsegs'
     [16] Loop condition is false. Execution continues on line 1742
1727 			txd = &ring[prod];
1728
1729 			cmd = (uint64_t)map->dm_segs[i].ds_len <<
1730 			    IAVF_TX_DESC_BSIZE_SHIFT;
1731 			cmd |= IAVF_TX_DESC_DTYPE_DATA | IAVF_TX_DESC_CMD_ICRC |
1732 			    vlan_cmd;
1733
1734 			htolem64(&txd->addr, map->dm_segs[i].ds_addr);
1735 			htolem64(&txd->cmd, cmd);
1736
1737 			last = prod;
1738
1739 			prod++;
1740 			prod &= mask;
1741 		}
1742 		cmd |= IAVF_TX_DESC_CMD_EOP | IAVF_TX_DESC_CMD_RS;
     [17] The left expression of the compound assignment is an uninitialized value. The computed value will also be garbage
1743 		htolem64(&txd->cmd, cmd);
1744
1745 		txm->txm_m = m;
1746 		txm->txm_eop = last;
1747
1748 #if NBPFILTER > 0
1749 		if_bpf = ifp->if_bpf;
1750 		if (if_bpf)
1751 			bpf_mtap_ether(if_bpf, m, BPF_DIRECTION_OUT);
1752 #endif
1753
1754 		free -= i;
1755 		post = 1;
1756 	}
1757
1758 	bus_dmamap_sync(sc->sc_dmat, IAVF_DMA_MAP(&txr->txr_mem),
1759 	    0, IAVF_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_PREWRITE);
1760
1761 	if (post) {
1762 		txr->txr_prod = prod;
1763 		iavf_wr(sc, txr->txr_tail, prod);
1764 	}
1765 }
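
Steps [15] through [17] are the heart of this report: if the analyzer assumes dm_nsegs == 0 after a successful iavf_load_mbuf(), the segment loop never executes and 'cmd' (along with 'txd' and 'last') reaches line 1742 uninitialized. In practice a successful bus_dmamap_load_mbuf() yields at least one segment, but nothing in this translation unit states that invariant. A sketch of one way to satisfy the analyzer (an illustration, not the committed fix):

	uint64_t cmd = 0;	/* initialized, so line 1742 can never read garbage */

	/* ... after iavf_load_mbuf() succeeds, make the invariant explicit ... */
	if (map->dm_nsegs == 0) {
		/* unreachable after a successful load, but provably safe */
		bus_dmamap_unload(sc->sc_dmat, map);
		ifq->ifq_errors++;
		m_freem(m);
		continue;
	}

Either change alone silences the warning; the guard has the advantage of also keeping 'txd' and 'last' visibly defined on every path that reaches line 1742.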
1766
1767 static int
1768 iavf_txeof(struct iavf_softc *sc, struct ifqueue *ifq)
1769 {
1770 	struct iavf_tx_ring *txr = ifq->ifq_softc;
1771 	struct iavf_tx_desc *ring, *txd;
1772 	struct iavf_tx_map *txm;
1773 	bus_dmamap_t map;
1774 	unsigned int cons, prod, last;
1775 	unsigned int mask;
1776 	uint64_t dtype;
1777 	int done = 0;
1778
1779 	prod = txr->txr_prod;
1780 	cons = txr->txr_cons;
1781
1782 	if (cons == prod)
1783 		return (0);
1784
1785 	bus_dmamap_sync(sc->sc_dmat, IAVF_DMA_MAP(&txr->txr_mem),
1786 	    0, IAVF_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_POSTREAD);
1787
1788 	ring = IAVF_DMA_KVA(&txr->txr_mem);
1789 	mask = sc->sc_tx_ring_ndescs - 1;
1790
1791 	do {
1792 		txm = &txr->txr_maps[cons];
1793 		last = txm->txm_eop;
1794 		txd = &ring[last];
1795
1796 		dtype = txd->cmd & htole64(IAVF_TX_DESC_DTYPE_MASK);
1797 		if (dtype != htole64(IAVF_TX_DESC_DTYPE_DONE))
1798 			break;
1799
1800 		map = txm->txm_map;
1801
1802 		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1803 		    BUS_DMASYNC_POSTWRITE);
1804 		bus_dmamap_unload(sc->sc_dmat, map);
1805 		m_freem(txm->txm_m);
1806
1807 		txm->txm_m = NULL;
1808 		txm->txm_eop = -1;
1809
1810 		cons = last + 1;
1811 		cons &= mask;
1812
1813 		done = 1;
1814 	} while (cons != prod);
1815
1816 	bus_dmamap_sync(sc->sc_dmat, IAVF_DMA_MAP(&txr->txr_mem),
1817 	    0, IAVF_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_PREREAD);
1818
1819 	txr->txr_cons = cons;
1820
1821 	//ixl_enable(sc, txr->txr_msix);
1822
1823 	if (ifq_is_oactive(ifq))
1824 		ifq_restart(ifq);
1825
1826 	return (done);
1827 }
1828
1829 static struct iavf_rx_ring *
1830 iavf_rxr_alloc(struct iavf_softc *sc, unsigned int qid)
1831 {
1832 	struct iavf_rx_ring *rxr;
1833 	struct iavf_rx_map *maps, *rxm;
1834 	unsigned int i;
1835
1836 	rxr = malloc(sizeof(*rxr), M_DEVBUF, M_WAITOK|M_CANFAIL);
1837 	if (rxr == NULL)
1838 		return (NULL);
1839
1840 	maps = mallocarray(sizeof(*maps),
1841 	    sc->sc_rx_ring_ndescs, M_DEVBUF, M_WAITOK|M_CANFAIL|M_ZERO);
1842 	if (maps == NULL)
1843 		goto free;
1844
1845 	if (iavf_dmamem_alloc(sc, &rxr->rxr_mem,
1846 	    sizeof(struct iavf_rx_rd_desc_32) * sc->sc_rx_ring_ndescs,
1847 	    IAVF_RX_QUEUE_ALIGN) != 0)
1848 		goto freemap;
1849
1850 	for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
1851 		rxm = &maps[i];
1852
1853 		if (bus_dmamap_create(sc->sc_dmat,
1854 		    IAVF_HARDMTU, 1, IAVF_HARDMTU, 0,
1855 		    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
1856 		    &rxm->rxm_map) != 0)
1857 			goto uncreate;
1858
1859 		rxm->rxm_m = NULL;
1860 	}
1861
1862 	rxr->rxr_sc = sc;
1863 	if_rxr_init(&rxr->rxr_acct, 17, sc->sc_rx_ring_ndescs - 1);
1864 	timeout_set(&rxr->rxr_refill, iavf_rxrefill, rxr);
1865 	rxr->rxr_cons = rxr->rxr_prod = 0;
1866 	rxr->rxr_m_head = NULL;
1867 	rxr->rxr_m_tail = &rxr->rxr_m_head;
1868 	rxr->rxr_maps = maps;
1869
1870 	rxr->rxr_tail = I40E_QRX_TAIL1(qid);
1871 	rxr->rxr_qid = qid;
1872
1873 	return (rxr);
1874
1875 uncreate:
1876 	for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
1877 		rxm = &maps[i];
1878
1879 		if (rxm->rxm_map == NULL)
1880 			continue;
1881
1882 		bus_dmamap_destroy(sc->sc_dmat, rxm->rxm_map);
1883 	}
1884
1885 	iavf_dmamem_free(sc, &rxr->rxr_mem);
1886 freemap:
1887 	free(maps, M_DEVBUF, sizeof(*maps) * sc->sc_rx_ring_ndescs);
1888 free:
1889 	free(rxr, M_DEVBUF, sizeof(*rxr));
1890 	return (NULL);
1891 }
1892
1893 static void
1894 iavf_rxr_clean(struct iavf_softc *sc, struct iavf_rx_ring *rxr)
1895 {
1896 	struct iavf_rx_map *maps, *rxm;
1897 	bus_dmamap_t map;
1898 	unsigned int i;
1899
1900 	timeout_del_barrier(&rxr->rxr_refill);
1901
1902 	maps = rxr->rxr_maps;
1903 	for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
1904 		rxm = &maps[i];
1905
1906 		if (rxm->rxm_m == NULL)
1907 			continue;
1908
1909 		map = rxm->rxm_map;
1910 		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1911 		    BUS_DMASYNC_POSTWRITE);
1912 		bus_dmamap_unload(sc->sc_dmat, map);
1913
1914 		m_freem(rxm->rxm_m);
1915 		rxm->rxm_m = NULL;
1916 	}
1917
1918 	m_freem(rxr->rxr_m_head);
1919 	rxr->rxr_m_head = NULL;
1920 	rxr->rxr_m_tail = &rxr->rxr_m_head;
1921
1922 	rxr->rxr_prod = rxr->rxr_cons = 0;
1923 }
1924
1925 static void
1926 iavf_rxr_free(struct iavf_softc *sc, struct iavf_rx_ring *rxr)
1927 {
1928 	struct iavf_rx_map *maps, *rxm;
1929 	unsigned int i;
1930
1931 	maps = rxr->rxr_maps;
1932 	for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
1933 		rxm = &maps[i];
1934
1935 		bus_dmamap_destroy(sc->sc_dmat, rxm->rxm_map);
1936 	}
1937
1938 	iavf_dmamem_free(sc, &rxr->rxr_mem);
1939 	free(maps, M_DEVBUF, sizeof(*maps) * sc->sc_rx_ring_ndescs);
1940 	free(rxr, M_DEVBUF, sizeof(*rxr));
1941 }
1942
1943 static int
1944 iavf_rxeof(struct iavf_softc *sc, struct ifiqueue *ifiq)
1945 {
1946 	struct iavf_rx_ring *rxr = ifiq->ifiq_softc;
1947 	struct ifnet *ifp = &sc->sc_ac.ac_if;
1948 	struct iavf_rx_wb_desc_32 *ring, *rxd;
1949 	struct iavf_rx_map *rxm;
1950 	bus_dmamap_t map;
1951 	unsigned int cons, prod;
1952 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
1953 	struct mbuf *m;
1954 	uint64_t word;
1955 	uint16_t vlan;
1956 	unsigned int len;
1957 	unsigned int mask;
1958 	int done = 0;
1959
1960 	if (!ISSET(ifp->if_flags, IFF_RUNNING))
1961 		return (0);
1962
1963 	prod = rxr->rxr_prod;
1964 	cons = rxr->rxr_cons;
1965
1966 	if (cons == prod)
1967 		return (0);
1968
1969 	bus_dmamap_sync(sc->sc_dmat, IAVF_DMA_MAP(&rxr->rxr_mem),
1970 	    0, IAVF_DMA_LEN(&rxr->rxr_mem),
1971 	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1972
1973 	ring = IAVF_DMA_KVA(&rxr->rxr_mem);
1974 	mask = sc->sc_rx_ring_ndescs - 1;
1975
1976 	do {
1977 		rxd = &ring[cons];
1978
1979 		word = lemtoh64(&rxd->qword1);
1980 		if (!ISSET(word, IAVF_RX_DESC_DD))
1981 			break;
1982
1983 		if_rxr_put(&rxr->rxr_acct, 1);
1984
1985 		rxm = &rxr->rxr_maps[cons];
1986
1987 		map = rxm->rxm_map;
1988 		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1989 		    BUS_DMASYNC_POSTREAD);
1990 		bus_dmamap_unload(sc->sc_dmat, map);
1991
1992 		m = rxm->rxm_m;
1993 		rxm->rxm_m = NULL;
1994
1995 		len = (word & IAVF_RX_DESC_PLEN_MASK) >> IAVF_RX_DESC_PLEN_SHIFT;
1996 		m->m_len = len;
1997 		m->m_pkthdr.len = 0;
1998
1999 		m->m_next = NULL;
2000 		*rxr->rxr_m_tail = m;
2001 		rxr->rxr_m_tail = &m->m_next;
2002
2003 		m = rxr->rxr_m_head;
2004 		m->m_pkthdr.len += len;
2005
2006 		if (ISSET(word, IAVF_RX_DESC_EOP)) {
2007 			if (ISSET(word, IAVF_RX_DESC_L2TAG1P)) {
2008 				vlan = (lemtoh64(&rxd->qword0) &
2009 				    IAVF_RX_DESC_L2TAG1_MASK)
2010 				    >> IAVF_RX_DESC_L2TAG1_SHIFT;
2011 				m->m_pkthdr.ether_vtag = vlan;
2012 				m->m_flags |= M_VLANTAG;
2013 			}
2014 			if (!ISSET(word,
2015 			    IAVF_RX_DESC_RXE | IAVF_RX_DESC_OVERSIZE)) {
2016 				ml_enqueue(&ml, m);
2017 			} else {
2018 				ifp->if_ierrors++; /* XXX */
2019 				m_freem(m);
2020 			}
2021
2022 			rxr->rxr_m_head = NULL;
2023 			rxr->rxr_m_tail = &rxr->rxr_m_head;
2024 		}
2025
2026 		cons++;
2027 		cons &= mask;
2028
2029 		done = 1;
2030 	} while (cons != prod);
2031
2032 	if (done) {
2033 		rxr->rxr_cons = cons;
2034 		if (ifiq_input(ifiq, &ml))
2035 			if_rxr_livelocked(&rxr->rxr_acct);
2036 		iavf_rxfill(sc, rxr);
2037 	}
2038
2039 	bus_dmamap_sync(sc->sc_dmat, IAVF_DMA_MAP(&rxr->rxr_mem),
2040 	    0, IAVF_DMA_LEN(&rxr->rxr_mem),
2041 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2042
2043 	return (done);
2044 }
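
The receive path pulls the packet length out of qword1 and the VLAN tag out of qword0 with mask-and-shift pairs. A runnable illustration using the same constants the expansions above show (0x3fff << 38 for the length, 0xffff << 16 for the tag):

#include <assert.h>
#include <stdint.h>

#define RX_DESC_PLEN_SHIFT	38
#define RX_DESC_PLEN_MASK	(0x3fffULL << RX_DESC_PLEN_SHIFT)
#define RX_DESC_L2TAG1_SHIFT	16
#define RX_DESC_L2TAG1_MASK	(0xffffULL << RX_DESC_L2TAG1_SHIFT)

int
main(void)
{
	/* fabricate a write-back descriptor with len 1500 and vlan 100 */
	uint64_t qword1 = (uint64_t)1500 << RX_DESC_PLEN_SHIFT;
	uint64_t qword0 = (uint64_t)100 << RX_DESC_L2TAG1_SHIFT;

	assert(((qword1 & RX_DESC_PLEN_MASK) >> RX_DESC_PLEN_SHIFT) == 1500);
	assert(((qword0 & RX_DESC_L2TAG1_MASK) >> RX_DESC_L2TAG1_SHIFT) == 100);
	return (0);
}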
2045
2046 static void
2047 iavf_rxfill(struct iavf_softc *sc, struct iavf_rx_ring *rxr)
2048 {
2049 	struct iavf_rx_rd_desc_32 *ring, *rxd;
2050 	struct iavf_rx_map *rxm;
2051 	bus_dmamap_t map;
2052 	struct mbuf *m;
2053 	unsigned int prod;
2054 	unsigned int slots;
2055 	unsigned int mask;
2056 	int post = 0;
2057
2058 	slots = if_rxr_get(&rxr->rxr_acct, sc->sc_rx_ring_ndescs);
2059 	if (slots == 0)
2060 		return;
2061
2062 	prod = rxr->rxr_prod;
2063
2064 	ring = IAVF_DMA_KVA(&rxr->rxr_mem);
2065 	mask = sc->sc_rx_ring_ndescs - 1;
2066
2067 	do {
2068 		rxm = &rxr->rxr_maps[prod];
2069
2070 		m = MCLGETL(NULL, M_DONTWAIT, MCLBYTES + ETHER_ALIGN);
2071 		if (m == NULL)
2072 			break;
2073 		m->m_data += (m->m_ext.ext_size - (MCLBYTES + ETHER_ALIGN));
2074 		m->m_len = m->m_pkthdr.len = MCLBYTES + ETHER_ALIGN;
2075
2076 		map = rxm->rxm_map;
2077
2078 		if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
2079 		    BUS_DMA_NOWAIT) != 0) {
2080 			m_freem(m);
2081 			break;
2082 		}
2083
2084 		rxm->rxm_m = m;
2085
2086 		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2087 		    BUS_DMASYNC_PREREAD);
2088
2089 		rxd = &ring[prod];
2090
2091 		htolem64(&rxd->paddr, map->dm_segs[0].ds_addr);
2092 		rxd->haddr = htole64(0);
2093
2094 		prod++;
2095 		prod &= mask;
2096
2097 		post = 1;
2098 	} while (--slots);
2099
2100 	if_rxr_put(&rxr->rxr_acct, slots);
2101
2102 	if (if_rxr_inuse(&rxr->rxr_acct) == 0)
2103 		timeout_add(&rxr->rxr_refill, 1);
2104 	else if (post) {
2105 		rxr->rxr_prod = prod;
2106 		iavf_wr(sc, rxr->rxr_tail, prod);
2107 	}
2108 }
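
The m_data adjustment above leaves exactly MCLBYTES + ETHER_ALIGN bytes at the tail of the cluster. The ETHER_ALIGN bias of 2 bytes is the usual trick to 4-byte-align the IP header that follows the 14-byte Ethernet header; a tiny runnable check of that arithmetic (the interpretation, not the constants, is the assumption here):

#include <assert.h>

int
main(void)
{
	unsigned int ether_align = 2;	/* ETHER_ALIGN in the expansion above */
	unsigned int ether_hdr = 14;	/* Ethernet header length */

	/* offset of the IP header when the frame starts 2 bytes in */
	assert((ether_align + ether_hdr) % 4 == 0);
	return (0);
}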
2109
2110 void
2111 iavf_rxrefill(void *arg)
2112 {
2113 	struct iavf_rx_ring *rxr = arg;
2114 	struct iavf_softc *sc = rxr->rxr_sc;
2115
2116 	iavf_rxfill(sc, rxr);
2117 }
2118
2119 static int
2120 iavf_rxrinfo(struct iavf_softc *sc, struct if_rxrinfo *ifri)
2121 {
2122 	struct ifnet *ifp = &sc->sc_ac.ac_if;
2123 	struct if_rxring_info *ifr;
2124 	struct iavf_rx_ring *ring;
2125 	int i, rv;
2126
2127 	if (!ISSET(ifp->if_flags, IFF_RUNNING))
2128 		return (ENOTTY);
2129
2130 	ifr = mallocarray(sizeof(*ifr), iavf_nqueues(sc), M_TEMP,
2131 	    M_WAITOK|M_CANFAIL|M_ZERO);
2132 	if (ifr == NULL)
2133 		return (ENOMEM);
2134
2135 	for (i = 0; i < iavf_nqueues(sc); i++) {
2136 		ring = ifp->if_iqs[i]->ifiq_softc;
2137 		ifr[i].ifr_size = MCLBYTES;
2138 		ifr[i].ifr_info = ring->rxr_acct;
2139 	}
2140
2141 	rv = if_rxr_info_ioctl(ifri, iavf_nqueues(sc), ifr);
2142 	free(ifr, M_TEMP, iavf_nqueues(sc) * sizeof(*ifr));
2143
2144 	return (rv);
2145 }
2146
2147 static int
2148 iavf_intr(void *xsc)
2149 {
2150 	struct iavf_softc *sc = xsc;
2151 	struct ifnet *ifp = &sc->sc_ac.ac_if;
2152 	uint32_t icr, ena;
2153 	int i, rv = 0;
2154
2155 	ena = iavf_rd(sc, I40E_VFINT_ICR0_ENA1);
2156 	iavf_intr_enable(sc);
2157 	icr = iavf_rd(sc, I40E_VFINT_ICR01);
2158
2159 	if (icr == IAVF_REG_VFR) {
2160 		printf("%s: VF reset in progress\n", DEVNAME(sc));
2161 		sc->sc_resetting = 1;
2162 		task_add(systq, &sc->sc_reset_task);
2163 		return (1);
2164 	}
2165
2166 	if (ISSET(icr, I40E_VFINT_ICR01_ADMINQ_MASK)) {
2167 		iavf_atq_done(sc);
2168 		iavf_process_arq(sc, 0);
2169 		rv = 1;
2170 	}
2171
2172 	if (ISSET(icr, I40E_VFINT_ICR01_QUEUE_0_MASK)) {
2173 		for (i = 0; i < iavf_nqueues(sc); i++) {
2174 			rv |= iavf_rxeof(sc, ifp->if_iqs[i]);
2175 			rv |= iavf_txeof(sc, ifp->if_ifqs[i]);
2176 		}
2177 	}
2178
2179 	return (rv);
2180 }
2181
2182 static void
2183 iavf_process_vf_resources(struct iavf_softc *sc, struct iavf_aq_desc *desc,
2184     struct iavf_aq_buf *buf)
2185 {
2186 	struct ifnet *ifp = &sc->sc_ac.ac_if;
2187 	struct iavf_vc_vf_resource *vf_res;
2188 	struct iavf_vc_vsi_resource *vsi_res;
2189 	int mtu;
2190
2191 	sc->sc_got_vf_resources = 1;
2192
2193 	vf_res = buf->aqb_data;
2194 	if (letoh16(vf_res->num_vsis) == 0) {
2195 		printf(", no VSI available\n");
2196 		/* set vsi number to something */
2197 		return;
2198 	}
2199
2200 	mtu = letoh16(vf_res->max_mtu);
2201 	if (mtu != 0)
2202 		ifp->if_hardmtu = MIN(IAVF_HARDMTU, mtu);
2203
2204 	/* limit vectors to what we got here? */
2205
2206 	/* just take the first vsi */
2207 	vsi_res = &vf_res->vsi_res[0];
2208 	sc->sc_vsi_id = letoh16(vsi_res->vsi_id);
2209 	sc->sc_qset_handle = letoh16(vsi_res->qset_handle);
2210 	/* limit number of queues to what we got here */
2211 	/* is vsi type interesting? */
2212
2213 	sc->sc_vf_id = letoh32(desc->iaq_param[0]);
2214
2215 	memcpy(sc->sc_ac.ac_enaddr, vsi_res->default_mac, ETHER_ADDR_LEN);
2216
2217 	if (sc->sc_resetting == 0)
2218 		printf(", VF %d VSI %d", sc->sc_vf_id, sc->sc_vsi_id);
2219 }
2220
2221 static const struct iavf_link_speed *
2222 iavf_find_link_speed(struct iavf_softc *sc, uint32_t link_speed)
2223 {
2224 	int i;
2225 	for (i = 0; i < nitems(iavf_link_speeds); i++) {
2226 		if (link_speed & (1 << i))
2227 			return (&iavf_link_speeds[i]);
2228 	}
2229
2230 	return (NULL);
2231 }
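
iavf_find_link_speed() returns the table entry for the lowest bit set in the PF-reported speed mask. A runnable sketch of that selection with a made-up three-entry table (the real baudrates and media types live in iavf_link_speeds):

#include <assert.h>
#include <stdint.h>

static const unsigned long long speeds[] = { 100ULL, 1000ULL, 10000ULL };

static unsigned long long
find_speed(uint32_t mask)
{
	int i;

	for (i = 0; i < 3; i++) {
		if (mask & (1 << i))
			return (speeds[i]);
	}
	return (0);
}

int
main(void)
{
	assert(find_speed(0x6) == 1000);	/* bit 1 wins over bit 2 */
	return (0);
}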
2232
2233 static void
2234 iavf_process_vc_event(struct iavf_softc *sc, struct iavf_aq_desc *desc,
2235     struct iavf_aq_buf *buf)
2236 {
2237 	struct iavf_vc_pf_event *event;
2238 	struct ifnet *ifp = &sc->sc_ac.ac_if;
2239 	const struct iavf_link_speed *speed;
2240 	int link;
2241
2242 	event = buf->aqb_data;
2243 	switch (event->event) {
2244 	case IAVF_VC_EVENT_LINK_CHANGE:
2245 		sc->sc_media_status = IFM_AVALID;
2246 		sc->sc_media_active = IFM_ETHER;
2247 		link = LINK_STATE_DOWN;
2248 		if (event->link_status) {
2249 			link = LINK_STATE_UP;
2250 			sc->sc_media_status |= IFM_ACTIVE;
2251
2252 			ifp->if_baudrate = 0;
2253 			speed = iavf_find_link_speed(sc, event->link_speed);
2254 			if (speed != NULL) {
2255 				sc->sc_media_active |= speed->media;
2256 				ifp->if_baudrate = speed->baudrate;
2257 			}
2258 		}
2259
2260 		if (ifp->if_link_state != link) {
2261 			ifp->if_link_state = link;
2262 			if_link_state_change(ifp);
2263 		}
2264 		break;
2265
2266 	default:
2267 		break;
2268 	}
2269 }
2270
2271 static void
2272 iavf_process_irq_map(struct iavf_softc *sc, struct iavf_aq_desc *desc)
2273 {
2274 	if (letoh32(desc->iaq_vc_retval) != IAVF_VC_RC_SUCCESS) {
2275 		printf("config irq map failed: %d\n", letoh32(desc->iaq_vc_retval));
2276 	}
2277 	sc->sc_got_irq_map = 1;
2278 }
2279
2280 static void
2281 iavf_init_admin_queue(struct iavf_softc *sc)
2282 {
2283 	iavf_wr(sc, sc->sc_aq_regs->atq_head, 0);
2284 	iavf_wr(sc, sc->sc_aq_regs->arq_head, 0);
2285 	iavf_wr(sc, sc->sc_aq_regs->atq_tail, 0);
2286
2287 	iavf_barrier(sc, 0, sc->sc_mems, BUS_SPACE_BARRIER_WRITE);
2288
2289 	iavf_wr(sc, sc->sc_aq_regs->atq_bal,
2290 	    iavf_dmamem_lo(&sc->sc_atq));
2291 	iavf_wr(sc, sc->sc_aq_regs->atq_bah,
2292 	    iavf_dmamem_hi(&sc->sc_atq));
2293 	iavf_wr(sc, sc->sc_aq_regs->atq_len,
2294 	    sc->sc_aq_regs->atq_len_enable | IAVF_AQ_NUM);
2295
2296 	iavf_wr(sc, sc->sc_aq_regs->arq_bal,
2297 	    iavf_dmamem_lo(&sc->sc_arq));
2298 	iavf_wr(sc, sc->sc_aq_regs->arq_bah,
2299 	    iavf_dmamem_hi(&sc->sc_arq));
2300 	iavf_wr(sc, sc->sc_aq_regs->arq_len,
2301 	    sc->sc_aq_regs->arq_len_enable | IAVF_AQ_NUM);
2302
2303 	iavf_wr(sc, sc->sc_aq_regs->arq_tail, sc->sc_arq_prod);
2304 }
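
The atq_bal/atq_bah and arq_bal/arq_bah writes split each ring's 64-bit DMA address across two 32-bit registers; the iavf_dmamem_lo()/iavf_dmamem_hi() expansions shown above reduce to a truncation and a 32-bit shift. A runnable illustration:

#include <assert.h>
#include <stdint.h>

int
main(void)
{
	uint64_t dva = 0x0000001234abcd00ULL;	/* example ring address */
	uint32_t lo = (uint32_t)dva;		/* *_bal register value */
	uint32_t hi = (uint32_t)(dva >> 32);	/* *_bah register value */

	/* the hardware reassembles the same 64-bit address */
	assert((((uint64_t)hi << 32) | lo) == dva);
	return (0);
}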
2305
2306 static int
2307 iavf_process_arq(struct iavf_softc *sc, int fill)
2308 {
2309 	struct iavf_aq_desc *arq, *iaq;
2310 	struct iavf_aq_buf *aqb;
2311 	struct iavf_vc_version_info *ver;
2312 	unsigned int cons = sc->sc_arq_cons;
2313 	unsigned int prod;
2314 	int done = 0;
2315
2316 	prod = iavf_rd(sc, sc->sc_aq_regs->arq_head) &
2317 	    sc->sc_aq_regs->arq_head_mask;
2318
2319 	if (cons == prod)
2320 		return (0);
2321
2322 	arq = IAVF_DMA_KVA(&sc->sc_arq);
2323
2324 	bus_dmamap_sync(sc->sc_dmat, IAVF_DMA_MAP(&sc->sc_arq),
2325 	    0, IAVF_DMA_LEN(&sc->sc_arq),
2326 	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2327
2328 	do {
2329 		iaq = &arq[cons];
2330
2331 		aqb = SIMPLEQ_FIRST(&sc->sc_arq_live);
2332 		SIMPLEQ_REMOVE_HEAD(&sc->sc_arq_live, aqb_entry);
2333 		bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, IAVF_AQ_BUFLEN,
2334 		    BUS_DMASYNC_POSTREAD);
2335
2336 		switch (letoh32(iaq->iaq_vc_opcode)) {
2337 		case IAVF_VC_OP_VERSION:
2338 			ver = aqb->aqb_data;
2339 			sc->sc_major_ver = letoh32(ver->major);
2340 			sc->sc_minor_ver = letoh32(ver->minor);
2341 			break;
2342
2343 		case IAVF_VC_OP_GET_VF_RESOURCES:
2344 			iavf_process_vf_resources(sc, iaq, aqb);
2345 			break;
2346
2347 		case IAVF_VC_OP_EVENT:
2348 			iavf_process_vc_event(sc, iaq, aqb);
2349 			break;
2350
2351 		case IAVF_VC_OP_CONFIG_IRQ_MAP:
2352 			iavf_process_irq_map(sc, iaq);
2353 			break;
2354
2355 		case IAVF_VC_OP_CONFIG_TX_QUEUE:
2356 		case IAVF_VC_OP_CONFIG_RX_QUEUE:
2357 		case IAVF_VC_OP_CONFIG_VSI_QUEUES:
2358 		case IAVF_VC_OP_ENABLE_QUEUES:
2359 		case IAVF_VC_OP_DISABLE_QUEUES:
2360 		case IAVF_VC_OP_GET_RSS_HENA_CAPS:
2361 		case IAVF_VC_OP_SET_RSS_HENA:
2362 		case IAVF_VC_OP_ADD_ETH_ADDR:
2363 		case IAVF_VC_OP_DEL_ETH_ADDR:
2364 		case IAVF_VC_OP_CONFIG_PROMISC:
2365 			sc->sc_admin_result = letoh32(iaq->iaq_vc_retval);
2366 			cond_signal(&sc->sc_admin_cond);
2367 			break;
2368 		}
2369
2370 		memset(iaq, 0, sizeof(*iaq));
2371 		SIMPLEQ_INSERT_TAIL(&sc->sc_arq_idle, aqb, aqb_entry);
2372 		if_rxr_put(&sc->sc_arq_ring, 1);
2373
2374 		cons++;
2375 		cons &= IAVF_AQ_MASK;
2376
2377 		done = 1;
2378 	} while (cons != prod);
2379
2380 	if (fill)
2381 		iavf_arq_fill(sc, 1);
2382
2383 	bus_dmamap_sync(sc->sc_dmat, IAVF_DMA_MAP(&sc->sc_arq),
2384 	    0, IAVF_DMA_LEN(&sc->sc_arq),
2385 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2386
2387 	sc->sc_arq_cons = cons;
2388 	return (done);
2389 }
2390
2391 static void
2392 iavf_atq_done(struct iavf_softc *sc)
2393 {
2394 	struct iavf_aq_desc *atq, *slot;
2395 	unsigned int cons;
2396 	unsigned int prod;
2397
2398 	prod = sc->sc_atq_prod;
2399 	cons = sc->sc_atq_cons;
2400
2401 	if (prod == cons)
2402 		return;
2403
2404 	atq = IAVF_DMA_KVA(&sc->sc_atq);
2405
2406 	bus_dmamap_sync(sc->sc_dmat, IAVF_DMA_MAP(&sc->sc_atq),
2407 	    0, IAVF_DMA_LEN(&sc->sc_atq),
2408 	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2409
2410 	do {
2411 		slot = &atq[cons];
2412 		if (!ISSET(slot->iaq_flags, htole16(IAVF_AQ_DD)))
2413 			break;
2414
2415 		memset(slot, 0, sizeof(*slot));
2416
2417 		cons++;
2418 		cons &= IAVF_AQ_MASK;
2419 	} while (cons != prod);
2420
2421 	bus_dmamap_sync(sc->sc_dmat, IAVF_DMA_MAP(&sc->sc_atq),
2422 	    0, IAVF_DMA_LEN(&sc->sc_atq),
2423 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2424
2425 	sc->sc_atq_cons = cons;
2426 }
2427
2428 static int
2429 iavf_atq_post(struct iavf_softc *sc, struct iavf_aq_desc *iaq)
2430 {
2431 	struct iavf_aq_desc *atq, *slot;
2432 	unsigned int prod;
2433
2434 	atq = IAVF_DMA_KVA(&sc->sc_atq);
2435 	prod = sc->sc_atq_prod;
2436 	slot = atq + prod;
2437
2438 	bus_dmamap_sync(sc->sc_dmat, IAVF_DMA_MAP(&sc->sc_atq),
2439 	    0, IAVF_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTWRITE);
2440
2441 	*slot = *iaq;
2442 	slot->iaq_flags |= htole16(IAVF_AQ_SI);
2443
2444 	bus_dmamap_sync(sc->sc_dmat, IAVF_DMA_MAP(&sc->sc_atq),
2445 	    0, IAVF_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREWRITE);
2446
2447 	prod++;
2448 	prod &= IAVF_AQ_MASK;
2449 	sc->sc_atq_prod = prod;
2450 	iavf_wr(sc, sc->sc_aq_regs->atq_tail, prod);
2451 	return (prod);
2452 }
2453
2454 static int
2455 iavf_get_version(struct iavf_softc *sc)
2456 {
2457 	struct iavf_aq_desc iaq;
2458 	struct iavf_vc_version_info *ver;
2459 	int tries;
2460
2461 	memset(&iaq, 0, sizeof(iaq));
2462 	iaq.iaq_flags = htole16(IAVF_AQ_BUF | IAVF_AQ_RD);
2463 	iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
2464 	iaq.iaq_vc_opcode = htole32(IAVF_VC_OP_VERSION);
2465 	iaq.iaq_datalen = htole16(sizeof(struct iavf_vc_version_info));
2466 	iavf_aq_dva(&iaq, IAVF_DMA_DVA(&sc->sc_scratch));
2467
2468 	ver = IAVF_DMA_KVA(&sc->sc_scratch);
2469 	ver->major = htole32(IAVF_VF_MAJOR);
2470 	ver->minor = htole32(IAVF_VF_MINOR);
2471 	sc->sc_major_ver = UINT_MAX;
2472 	sc->sc_minor_ver = UINT_MAX;
2473
2474 	membar_sync();
2475 	bus_dmamap_sync(sc->sc_dmat, IAVF_DMA_MAP(&sc->sc_scratch), 0, IAVF_DMA_LEN(&sc->sc_scratch),
2476 	    BUS_DMASYNC_PREREAD);
2477
2478 	iavf_atq_post(sc, &iaq);
2479
2480 	for (tries = 0; tries < 100; tries++) {
2481 		iavf_process_arq(sc, 1);
2482 		if (sc->sc_major_ver != -1)
2483 			break;
2484
2485 		delaymsec(1);
2486 	}
2487 	if (tries == 100) {
2488 		printf(", timeout waiting for VF version");
2489 		return (1);
2490 	}
2491
2492 	if (sc->sc_major_ver != IAVF_VF_MAJOR) {
2493 		printf(", unsupported VF version %d", sc->sc_major_ver);
2494 		return (1);
2495 	}
2496
2497 	if (sc->sc_resetting == 0) {
2498 		printf(", VF version %d.%d%s", sc->sc_major_ver,
2499 		    sc->sc_minor_ver,
2500 		    (sc->sc_minor_ver > IAVF_VF_MINOR) ? " (minor mismatch)" : "");
2501 	}
2502
2503 	return (0);
2504 }
2505
2506 static int
2507 iavf_get_vf_resources(struct iavf_softc *sc)
2508 {
2509 	struct iavf_aq_desc iaq;
2510 	uint32_t *cap;
2511 	int tries;
2512
2513 	memset(&iaq, 0, sizeof(iaq));
2514 	iaq.iaq_flags = htole16(IAVF_AQ_BUF | IAVF_AQ_RD);
2515 	iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
2516 	iaq.iaq_vc_opcode = htole32(IAVF_VC_OP_GET_VF_RESOURCES);
2517 	iavf_aq_dva(&iaq, IAVF_DMA_DVA(&sc->sc_scratch));
2518
2519 	if (sc->sc_minor_ver > 0) {
2520 		iaq.iaq_datalen = htole16(sizeof(uint32_t));
2521 		cap = IAVF_DMA_KVA(&sc->sc_scratch);
2522 		*cap = htole32(IAVF_VC_OFFLOAD_L2 | IAVF_VC_OFFLOAD_VLAN |
2523 		    IAVF_VC_OFFLOAD_RSS_PF);
2524 	}
2525
2526 	bus_dmamap_sync(sc->sc_dmat, IAVF_DMA_MAP(&sc->sc_scratch), 0, IAVF_DMA_LEN(&sc->sc_scratch),
2527 	    BUS_DMASYNC_PREREAD);
2528
2529 	sc->sc_got_vf_resources = 0;
2530 	iavf_atq_post(sc, &iaq);
2531
2532 	for (tries = 0; tries < 100; tries++) {
2533 		iavf_process_arq(sc, 1);
2534 		if (sc->sc_got_vf_resources != 0)
2535 			return (0);
2536
2537 		delaymsec(1);
2538 	}
2539
2540 	return (1);
2541 }
2542
2543 static int
2544 iavf_config_irq_map(struct iavf_softc *sc)
2545 {
2546 	struct iavf_aq_desc iaq;
2547 	struct iavf_vc_vector_map *vec;
2548 	struct iavf_vc_irq_map_info *map;
2549 	int tries;
2550
2551 	memset(&iaq, 0, sizeof(iaq));
2552 	iaq.iaq_flags = htole16(IAVF_AQ_BUF | IAVF_AQ_RD);
2553 	iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
2554 	iaq.iaq_vc_opcode = htole32(IAVF_VC_OP_CONFIG_IRQ_MAP);
2555 	iaq.iaq_datalen = htole16(sizeof(*map) + sizeof(*vec));
2556 	iavf_aq_dva(&iaq, IAVF_DMA_DVA(&sc->sc_scratch));
2557
2558 	map = IAVF_DMA_KVA(&sc->sc_scratch);
2559 	map->num_vectors = letoh16(1);
2560
2561 	vec = map->vecmap;
2562 	vec[0].vsi_id = letoh16(sc->sc_vsi_id);
2563 	vec[0].vector_id = 0;
2564 	vec[0].rxq_map = letoh16(iavf_allqueues(sc));
2565 	vec[0].txq_map = letoh16(iavf_allqueues(sc));
2566 	vec[0].rxitr_idx = IAVF_NOITR;
2567 	vec[0].txitr_idx = IAVF_NOITR;
2568
2569 	bus_dmamap_sync(sc->sc_dmat, IAVF_DMA_MAP(&sc->sc_scratch), 0, IAVF_DMA_LEN(&sc->sc_scratch),
2570 	    BUS_DMASYNC_PREREAD);
2571
2572 	sc->sc_got_irq_map = 0;
2573 	iavf_atq_post(sc, &iaq);
2574
2575 	for (tries = 0; tries < 100; tries++) {
2576 		iavf_process_arq(sc, 1);
2577 		if (sc->sc_got_irq_map != 0)
2578 			return (0);
2579
2580 		delaymsec(1);
2581 	}
2582
2583 	return (1);
2584 }
2585
2586 static struct iavf_aq_buf *
2587 iavf_aqb_alloc(struct iavf_softc *sc)
2588 {
2589 	struct iavf_aq_buf *aqb;
2590
2591 	aqb = malloc(sizeof(*aqb), M_DEVBUF, M_WAITOK);
2592 	if (aqb == NULL)
2593 		return (NULL);
2594
2595 	aqb->aqb_data = dma_alloc(IAVF_AQ_BUFLEN, PR_WAITOK);
2596 	if (aqb->aqb_data == NULL)
2597 		goto free;
2598
2599 	if (bus_dmamap_create(sc->sc_dmat, IAVF_AQ_BUFLEN, 1,
2600 	    IAVF_AQ_BUFLEN, 0,
2601 	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
2602 	    &aqb->aqb_map) != 0)
2603 		goto dma_free;
2604
2605 	if (bus_dmamap_load(sc->sc_dmat, aqb->aqb_map, aqb->aqb_data,
2606 	    IAVF_AQ_BUFLEN, NULL, BUS_DMA_WAITOK) != 0)
2607 		goto destroy;
2608
2609 	return (aqb);
2610
2611 destroy:
2612 	bus_dmamap_destroy(sc->sc_dmat, aqb->aqb_map);
2613 dma_free:
2614 	dma_free(aqb->aqb_data, IAVF_AQ_BUFLEN);
2615 free:
2616 	free(aqb, M_DEVBUF, sizeof(*aqb));
2617
2618 	return (NULL);
2619 }
2620
2621 static void
2622 iavf_aqb_free(struct iavf_softc *sc, struct iavf_aq_buf *aqb)
2623 {
2624 	bus_dmamap_unload(sc->sc_dmat, aqb->aqb_map);
2625 	bus_dmamap_destroy(sc->sc_dmat, aqb->aqb_map);
2626 	dma_free(aqb->aqb_data, IAVF_AQ_BUFLEN);
2627 	free(aqb, M_DEVBUF, sizeof(*aqb));
2628 }
2629
2630 static int
2631 iavf_arq_fill(struct iavf_softc *sc, int post)
2632 {
2633 	struct iavf_aq_buf *aqb;
2634 	struct iavf_aq_desc *arq, *iaq;
2635 	unsigned int prod = sc->sc_arq_prod;
2636 	unsigned int n;
2637 	int filled = 0;
2638
2639 	n = if_rxr_get(&sc->sc_arq_ring, IAVF_AQ_NUM);
2640 	arq = IAVF_DMA_KVA(&sc->sc_arq);
2641
2642 	while (n > 0) {
2643 		aqb = SIMPLEQ_FIRST(&sc->sc_arq_idle);
2644 		if (aqb != NULL)
2645 			SIMPLEQ_REMOVE_HEAD(&sc->sc_arq_idle, aqb_entry);
2646 		else if ((aqb = iavf_aqb_alloc(sc)) == NULL)
2647 			break;
2648
2649 		memset(aqb->aqb_data, 0, IAVF_AQ_BUFLEN);
2650
2651 		bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, IAVF_AQ_BUFLEN,
2652 		    BUS_DMASYNC_PREREAD);
2653
2654 		iaq = &arq[prod];
2655 		iaq->iaq_flags = htole16(IAVF_AQ_BUF |
2656 		    (IAVF_AQ_BUFLEN > I40E_AQ_LARGE_BUF ? IAVF_AQ_LB : 0));
2657 		iaq->iaq_opcode = 0;
2658 		iaq->iaq_datalen = htole16(IAVF_AQ_BUFLEN);
2659 		iaq->iaq_retval = 0;
2660 		iaq->iaq_vc_opcode = 0;
2661 		iaq->iaq_vc_retval = 0;
2662 		iaq->iaq_param[0] = 0;
2663 		iaq->iaq_param[1] = 0;
2664 		iavf_aq_dva(iaq, aqb->aqb_map->dm_segs[0].ds_addr);
2665
2666 		SIMPLEQ_INSERT_TAIL(&sc->sc_arq_live, aqb, aqb_entry);
2667
2668 		prod++;
2669 		prod &= IAVF_AQ_MASK;
2670
2671 		filled = 1;
2672
2673 		n--;
2674 	}
2675
2676 	if_rxr_put(&sc->sc_arq_ring, n);
2677 	sc->sc_arq_prod = prod;
2678
2679 	if (filled && post)
2680 		iavf_wr(sc, sc->sc_aq_regs->arq_tail, sc->sc_arq_prod);
2681
2682 	return (filled);
2683 }
2684
2685 static void
2686 iavf_arq_unfill(struct iavf_softc *sc)
2687 {
2688 	struct iavf_aq_buf *aqb;
2689
2690 	while ((aqb = SIMPLEQ_FIRST(&sc->sc_arq_live)) != NULL) {
2691 		SIMPLEQ_REMOVE_HEAD(&sc->sc_arq_live, aqb_entry);
2692
2693 		bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, IAVF_AQ_BUFLEN,
2694 		    BUS_DMASYNC_POSTREAD);
2695 		iavf_aqb_free(sc, aqb);
2696 		if_rxr_put(&sc->sc_arq_ring, 1);
2697 	}
2698 }
2699
2700 static void
2701 iavf_arq_timeout(void *xsc)
2702 {
2703 	struct iavf_softc *sc = xsc;
2704
2705 	sc->sc_admin_result = -1;
2706 	cond_signal(&sc->sc_admin_cond);
2707 }
2708
2709 static int
2710 iavf_arq_wait(struct iavf_softc *sc, int msec)
2711 {
2712 	cond_init(&sc->sc_admin_cond);
2713
2714 	timeout_add_msec(&sc->sc_admin_timeout, msec);
2715
2716 	cond_wait(&sc->sc_admin_cond, "iavfarq");
2717 	timeout_del(&sc->sc_admin_timeout);
2718
2719 	iavf_arq_fill(sc, 1);
2720 	return sc->sc_admin_result;
2721 }
2722
2723 static int
2724 iavf_dmamem_alloc(struct iavf_softc *sc, struct iavf_dmamem *ixm,
2725     bus_size_t size, u_int align)
2726 {
2727 	ixm->ixm_size = size;
2728
2729 	if (bus_dmamap_create(sc->sc_dmat, ixm->ixm_size, 1,
2730 	    ixm->ixm_size, 0,
2731 	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
2732 	    &ixm->ixm_map) != 0)
2733 		return (1);
2734 	if (bus_dmamem_alloc(sc->sc_dmat, ixm->ixm_size,
2735 	    align, 0, &ixm->ixm_seg, 1, &ixm->ixm_nsegs,
2736 	    BUS_DMA_WAITOK | BUS_DMA_ZERO) != 0)
2737 		goto destroy;
2738 	if (bus_dmamem_map(sc->sc_dmat, &ixm->ixm_seg, ixm->ixm_nsegs,
2739 	    ixm->ixm_size, &ixm->ixm_kva, BUS_DMA_WAITOK) != 0)
2740 		goto free;
2741 	if (bus_dmamap_load(sc->sc_dmat, ixm->ixm_map, ixm->ixm_kva,
2742 	    ixm->ixm_size, NULL, BUS_DMA_WAITOK) != 0)
2743 		goto unmap;
2744
2745 	return (0);
2746 unmap:
2747 	bus_dmamem_unmap(sc->sc_dmat, ixm->ixm_kva, ixm->ixm_size);
2748 free:
2749 	bus_dmamem_free(sc->sc_dmat, &ixm->ixm_seg, 1);
2750 destroy:
2751 	bus_dmamap_destroy(sc->sc_dmat, ixm->ixm_map);
2752 	return (1);
2753 }
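
iavf_dmamem_alloc() chains the canonical four bus_dma steps (create, alloc, map, load) and unwinds them in reverse order on failure. A hedged usage sketch mirroring how iavf_txr_alloc() pairs it with iavf_dmamem_free(); the ring size here is illustrative:

	struct iavf_dmamem mem;

	/* a 1024-entry descriptor ring, aligned as the tx path requires */
	if (iavf_dmamem_alloc(sc, &mem,
	    1024 * sizeof(struct iavf_tx_desc), IAVF_TX_QUEUE_ALIGN) != 0)
		return (ENOMEM);

	/* ... use IAVF_DMA_KVA(&mem) for the CPU view and
	   IAVF_DMA_DVA(&mem) for the device view ... */

	iavf_dmamem_free(sc, &mem);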
2754
2755 static void
2756 iavf_dmamem_free(struct iavf_softc *sc, struct iavf_dmamem *ixm)
2757 {
2758 	bus_dmamap_unload(sc->sc_dmat, ixm->ixm_map);
2759 	bus_dmamem_unmap(sc->sc_dmat, ixm->ixm_kva, ixm->ixm_size);
2760 	bus_dmamem_free(sc->sc_dmat, &ixm->ixm_seg, 1);
2761 	bus_dmamap_destroy(sc->sc_dmat, ixm->ixm_map);
2762 }