/*
 * dev/pci/if_ixl.c
 *
 * NOTE(review): static-analyzer finding carried over from the review tool:
 * line 4132, column 2 — "Value stored to 'error' is never read".
 */
/*	$OpenBSD: if_ixl.c,v 1.78 2022/01/09 05:42:54 jsg Exp $ */

/*
 * Copyright (c) 2013-2015, Intel Corporation
 * All rights reserved.

 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *  1. Redistributions of source code must retain the above copyright notice,
 *     this list of conditions and the following disclaimer.
 *
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  3. Neither the name of the Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived from
 *     this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2016,2017 David Gwynne <dlg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
49 | |
50 | #include "bpfilter.h" |
51 | #include "kstat.h" |
52 | |
53 | #include <sys/param.h> |
54 | #include <sys/systm.h> |
55 | #include <sys/proc.h> |
56 | #include <sys/sockio.h> |
57 | #include <sys/mbuf.h> |
58 | #include <sys/kernel.h> |
59 | #include <sys/socket.h> |
60 | #include <sys/device.h> |
61 | #include <sys/pool.h> |
62 | #include <sys/queue.h> |
63 | #include <sys/timeout.h> |
64 | #include <sys/task.h> |
65 | #include <sys/syslog.h> |
66 | #include <sys/intrmap.h> |
67 | |
68 | #include <machine/bus.h> |
69 | #include <machine/intr.h> |
70 | |
71 | #include <net/if.h> |
72 | #include <net/if_dl.h> |
73 | #include <net/if_media.h> |
74 | #include <net/toeplitz.h> |
75 | |
76 | #if NBPFILTER1 > 0 |
77 | #include <net/bpf.h> |
78 | #endif |
79 | |
80 | #if NKSTAT0 > 0 |
81 | #include <sys/kstat.h> |
82 | #endif |
83 | |
84 | #include <netinet/in.h> |
85 | #include <netinet/if_ether.h> |
86 | |
87 | #include <dev/pci/pcireg.h> |
88 | #include <dev/pci/pcivar.h> |
89 | #include <dev/pci/pcidevs.h> |
90 | |
91 | #ifdef __sparc64__ |
92 | #include <dev/ofw/openfirm.h> |
93 | #endif |
94 | |
95 | #ifndef CACHE_LINE_SIZE64 |
96 | #define CACHE_LINE_SIZE64 64 |
97 | #endif |
98 | |
99 | #define IXL_MAX_VECTORS8 8 /* XXX this is pretty arbitrary */ |
100 | |
101 | #define I40E_MASK(mask, shift)((mask) << (shift)) ((mask) << (shift)) |
102 | #define I40E_PF_RESET_WAIT_COUNT200 200 |
103 | #define I40E_AQ_LARGE_BUF512 512 |
104 | |
105 | /* bitfields for Tx queue mapping in QTX_CTL */ |
106 | #define I40E_QTX_CTL_VF_QUEUE0x0 0x0 |
107 | #define I40E_QTX_CTL_VM_QUEUE0x1 0x1 |
108 | #define I40E_QTX_CTL_PF_QUEUE0x2 0x2 |
109 | |
110 | #define I40E_QUEUE_TYPE_EOL0x7ff 0x7ff |
111 | #define I40E_INTR_NOTX_QUEUE0 0 |
112 | |
113 | #define I40E_QUEUE_TYPE_RX0x0 0x0 |
114 | #define I40E_QUEUE_TYPE_TX0x1 0x1 |
115 | #define I40E_QUEUE_TYPE_PE_CEQ0x2 0x2 |
116 | #define I40E_QUEUE_TYPE_UNKNOWN0x3 0x3 |
117 | |
118 | #define I40E_ITR_INDEX_RX0x0 0x0 |
119 | #define I40E_ITR_INDEX_TX0x1 0x1 |
120 | #define I40E_ITR_INDEX_OTHER0x2 0x2 |
121 | #define I40E_ITR_INDEX_NONE0x3 0x3 |
122 | |
123 | #include <dev/pci/if_ixlreg.h> |
124 | |
125 | #define I40E_INTR_NOTX_QUEUE0 0 |
126 | #define I40E_INTR_NOTX_INTR0 0 |
127 | #define I40E_INTR_NOTX_RX_QUEUE0 0 |
128 | #define I40E_INTR_NOTX_TX_QUEUE1 1 |
129 | #define I40E_INTR_NOTX_RX_MASK((0x1) << (1)) I40E_PFINT_ICR0_QUEUE_0_MASK((0x1) << (1)) |
130 | #define I40E_INTR_NOTX_TX_MASK((0x1) << (2)) I40E_PFINT_ICR0_QUEUE_1_MASK((0x1) << (2)) |
131 | |
132 | struct ixl_aq_desc { |
133 | uint16_t iaq_flags; |
134 | #define IXL_AQ_DD(1U << 0) (1U << 0) |
135 | #define IXL_AQ_CMP(1U << 1) (1U << 1) |
136 | #define IXL_AQ_ERR(1U << 2) (1U << 2) |
137 | #define IXL_AQ_VFE(1U << 3) (1U << 3) |
138 | #define IXL_AQ_LB(1U << 9) (1U << 9) |
139 | #define IXL_AQ_RD(1U << 10) (1U << 10) |
140 | #define IXL_AQ_VFC(1U << 11) (1U << 11) |
141 | #define IXL_AQ_BUF(1U << 12) (1U << 12) |
142 | #define IXL_AQ_SI(1U << 13) (1U << 13) |
143 | #define IXL_AQ_EI(1U << 14) (1U << 14) |
144 | #define IXL_AQ_FE(1U << 15) (1U << 15) |
145 | |
146 | #define IXL_AQ_FLAGS_FMT"\020" "\020FE" "\017EI" "\016SI" "\015BUF" "\014VFC" "\013DB" "\012LB" "\004VFE" "\003ERR" "\002CMP" "\001DD" "\020" "\020FE" "\017EI" "\016SI" "\015BUF" \ |
147 | "\014VFC" "\013DB" "\012LB" "\004VFE" \ |
148 | "\003ERR" "\002CMP" "\001DD" |
149 | |
150 | uint16_t iaq_opcode; |
151 | |
152 | uint16_t iaq_datalen; |
153 | uint16_t iaq_retval; |
154 | |
155 | uint64_t iaq_cookie; |
156 | |
157 | uint32_t iaq_param[4]; |
158 | /* iaq_data_hi iaq_param[2] */ |
159 | /* iaq_data_lo iaq_param[3] */ |
160 | } __packed__attribute__((__packed__)) __aligned(8)__attribute__((__aligned__(8))); |
161 | |
/* aq commands (opcodes placed in ixl_aq_desc.iaq_opcode) */
#define IXL_AQ_OP_GET_VERSION		0x0001
#define IXL_AQ_OP_DRIVER_VERSION	0x0002
#define IXL_AQ_OP_QUEUE_SHUTDOWN	0x0003
#define IXL_AQ_OP_SET_PF_CONTEXT	0x0004
#define IXL_AQ_OP_GET_AQ_ERR_REASON	0x0005
#define IXL_AQ_OP_REQUEST_RESOURCE	0x0008
#define IXL_AQ_OP_RELEASE_RESOURCE	0x0009
#define IXL_AQ_OP_LIST_FUNC_CAP		0x000a
#define IXL_AQ_OP_LIST_DEV_CAP		0x000b
#define IXL_AQ_OP_MAC_ADDRESS_READ	0x0107
#define IXL_AQ_OP_CLEAR_PXE_MODE	0x0110
#define IXL_AQ_OP_SWITCH_GET_CONFIG	0x0200
#define IXL_AQ_OP_RX_CTL_READ		0x0206
#define IXL_AQ_OP_RX_CTL_WRITE		0x0207
#define IXL_AQ_OP_ADD_VSI		0x0210
#define IXL_AQ_OP_UPD_VSI_PARAMS	0x0211
#define IXL_AQ_OP_GET_VSI_PARAMS	0x0212
#define IXL_AQ_OP_ADD_VEB		0x0230
#define IXL_AQ_OP_UPD_VEB_PARAMS	0x0231
#define IXL_AQ_OP_GET_VEB_PARAMS	0x0232
#define IXL_AQ_OP_ADD_MACVLAN		0x0250
#define IXL_AQ_OP_REMOVE_MACVLAN	0x0251
#define IXL_AQ_OP_SET_VSI_PROMISC	0x0254
#define IXL_AQ_OP_PHY_GET_ABILITIES	0x0600
#define IXL_AQ_OP_PHY_SET_CONFIG	0x0601
#define IXL_AQ_OP_PHY_SET_MAC_CONFIG	0x0603
#define IXL_AQ_OP_PHY_RESTART_AN	0x0605
#define IXL_AQ_OP_PHY_LINK_STATUS	0x0607
#define IXL_AQ_OP_PHY_SET_EVENT_MASK	0x0613
#define IXL_AQ_OP_PHY_SET_REGISTER	0x0628
#define IXL_AQ_OP_PHY_GET_REGISTER	0x0629
#define IXL_AQ_OP_LLDP_GET_MIB		0x0a00
#define IXL_AQ_OP_LLDP_MIB_CHG_EV	0x0a01
#define IXL_AQ_OP_LLDP_ADD_TLV		0x0a02
#define IXL_AQ_OP_LLDP_UPD_TLV		0x0a03
#define IXL_AQ_OP_LLDP_DEL_TLV		0x0a04
#define IXL_AQ_OP_LLDP_STOP_AGENT	0x0a05
#define IXL_AQ_OP_LLDP_START_AGENT	0x0a06
#define IXL_AQ_OP_LLDP_GET_CEE_DCBX	0x0a07
#define IXL_AQ_OP_LLDP_SPECIFIC_AGENT	0x0a09
#define IXL_AQ_OP_SET_RSS_KEY		0x0b02	/* 722 only */
#define IXL_AQ_OP_SET_RSS_LUT		0x0b03	/* 722 only */
#define IXL_AQ_OP_GET_RSS_KEY		0x0b04	/* 722 only */
#define IXL_AQ_OP_GET_RSS_LUT		0x0b05	/* 722 only */
207 | |
208 | struct ixl_aq_mac_addresses { |
209 | uint8_t pf_lan[ETHER_ADDR_LEN6]; |
210 | uint8_t pf_san[ETHER_ADDR_LEN6]; |
211 | uint8_t port[ETHER_ADDR_LEN6]; |
212 | uint8_t pf_wol[ETHER_ADDR_LEN6]; |
213 | } __packed__attribute__((__packed__)); |
214 | |
215 | #define IXL_AQ_MAC_PF_LAN_VALID(1U << 4) (1U << 4) |
216 | #define IXL_AQ_MAC_PF_SAN_VALID(1U << 5) (1U << 5) |
217 | #define IXL_AQ_MAC_PORT_VALID(1U << 6) (1U << 6) |
218 | #define IXL_AQ_MAC_PF_WOL_VALID(1U << 7) (1U << 7) |
219 | |
220 | struct ixl_aq_capability { |
221 | uint16_t cap_id; |
222 | #define IXL_AQ_CAP_SWITCH_MODE0x0001 0x0001 |
223 | #define IXL_AQ_CAP_MNG_MODE0x0002 0x0002 |
224 | #define IXL_AQ_CAP_NPAR_ACTIVE0x0003 0x0003 |
225 | #define IXL_AQ_CAP_OS2BMC_CAP0x0004 0x0004 |
226 | #define IXL_AQ_CAP_FUNCTIONS_VALID0x0005 0x0005 |
227 | #define IXL_AQ_CAP_ALTERNATE_RAM0x0006 0x0006 |
228 | #define IXL_AQ_CAP_WOL_AND_PROXY0x0008 0x0008 |
229 | #define IXL_AQ_CAP_SRIOV0x0012 0x0012 |
230 | #define IXL_AQ_CAP_VF0x0013 0x0013 |
231 | #define IXL_AQ_CAP_VMDQ0x0014 0x0014 |
232 | #define IXL_AQ_CAP_8021QBG0x0015 0x0015 |
233 | #define IXL_AQ_CAP_8021QBR0x0016 0x0016 |
234 | #define IXL_AQ_CAP_VSI0x0017 0x0017 |
235 | #define IXL_AQ_CAP_DCB0x0018 0x0018 |
236 | #define IXL_AQ_CAP_FCOE0x0021 0x0021 |
237 | #define IXL_AQ_CAP_ISCSI0x0022 0x0022 |
238 | #define IXL_AQ_CAP_RSS0x0040 0x0040 |
239 | #define IXL_AQ_CAP_RXQ0x0041 0x0041 |
240 | #define IXL_AQ_CAP_TXQ0x0042 0x0042 |
241 | #define IXL_AQ_CAP_MSIX0x0043 0x0043 |
242 | #define IXL_AQ_CAP_VF_MSIX0x0044 0x0044 |
243 | #define IXL_AQ_CAP_FLOW_DIRECTOR0x0045 0x0045 |
244 | #define IXL_AQ_CAP_15880x0046 0x0046 |
245 | #define IXL_AQ_CAP_IWARP0x0051 0x0051 |
246 | #define IXL_AQ_CAP_LED0x0061 0x0061 |
247 | #define IXL_AQ_CAP_SDP0x0062 0x0062 |
248 | #define IXL_AQ_CAP_MDIO0x0063 0x0063 |
249 | #define IXL_AQ_CAP_WSR_PROT0x0064 0x0064 |
250 | #define IXL_AQ_CAP_NVM_MGMT0x0080 0x0080 |
251 | #define IXL_AQ_CAP_FLEX100x00F1 0x00F1 |
252 | #define IXL_AQ_CAP_CEM0x00F2 0x00F2 |
253 | uint8_t major_rev; |
254 | uint8_t minor_rev; |
255 | uint32_t number; |
256 | uint32_t logical_id; |
257 | uint32_t phys_id; |
258 | uint8_t _reserved[16]; |
259 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
260 | |
261 | #define IXL_LLDP_SHUTDOWN0x1 0x1 |
262 | |
263 | struct ixl_aq_switch_config { |
264 | uint16_t num_reported; |
265 | uint16_t num_total; |
266 | uint8_t _reserved[12]; |
267 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
268 | |
269 | struct ixl_aq_switch_config_element { |
270 | uint8_t type; |
271 | #define IXL_AQ_SW_ELEM_TYPE_MAC1 1 |
272 | #define IXL_AQ_SW_ELEM_TYPE_PF2 2 |
273 | #define IXL_AQ_SW_ELEM_TYPE_VF3 3 |
274 | #define IXL_AQ_SW_ELEM_TYPE_EMP4 4 |
275 | #define IXL_AQ_SW_ELEM_TYPE_BMC5 5 |
276 | #define IXL_AQ_SW_ELEM_TYPE_PV16 16 |
277 | #define IXL_AQ_SW_ELEM_TYPE_VEB17 17 |
278 | #define IXL_AQ_SW_ELEM_TYPE_PA18 18 |
279 | #define IXL_AQ_SW_ELEM_TYPE_VSI19 19 |
280 | uint8_t revision; |
281 | #define IXL_AQ_SW_ELEM_REV_11 1 |
282 | uint16_t seid; |
283 | |
284 | uint16_t uplink_seid; |
285 | uint16_t downlink_seid; |
286 | |
287 | uint8_t _reserved[3]; |
288 | uint8_t connection_type; |
289 | #define IXL_AQ_CONN_TYPE_REGULAR0x1 0x1 |
290 | #define IXL_AQ_CONN_TYPE_DEFAULT0x2 0x2 |
291 | #define IXL_AQ_CONN_TYPE_CASCADED0x3 0x3 |
292 | |
293 | uint16_t scheduler_id; |
294 | uint16_t element_info; |
295 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
296 | |
/* PHY types reported by the firmware (bit positions in phy_type fields) */
#define IXL_PHY_TYPE_SGMII		0x00
#define IXL_PHY_TYPE_1000BASE_KX	0x01
#define IXL_PHY_TYPE_10GBASE_KX4	0x02
#define IXL_PHY_TYPE_10GBASE_KR		0x03
#define IXL_PHY_TYPE_40GBASE_KR4	0x04
#define IXL_PHY_TYPE_XAUI		0x05
#define IXL_PHY_TYPE_XFI		0x06
#define IXL_PHY_TYPE_SFI		0x07
#define IXL_PHY_TYPE_XLAUI		0x08
#define IXL_PHY_TYPE_XLPPI		0x09
#define IXL_PHY_TYPE_40GBASE_CR4_CU	0x0a
#define IXL_PHY_TYPE_10GBASE_CR1_CU	0x0b
#define IXL_PHY_TYPE_10GBASE_AOC	0x0c
#define IXL_PHY_TYPE_40GBASE_AOC	0x0d
#define IXL_PHY_TYPE_100BASE_TX		0x11
#define IXL_PHY_TYPE_1000BASE_T		0x12
#define IXL_PHY_TYPE_10GBASE_T		0x13
#define IXL_PHY_TYPE_10GBASE_SR		0x14
#define IXL_PHY_TYPE_10GBASE_LR		0x15
#define IXL_PHY_TYPE_10GBASE_SFPP_CU	0x16
#define IXL_PHY_TYPE_10GBASE_CR1	0x17
#define IXL_PHY_TYPE_40GBASE_CR4	0x18
#define IXL_PHY_TYPE_40GBASE_SR4	0x19
#define IXL_PHY_TYPE_40GBASE_LR4	0x1a
#define IXL_PHY_TYPE_1000BASE_SX	0x1b
#define IXL_PHY_TYPE_1000BASE_LX	0x1c
#define IXL_PHY_TYPE_1000BASE_T_OPTICAL	0x1d
#define IXL_PHY_TYPE_20GBASE_KR2	0x1e

#define IXL_PHY_TYPE_25GBASE_KR		0x1f
#define IXL_PHY_TYPE_25GBASE_CR		0x20
#define IXL_PHY_TYPE_25GBASE_SR		0x21
#define IXL_PHY_TYPE_25GBASE_LR		0x22
#define IXL_PHY_TYPE_25GBASE_AOC	0x23
#define IXL_PHY_TYPE_25GBASE_ACC	0x24
332 | |
333 | struct ixl_aq_module_desc { |
334 | uint8_t oui[3]; |
335 | uint8_t _reserved1; |
336 | uint8_t part_number[16]; |
337 | uint8_t revision[4]; |
338 | uint8_t _reserved2[8]; |
339 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
340 | |
341 | struct ixl_aq_phy_abilities { |
342 | uint32_t phy_type; |
343 | |
344 | uint8_t link_speed; |
345 | #define IXL_AQ_PHY_LINK_SPEED_100MB(1 << 1) (1 << 1) |
346 | #define IXL_AQ_PHY_LINK_SPEED_1000MB(1 << 2) (1 << 2) |
347 | #define IXL_AQ_PHY_LINK_SPEED_10GB(1 << 3) (1 << 3) |
348 | #define IXL_AQ_PHY_LINK_SPEED_40GB(1 << 4) (1 << 4) |
349 | #define IXL_AQ_PHY_LINK_SPEED_20GB(1 << 5) (1 << 5) |
350 | #define IXL_AQ_PHY_LINK_SPEED_25GB(1 << 6) (1 << 6) |
351 | uint8_t abilities; |
352 | uint16_t eee_capability; |
353 | |
354 | uint32_t eeer_val; |
355 | |
356 | uint8_t d3_lpan; |
357 | uint8_t phy_type_ext; |
358 | #define IXL_AQ_PHY_TYPE_EXT_25G_KR0x01 0x01 |
359 | #define IXL_AQ_PHY_TYPE_EXT_25G_CR0x02 0x02 |
360 | #define IXL_AQ_PHY_TYPE_EXT_25G_SR0x04 0x04 |
361 | #define IXL_AQ_PHY_TYPE_EXT_25G_LR0x08 0x08 |
362 | uint8_t fec_cfg_curr_mod_ext_info; |
363 | #define IXL_AQ_ENABLE_FEC_KR0x01 0x01 |
364 | #define IXL_AQ_ENABLE_FEC_RS0x02 0x02 |
365 | #define IXL_AQ_REQUEST_FEC_KR0x04 0x04 |
366 | #define IXL_AQ_REQUEST_FEC_RS0x08 0x08 |
367 | #define IXL_AQ_ENABLE_FEC_AUTO0x10 0x10 |
368 | #define IXL_AQ_MODULE_TYPE_EXT_MASK0xe0 0xe0 |
369 | #define IXL_AQ_MODULE_TYPE_EXT_SHIFT5 5 |
370 | uint8_t ext_comp_code; |
371 | |
372 | uint8_t phy_id[4]; |
373 | |
374 | uint8_t module_type[3]; |
375 | #define IXL_SFF8024_ID_SFP0x03 0x03 |
376 | #define IXL_SFF8024_ID_QSFP0x0c 0x0c |
377 | #define IXL_SFF8024_ID_QSFP_PLUS0x0d 0x0d |
378 | #define IXL_SFF8024_ID_QSFP280x11 0x11 |
379 | uint8_t qualified_module_count; |
380 | #define IXL_AQ_PHY_MAX_QMS16 16 |
381 | struct ixl_aq_module_desc |
382 | qualified_module[IXL_AQ_PHY_MAX_QMS16]; |
383 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
384 | |
385 | struct ixl_aq_link_param { |
386 | uint8_t notify; |
387 | #define IXL_AQ_LINK_NOTIFY0x03 0x03 |
388 | uint8_t _reserved1; |
389 | uint8_t phy; |
390 | uint8_t speed; |
391 | uint8_t status; |
392 | uint8_t _reserved2[11]; |
393 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
394 | |
395 | struct ixl_aq_vsi_param { |
396 | uint16_t uplink_seid; |
397 | uint8_t connect_type; |
398 | #define IXL_AQ_VSI_CONN_TYPE_NORMAL(0x1) (0x1) |
399 | #define IXL_AQ_VSI_CONN_TYPE_DEFAULT(0x2) (0x2) |
400 | #define IXL_AQ_VSI_CONN_TYPE_CASCADED(0x3) (0x3) |
401 | uint8_t _reserved1; |
402 | |
403 | uint8_t vf_id; |
404 | uint8_t _reserved2; |
405 | uint16_t vsi_flags; |
406 | #define IXL_AQ_VSI_TYPE_SHIFT0x0 0x0 |
407 | #define IXL_AQ_VSI_TYPE_MASK(0x3 << 0x0) (0x3 << IXL_AQ_VSI_TYPE_SHIFT0x0) |
408 | #define IXL_AQ_VSI_TYPE_VF0x0 0x0 |
409 | #define IXL_AQ_VSI_TYPE_VMDQ20x1 0x1 |
410 | #define IXL_AQ_VSI_TYPE_PF0x2 0x2 |
411 | #define IXL_AQ_VSI_TYPE_EMP_MNG0x3 0x3 |
412 | #define IXL_AQ_VSI_FLAG_CASCADED_PV0x4 0x4 |
413 | |
414 | uint32_t addr_hi; |
415 | uint32_t addr_lo; |
416 | } __packed__attribute__((__packed__)) __aligned(16)__attribute__((__aligned__(16))); |
417 | |
418 | struct ixl_aq_add_macvlan { |
419 | uint16_t num_addrs; |
420 | uint16_t seid0; |
421 | uint16_t seid1; |
422 | uint16_t seid2; |
423 | uint32_t addr_hi; |
424 | uint32_t addr_lo; |
425 | } __packed__attribute__((__packed__)) __aligned(16)__attribute__((__aligned__(16))); |
426 | |
427 | struct ixl_aq_add_macvlan_elem { |
428 | uint8_t macaddr[6]; |
429 | uint16_t vlan; |
430 | uint16_t flags; |
431 | #define IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH0x0001 0x0001 |
432 | #define IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN0x0004 0x0004 |
433 | uint16_t queue; |
434 | uint32_t _reserved; |
435 | } __packed__attribute__((__packed__)) __aligned(16)__attribute__((__aligned__(16))); |
436 | |
437 | struct ixl_aq_remove_macvlan { |
438 | uint16_t num_addrs; |
439 | uint16_t seid0; |
440 | uint16_t seid1; |
441 | uint16_t seid2; |
442 | uint32_t addr_hi; |
443 | uint32_t addr_lo; |
444 | } __packed__attribute__((__packed__)) __aligned(16)__attribute__((__aligned__(16))); |
445 | |
446 | struct ixl_aq_remove_macvlan_elem { |
447 | uint8_t macaddr[6]; |
448 | uint16_t vlan; |
449 | uint8_t flags; |
450 | #define IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH0x0001 0x0001 |
451 | #define IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN0x0008 0x0008 |
452 | uint8_t _reserved[7]; |
453 | } __packed__attribute__((__packed__)) __aligned(16)__attribute__((__aligned__(16))); |
454 | |
455 | struct ixl_aq_vsi_reply { |
456 | uint16_t seid; |
457 | uint16_t vsi_number; |
458 | |
459 | uint16_t vsis_used; |
460 | uint16_t vsis_free; |
461 | |
462 | uint32_t addr_hi; |
463 | uint32_t addr_lo; |
464 | } __packed__attribute__((__packed__)) __aligned(16)__attribute__((__aligned__(16))); |
465 | |
466 | struct ixl_aq_vsi_data { |
467 | /* first 96 byte are written by SW */ |
468 | uint16_t valid_sections; |
469 | #define IXL_AQ_VSI_VALID_SWITCH(1 << 0) (1 << 0) |
470 | #define IXL_AQ_VSI_VALID_SECURITY(1 << 1) (1 << 1) |
471 | #define IXL_AQ_VSI_VALID_VLAN(1 << 2) (1 << 2) |
472 | #define IXL_AQ_VSI_VALID_CAS_PV(1 << 3) (1 << 3) |
473 | #define IXL_AQ_VSI_VALID_INGRESS_UP(1 << 4) (1 << 4) |
474 | #define IXL_AQ_VSI_VALID_EGRESS_UP(1 << 5) (1 << 5) |
475 | #define IXL_AQ_VSI_VALID_QUEUE_MAP(1 << 6) (1 << 6) |
476 | #define IXL_AQ_VSI_VALID_QUEUE_OPT(1 << 7) (1 << 7) |
477 | #define IXL_AQ_VSI_VALID_OUTER_UP(1 << 8) (1 << 8) |
478 | #define IXL_AQ_VSI_VALID_SCHED(1 << 9) (1 << 9) |
479 | /* switch section */ |
480 | uint16_t switch_id; |
481 | #define IXL_AQ_VSI_SWITCH_ID_SHIFT0 0 |
482 | #define IXL_AQ_VSI_SWITCH_ID_MASK(0xfff << 0) (0xfff << IXL_AQ_VSI_SWITCH_ID_SHIFT0) |
483 | #define IXL_AQ_VSI_SWITCH_NOT_STAG(1 << 12) (1 << 12) |
484 | #define IXL_AQ_VSI_SWITCH_LOCAL_LB(1 << 14) (1 << 14) |
485 | |
486 | uint8_t _reserved1[2]; |
487 | /* security section */ |
488 | uint8_t sec_flags; |
489 | #define IXL_AQ_VSI_SEC_ALLOW_DEST_OVRD(1 << 0) (1 << 0) |
490 | #define IXL_AQ_VSI_SEC_ENABLE_VLAN_CHK(1 << 1) (1 << 1) |
491 | #define IXL_AQ_VSI_SEC_ENABLE_MAC_CHK(1 << 2) (1 << 2) |
492 | uint8_t _reserved2; |
493 | |
494 | /* vlan section */ |
495 | uint16_t pvid; |
496 | uint16_t fcoe_pvid; |
497 | |
498 | uint8_t port_vlan_flags; |
499 | #define IXL_AQ_VSI_PVLAN_MODE_SHIFT0 0 |
500 | #define IXL_AQ_VSI_PVLAN_MODE_MASK(0x3 << 0) (0x3 << IXL_AQ_VSI_PVLAN_MODE_SHIFT0) |
501 | #define IXL_AQ_VSI_PVLAN_MODE_TAGGED(0x1 << 0) (0x1 << IXL_AQ_VSI_PVLAN_MODE_SHIFT0) |
502 | #define IXL_AQ_VSI_PVLAN_MODE_UNTAGGED(0x2 << 0) (0x2 << IXL_AQ_VSI_PVLAN_MODE_SHIFT0) |
503 | #define IXL_AQ_VSI_PVLAN_MODE_ALL(0x3 << 0) (0x3 << IXL_AQ_VSI_PVLAN_MODE_SHIFT0) |
504 | #define IXL_AQ_VSI_PVLAN_INSERT_PVID(0x4 << 0) (0x4 << IXL_AQ_VSI_PVLAN_MODE_SHIFT0) |
505 | #define IXL_AQ_VSI_PVLAN_EMOD_SHIFT0x3 0x3 |
506 | #define IXL_AQ_VSI_PVLAN_EMOD_MASK(0x3 << 0x3) (0x3 << IXL_AQ_VSI_PVLAN_EMOD_SHIFT0x3) |
507 | #define IXL_AQ_VSI_PVLAN_EMOD_STR_BOTH(0x0 << 0x3) (0x0 << IXL_AQ_VSI_PVLAN_EMOD_SHIFT0x3) |
508 | #define IXL_AQ_VSI_PVLAN_EMOD_STR_UP(0x1 << 0x3) (0x1 << IXL_AQ_VSI_PVLAN_EMOD_SHIFT0x3) |
509 | #define IXL_AQ_VSI_PVLAN_EMOD_STR(0x2 << 0x3) (0x2 << IXL_AQ_VSI_PVLAN_EMOD_SHIFT0x3) |
510 | #define IXL_AQ_VSI_PVLAN_EMOD_NOTHING(0x3 << 0x3) (0x3 << IXL_AQ_VSI_PVLAN_EMOD_SHIFT0x3) |
511 | uint8_t _reserved3[3]; |
512 | |
513 | /* ingress egress up section */ |
514 | uint32_t ingress_table; |
515 | #define IXL_AQ_VSI_UP_SHIFT(_up)((_up) * 3) ((_up) * 3) |
516 | #define IXL_AQ_VSI_UP_MASK(_up)(0x7 << (((_up) * 3)) (0x7 << (IXL_AQ_VSI_UP_SHIFT(_up)((_up) * 3)) |
517 | uint32_t egress_table; |
518 | |
519 | /* cascaded pv section */ |
520 | uint16_t cas_pv_tag; |
521 | uint8_t cas_pv_flags; |
522 | #define IXL_AQ_VSI_CAS_PV_TAGX_SHIFT0 0 |
523 | #define IXL_AQ_VSI_CAS_PV_TAGX_MASK(0x3 << 0) (0x3 << IXL_AQ_VSI_CAS_PV_TAGX_SHIFT0) |
524 | #define IXL_AQ_VSI_CAS_PV_TAGX_LEAVE(0x0 << 0) (0x0 << IXL_AQ_VSI_CAS_PV_TAGX_SHIFT0) |
525 | #define IXL_AQ_VSI_CAS_PV_TAGX_REMOVE(0x1 << 0) (0x1 << IXL_AQ_VSI_CAS_PV_TAGX_SHIFT0) |
526 | #define IXL_AQ_VSI_CAS_PV_TAGX_COPY(0x2 << 0) (0x2 << IXL_AQ_VSI_CAS_PV_TAGX_SHIFT0) |
527 | #define IXL_AQ_VSI_CAS_PV_INSERT_TAG(1 << 4) (1 << 4) |
528 | #define IXL_AQ_VSI_CAS_PV_ETAG_PRUNE(1 << 5) (1 << 5) |
529 | #define IXL_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG(1 << 6) \ |
530 | (1 << 6) |
531 | uint8_t _reserved4; |
532 | |
533 | /* queue mapping section */ |
534 | uint16_t mapping_flags; |
535 | #define IXL_AQ_VSI_QUE_MAP_MASK0x1 0x1 |
536 | #define IXL_AQ_VSI_QUE_MAP_CONTIG0x0 0x0 |
537 | #define IXL_AQ_VSI_QUE_MAP_NONCONTIG0x1 0x1 |
538 | uint16_t queue_mapping[16]; |
539 | #define IXL_AQ_VSI_QUEUE_SHIFT0x0 0x0 |
540 | #define IXL_AQ_VSI_QUEUE_MASK(0x7ff << 0x0) (0x7ff << IXL_AQ_VSI_QUEUE_SHIFT0x0) |
541 | uint16_t tc_mapping[8]; |
542 | #define IXL_AQ_VSI_TC_Q_OFFSET_SHIFT0 0 |
543 | #define IXL_AQ_VSI_TC_Q_OFFSET_MASK(0x1ff << 0) (0x1ff << IXL_AQ_VSI_TC_Q_OFFSET_SHIFT0) |
544 | #define IXL_AQ_VSI_TC_Q_NUMBER_SHIFT9 9 |
545 | #define IXL_AQ_VSI_TC_Q_NUMBER_MASK(0x7 << 9) (0x7 << IXL_AQ_VSI_TC_Q_NUMBER_SHIFT9) |
546 | |
547 | /* queueing option section */ |
548 | uint8_t queueing_opt_flags; |
549 | #define IXL_AQ_VSI_QUE_OPT_MCAST_UDP_EN(1 << 2) (1 << 2) |
550 | #define IXL_AQ_VSI_QUE_OPT_UCAST_UDP_EN(1 << 3) (1 << 3) |
551 | #define IXL_AQ_VSI_QUE_OPT_TCP_EN(1 << 4) (1 << 4) |
552 | #define IXL_AQ_VSI_QUE_OPT_FCOE_EN(1 << 5) (1 << 5) |
553 | #define IXL_AQ_VSI_QUE_OPT_RSS_LUT_PF0 0 |
554 | #define IXL_AQ_VSI_QUE_OPT_RSS_LUT_VSI(1 << 6) (1 << 6) |
555 | uint8_t _reserved5[3]; |
556 | |
557 | /* scheduler section */ |
558 | uint8_t up_enable_bits; |
559 | uint8_t _reserved6; |
560 | |
561 | /* outer up section */ |
562 | uint32_t outer_up_table; /* same as ingress/egress tables */ |
563 | uint8_t _reserved7[8]; |
564 | |
565 | /* last 32 bytes are written by FW */ |
566 | uint16_t qs_handle[8]; |
567 | #define IXL_AQ_VSI_QS_HANDLE_INVALID0xffff 0xffff |
568 | uint16_t stat_counter_idx; |
569 | uint16_t sched_id; |
570 | |
571 | uint8_t _reserved8[12]; |
572 | } __packed__attribute__((__packed__)) __aligned(8)__attribute__((__aligned__(8))); |
573 | |
574 | CTASSERT(sizeof(struct ixl_aq_vsi_data) == 128)extern char _ctassert[(sizeof(struct ixl_aq_vsi_data) == 128) ? 1 : -1 ] __attribute__((__unused__)); |
575 | |
576 | struct ixl_aq_vsi_promisc_param { |
577 | uint16_t flags; |
578 | uint16_t valid_flags; |
579 | #define IXL_AQ_VSI_PROMISC_FLAG_UCAST(1 << 0) (1 << 0) |
580 | #define IXL_AQ_VSI_PROMISC_FLAG_MCAST(1 << 1) (1 << 1) |
581 | #define IXL_AQ_VSI_PROMISC_FLAG_BCAST(1 << 2) (1 << 2) |
582 | #define IXL_AQ_VSI_PROMISC_FLAG_DFLT(1 << 3) (1 << 3) |
583 | #define IXL_AQ_VSI_PROMISC_FLAG_VLAN(1 << 4) (1 << 4) |
584 | #define IXL_AQ_VSI_PROMISC_FLAG_RXONLY(1 << 15) (1 << 15) |
585 | |
586 | uint16_t seid; |
587 | #define IXL_AQ_VSI_PROMISC_SEID_VALID(1 << 15) (1 << 15) |
588 | uint16_t vlan; |
589 | #define IXL_AQ_VSI_PROMISC_VLAN_VALID(1 << 15) (1 << 15) |
590 | uint32_t reserved[2]; |
591 | } __packed__attribute__((__packed__)) __aligned(8)__attribute__((__aligned__(8))); |
592 | |
593 | struct ixl_aq_veb_param { |
594 | uint16_t uplink_seid; |
595 | uint16_t downlink_seid; |
596 | uint16_t veb_flags; |
597 | #define IXL_AQ_ADD_VEB_FLOATING(1 << 0) (1 << 0) |
598 | #define IXL_AQ_ADD_VEB_PORT_TYPE_SHIFT1 1 |
599 | #define IXL_AQ_ADD_VEB_PORT_TYPE_MASK(0x3 << 1) (0x3 << IXL_AQ_ADD_VEB_PORT_TYPE_SHIFT1) |
600 | #define IXL_AQ_ADD_VEB_PORT_TYPE_DEFAULT(0x2 << 1) \ |
601 | (0x2 << IXL_AQ_ADD_VEB_PORT_TYPE_SHIFT1) |
602 | #define IXL_AQ_ADD_VEB_PORT_TYPE_DATA(0x4 << 1) (0x4 << IXL_AQ_ADD_VEB_PORT_TYPE_SHIFT1) |
603 | #define IXL_AQ_ADD_VEB_ENABLE_L2_FILTER(1 << 3) (1 << 3) /* deprecated */ |
604 | #define IXL_AQ_ADD_VEB_DISABLE_STATS(1 << 4) (1 << 4) |
605 | uint8_t enable_tcs; |
606 | uint8_t _reserved[9]; |
607 | } __packed__attribute__((__packed__)) __aligned(16)__attribute__((__aligned__(16))); |
608 | |
609 | struct ixl_aq_veb_reply { |
610 | uint16_t _reserved1; |
611 | uint16_t _reserved2; |
612 | uint16_t _reserved3; |
613 | uint16_t switch_seid; |
614 | uint16_t veb_seid; |
615 | #define IXL_AQ_VEB_ERR_FLAG_NO_VEB(1 << 0) (1 << 0) |
616 | #define IXL_AQ_VEB_ERR_FLAG_NO_SCHED(1 << 1) (1 << 1) |
617 | #define IXL_AQ_VEB_ERR_FLAG_NO_COUNTER(1 << 2) (1 << 2) |
618 | #define IXL_AQ_VEB_ERR_FLAG_NO_ENTRY(1 << 3); (1 << 3); |
619 | uint16_t statistic_index; |
620 | uint16_t vebs_used; |
621 | uint16_t vebs_free; |
622 | } __packed__attribute__((__packed__)) __aligned(16)__attribute__((__aligned__(16))); |
623 | |
624 | /* GET PHY ABILITIES param[0] */ |
625 | #define IXL_AQ_PHY_REPORT_QUAL(1 << 0) (1 << 0) |
626 | #define IXL_AQ_PHY_REPORT_INIT(1 << 1) (1 << 1) |
627 | |
628 | struct ixl_aq_phy_reg_access { |
629 | uint8_t phy_iface; |
630 | #define IXL_AQ_PHY_IF_INTERNAL0 0 |
631 | #define IXL_AQ_PHY_IF_EXTERNAL1 1 |
632 | #define IXL_AQ_PHY_IF_MODULE2 2 |
633 | uint8_t dev_addr; |
634 | uint16_t recall; |
635 | #define IXL_AQ_PHY_QSFP_DEV_ADDR0 0 |
636 | #define IXL_AQ_PHY_QSFP_LAST1 1 |
637 | uint32_t reg; |
638 | uint32_t val; |
639 | uint32_t _reserved2; |
640 | } __packed__attribute__((__packed__)) __aligned(16)__attribute__((__aligned__(16))); |
641 | |
642 | /* RESTART_AN param[0] */ |
643 | #define IXL_AQ_PHY_RESTART_AN(1 << 1) (1 << 1) |
644 | #define IXL_AQ_PHY_LINK_ENABLE(1 << 2) (1 << 2) |
645 | |
646 | struct ixl_aq_link_status { /* this occupies the iaq_param space */ |
647 | uint16_t command_flags; /* only field set on command */ |
648 | #define IXL_AQ_LSE_MASK0x3 0x3 |
649 | #define IXL_AQ_LSE_NOP0x0 0x0 |
650 | #define IXL_AQ_LSE_DISABLE0x2 0x2 |
651 | #define IXL_AQ_LSE_ENABLE0x3 0x3 |
652 | #define IXL_AQ_LSE_IS_ENABLED0x1 0x1 /* only set in response */ |
653 | uint8_t phy_type; |
654 | uint8_t link_speed; |
655 | #define IXL_AQ_LINK_SPEED_1GB(1 << 2) (1 << 2) |
656 | #define IXL_AQ_LINK_SPEED_10GB(1 << 3) (1 << 3) |
657 | #define IXL_AQ_LINK_SPEED_40GB(1 << 4) (1 << 4) |
658 | #define IXL_AQ_LINK_SPEED_25GB(1 << 6) (1 << 6) |
659 | uint8_t link_info; |
660 | #define IXL_AQ_LINK_UP_FUNCTION0x01 0x01 |
661 | #define IXL_AQ_LINK_FAULT0x02 0x02 |
662 | #define IXL_AQ_LINK_FAULT_TX0x04 0x04 |
663 | #define IXL_AQ_LINK_FAULT_RX0x08 0x08 |
664 | #define IXL_AQ_LINK_FAULT_REMOTE0x10 0x10 |
665 | #define IXL_AQ_LINK_UP_PORT0x20 0x20 |
666 | #define IXL_AQ_MEDIA_AVAILABLE0x40 0x40 |
667 | #define IXL_AQ_SIGNAL_DETECT0x80 0x80 |
668 | uint8_t an_info; |
669 | #define IXL_AQ_AN_COMPLETED0x01 0x01 |
670 | #define IXL_AQ_LP_AN_ABILITY0x02 0x02 |
671 | #define IXL_AQ_PD_FAULT0x04 0x04 |
672 | #define IXL_AQ_FEC_EN0x08 0x08 |
673 | #define IXL_AQ_PHY_LOW_POWER0x10 0x10 |
674 | #define IXL_AQ_LINK_PAUSE_TX0x20 0x20 |
675 | #define IXL_AQ_LINK_PAUSE_RX0x40 0x40 |
676 | #define IXL_AQ_QUALIFIED_MODULE0x80 0x80 |
677 | |
678 | uint8_t ext_info; |
679 | #define IXL_AQ_LINK_PHY_TEMP_ALARM0x01 0x01 |
680 | #define IXL_AQ_LINK_XCESSIVE_ERRORS0x02 0x02 |
681 | #define IXL_AQ_LINK_TX_SHIFT0x02 0x02 |
682 | #define IXL_AQ_LINK_TX_MASK(0x03 << 0x02) (0x03 << IXL_AQ_LINK_TX_SHIFT0x02) |
683 | #define IXL_AQ_LINK_TX_ACTIVE0x00 0x00 |
684 | #define IXL_AQ_LINK_TX_DRAINED0x01 0x01 |
685 | #define IXL_AQ_LINK_TX_FLUSHED0x03 0x03 |
686 | #define IXL_AQ_LINK_FORCED_40G0x10 0x10 |
687 | /* 25G Error Codes */ |
688 | #define IXL_AQ_25G_NO_ERR0X00 0X00 |
689 | #define IXL_AQ_25G_NOT_PRESENT0X01 0X01 |
690 | #define IXL_AQ_25G_NVM_CRC_ERR0X02 0X02 |
691 | #define IXL_AQ_25G_SBUS_UCODE_ERR0X03 0X03 |
692 | #define IXL_AQ_25G_SERDES_UCODE_ERR0X04 0X04 |
693 | #define IXL_AQ_25G_NIMB_UCODE_ERR0X05 0X05 |
694 | uint8_t loopback; |
695 | uint16_t max_frame_size; |
696 | |
697 | uint8_t config; |
698 | #define IXL_AQ_CONFIG_FEC_KR_ENA0x01 0x01 |
699 | #define IXL_AQ_CONFIG_FEC_RS_ENA0x02 0x02 |
700 | #define IXL_AQ_CONFIG_CRC_ENA0x04 0x04 |
701 | #define IXL_AQ_CONFIG_PACING_MASK0x78 0x78 |
702 | uint8_t power_desc; |
703 | #define IXL_AQ_LINK_POWER_CLASS_10x00 0x00 |
704 | #define IXL_AQ_LINK_POWER_CLASS_20x01 0x01 |
705 | #define IXL_AQ_LINK_POWER_CLASS_30x02 0x02 |
706 | #define IXL_AQ_LINK_POWER_CLASS_40x03 0x03 |
707 | #define IXL_AQ_PWR_CLASS_MASK0x03 0x03 |
708 | |
709 | uint8_t reserved[4]; |
710 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
711 | /* event mask command flags for param[2] */ |
712 | #define IXL_AQ_PHY_EV_MASK0x3ff 0x3ff |
713 | #define IXL_AQ_PHY_EV_LINK_UPDOWN(1 << 1) (1 << 1) |
714 | #define IXL_AQ_PHY_EV_MEDIA_NA(1 << 2) (1 << 2) |
715 | #define IXL_AQ_PHY_EV_LINK_FAULT(1 << 3) (1 << 3) |
716 | #define IXL_AQ_PHY_EV_PHY_TEMP_ALARM(1 << 4) (1 << 4) |
717 | #define IXL_AQ_PHY_EV_EXCESS_ERRORS(1 << 5) (1 << 5) |
718 | #define IXL_AQ_PHY_EV_SIGNAL_DETECT(1 << 6) (1 << 6) |
719 | #define IXL_AQ_PHY_EV_AN_COMPLETED(1 << 7) (1 << 7) |
720 | #define IXL_AQ_PHY_EV_MODULE_QUAL_FAIL(1 << 8) (1 << 8) |
721 | #define IXL_AQ_PHY_EV_PORT_TX_SUSPENDED(1 << 9) (1 << 9) |
722 | |
/*
 * Admin queue get/set RSS LUT command (X722 family only, hence the
 * "722" tag).  The LUT contents are not carried inline; addr_hi/addr_lo
 * point at a DMA buffer holding the lookup table.
 */
struct ixl_aq_rss_lut { /* 722 */
#define IXL_AQ_SET_RSS_LUT_VSI_VALID(1 << 15) (1 << 15)
#define IXL_AQ_SET_RSS_LUT_VSI_ID_SHIFT0 0
#define IXL_AQ_SET_RSS_LUT_VSI_ID_MASK(0x3FF << 0) \
	(0x3FF << IXL_AQ_SET_RSS_LUT_VSI_ID_SHIFT0)

	uint16_t	vsi_number;	/* VSI id plus the VALID bit above */
#define IXL_AQ_SET_RSS_LUT_TABLE_TYPE_SHIFT0 0
#define IXL_AQ_SET_RSS_LUT_TABLE_TYPE_MASK(0x1 << 0) \
	(0x1 << IXL_AQ_SET_RSS_LUT_TABLE_TYPE_SHIFT0)
#define IXL_AQ_SET_RSS_LUT_TABLE_TYPE_VSI0 0
#define IXL_AQ_SET_RSS_LUT_TABLE_TYPE_PF1 1
	uint16_t	flags;		/* LUT table type (per-VSI or PF) */
	uint8_t		_reserved[4];
	uint32_t	addr_hi;	/* DMA address of the LUT buffer */
	uint32_t	addr_lo;
} __packed__attribute__((__packed__)) __aligned(16)__attribute__((__aligned__(16)));
740 | |
/*
 * Admin queue get/set RSS hash key command (X722 family only).  Like
 * the LUT command, the key itself lives in a separate DMA buffer
 * referenced by addr_hi/addr_lo.
 */
struct ixl_aq_get_set_rss_key { /* 722 */
#define IXL_AQ_SET_RSS_KEY_VSI_VALID(1 << 15) (1 << 15)
#define IXL_AQ_SET_RSS_KEY_VSI_ID_SHIFT0 0
#define IXL_AQ_SET_RSS_KEY_VSI_ID_MASK(0x3FF << 0) \
	(0x3FF << IXL_AQ_SET_RSS_KEY_VSI_ID_SHIFT0)
	uint16_t	vsi_number;	/* VSI id plus the VALID bit above */
	uint8_t		_reserved[6];
	uint32_t	addr_hi;	/* DMA address of the key buffer */
	uint32_t	addr_lo;
} __packed__attribute__((__packed__)) __aligned(16)__attribute__((__aligned__(16)));
751 | |
752 | /* aq response codes */ |
753 | #define IXL_AQ_RC_OK0 0 /* success */ |
754 | #define IXL_AQ_RC_EPERM1 1 /* Operation not permitted */ |
755 | #define IXL_AQ_RC_ENOENT2 2 /* No such element */ |
756 | #define IXL_AQ_RC_ESRCH3 3 /* Bad opcode */ |
757 | #define IXL_AQ_RC_EINTR4 4 /* operation interrupted */ |
758 | #define IXL_AQ_RC_EIO5 5 /* I/O error */ |
759 | #define IXL_AQ_RC_ENXIO6 6 /* No such resource */ |
760 | #define IXL_AQ_RC_E2BIG7 7 /* Arg too long */ |
761 | #define IXL_AQ_RC_EAGAIN8 8 /* Try again */ |
762 | #define IXL_AQ_RC_ENOMEM9 9 /* Out of memory */ |
763 | #define IXL_AQ_RC_EACCES10 10 /* Permission denied */ |
764 | #define IXL_AQ_RC_EFAULT11 11 /* Bad address */ |
765 | #define IXL_AQ_RC_EBUSY12 12 /* Device or resource busy */ |
766 | #define IXL_AQ_RC_EEXIST13 13 /* object already exists */ |
767 | #define IXL_AQ_RC_EINVAL14 14 /* invalid argument */ |
768 | #define IXL_AQ_RC_ENOTTY15 15 /* not a typewriter */ |
769 | #define IXL_AQ_RC_ENOSPC16 16 /* No space or alloc failure */ |
770 | #define IXL_AQ_RC_ENOSYS17 17 /* function not implemented */ |
771 | #define IXL_AQ_RC_ERANGE18 18 /* parameter out of range */ |
772 | #define IXL_AQ_RC_EFLUSHED19 19 /* cmd flushed due to prev error */ |
773 | #define IXL_AQ_RC_BAD_ADDR20 20 /* contains a bad pointer */ |
774 | #define IXL_AQ_RC_EMODE21 21 /* not allowed in current mode */ |
775 | #define IXL_AQ_RC_EFBIG22 22 /* file too large */ |
776 | |
/*
 * 16-byte hardware transmit descriptor: a buffer address plus a single
 * command/control qword.  The bit layout of "cmd" is described by the
 * DTYPE/CMD/*LEN/BSIZE defines below; the hardware overwrites DTYPE
 * with DTYPE_DONE when the descriptor has been processed.
 */
struct ixl_tx_desc {
	uint64_t		addr;	/* DMA address of the packet data */
	uint64_t		cmd;	/* dtype, commands, offsets, buf size */
#define IXL_TX_DESC_DTYPE_SHIFT0		0
#define IXL_TX_DESC_DTYPE_MASK(0xfULL << 0)		(0xfULL << IXL_TX_DESC_DTYPE_SHIFT0)
#define IXL_TX_DESC_DTYPE_DATA(0x0ULL << 0)		(0x0ULL << IXL_TX_DESC_DTYPE_SHIFT0)
#define IXL_TX_DESC_DTYPE_NOP(0x1ULL << 0)		(0x1ULL << IXL_TX_DESC_DTYPE_SHIFT0)
#define IXL_TX_DESC_DTYPE_CONTEXT(0x1ULL << 0)	(0x1ULL << IXL_TX_DESC_DTYPE_SHIFT0)
#define IXL_TX_DESC_DTYPE_FCOE_CTX(0x2ULL << 0)	(0x2ULL << IXL_TX_DESC_DTYPE_SHIFT0)
#define IXL_TX_DESC_DTYPE_FD(0x8ULL << 0)		(0x8ULL << IXL_TX_DESC_DTYPE_SHIFT0)
#define IXL_TX_DESC_DTYPE_DDP_CTX(0x9ULL << 0)	(0x9ULL << IXL_TX_DESC_DTYPE_SHIFT0)
#define IXL_TX_DESC_DTYPE_FLEX_DATA(0xbULL << 0)	(0xbULL << IXL_TX_DESC_DTYPE_SHIFT0)
#define IXL_TX_DESC_DTYPE_FLEX_CTX_1(0xcULL << 0)	(0xcULL << IXL_TX_DESC_DTYPE_SHIFT0)
#define IXL_TX_DESC_DTYPE_FLEX_CTX_2(0xdULL << 0)	(0xdULL << IXL_TX_DESC_DTYPE_SHIFT0)
#define IXL_TX_DESC_DTYPE_DONE(0xfULL << 0)		(0xfULL << IXL_TX_DESC_DTYPE_SHIFT0)

	/* command bits: EOP/RS flags, checksum offload type, L4 type */
#define IXL_TX_DESC_CMD_SHIFT4		4
#define IXL_TX_DESC_CMD_MASK(0x3ffULL << 4)		(0x3ffULL << IXL_TX_DESC_CMD_SHIFT4)
#define IXL_TX_DESC_CMD_EOP(0x001 << 4)		(0x001 << IXL_TX_DESC_CMD_SHIFT4)
#define IXL_TX_DESC_CMD_RS(0x002 << 4)		(0x002 << IXL_TX_DESC_CMD_SHIFT4)
#define IXL_TX_DESC_CMD_ICRC(0x004 << 4)		(0x004 << IXL_TX_DESC_CMD_SHIFT4)
#define IXL_TX_DESC_CMD_IL2TAG1(0x008 << 4)		(0x008 << IXL_TX_DESC_CMD_SHIFT4)
#define IXL_TX_DESC_CMD_DUMMY(0x010 << 4)		(0x010 << IXL_TX_DESC_CMD_SHIFT4)
#define IXL_TX_DESC_CMD_IIPT_MASK(0x060 << 4)	(0x060 << IXL_TX_DESC_CMD_SHIFT4)
#define IXL_TX_DESC_CMD_IIPT_NONIP(0x000 << 4)	(0x000 << IXL_TX_DESC_CMD_SHIFT4)
#define IXL_TX_DESC_CMD_IIPT_IPV6(0x020 << 4)	(0x020 << IXL_TX_DESC_CMD_SHIFT4)
#define IXL_TX_DESC_CMD_IIPT_IPV4(0x040 << 4)	(0x040 << IXL_TX_DESC_CMD_SHIFT4)
#define IXL_TX_DESC_CMD_IIPT_IPV4_CSUM(0x060 << 4)	(0x060 << IXL_TX_DESC_CMD_SHIFT4)
#define IXL_TX_DESC_CMD_FCOET(0x080 << 4)		(0x080 << IXL_TX_DESC_CMD_SHIFT4)
#define IXL_TX_DESC_CMD_L4T_EOFT_MASK(0x300 << 4)	(0x300 << IXL_TX_DESC_CMD_SHIFT4)
#define IXL_TX_DESC_CMD_L4T_EOFT_UNK(0x000 << 4)	(0x000 << IXL_TX_DESC_CMD_SHIFT4)
#define IXL_TX_DESC_CMD_L4T_EOFT_TCP(0x100 << 4)	(0x100 << IXL_TX_DESC_CMD_SHIFT4)
#define IXL_TX_DESC_CMD_L4T_EOFT_SCTP(0x200 << 4)	(0x200 << IXL_TX_DESC_CMD_SHIFT4)
#define IXL_TX_DESC_CMD_L4T_EOFT_UDP(0x300 << 4)	(0x300 << IXL_TX_DESC_CMD_SHIFT4)

	/* header length fields used for checksum/segmentation offloads */
#define IXL_TX_DESC_MACLEN_SHIFT16	16
#define IXL_TX_DESC_MACLEN_MASK(0x7fULL << 16)	(0x7fULL << IXL_TX_DESC_MACLEN_SHIFT16)
#define IXL_TX_DESC_IPLEN_SHIFT23	23
#define IXL_TX_DESC_IPLEN_MASK(0x7fULL << 23)	(0x7fULL << IXL_TX_DESC_IPLEN_SHIFT23)
#define IXL_TX_DESC_L4LEN_SHIFT30	30
#define IXL_TX_DESC_L4LEN_MASK(0xfULL << 30)	(0xfULL << IXL_TX_DESC_L4LEN_SHIFT30)
#define IXL_TX_DESC_FCLEN_SHIFT30	30
#define IXL_TX_DESC_FCLEN_MASK(0xfULL << 30)	(0xfULL << IXL_TX_DESC_FCLEN_SHIFT30)

	/* buffer size: 14 bits, so a single descriptor maps <= 16KB-1 */
#define IXL_TX_DESC_BSIZE_SHIFT34	34
#define IXL_TX_DESC_BSIZE_MAX0x3fffULL	0x3fffULL
#define IXL_TX_DESC_BSIZE_MASK(0x3fffULL << 34)	\
	(IXL_TX_DESC_BSIZE_MAX0x3fffULL << IXL_TX_DESC_BSIZE_SHIFT34)
} __packed__attribute__((__packed__)) __aligned(16)__attribute__((__aligned__(16)));
826 | |
/* 16-byte receive "read" descriptor: buffer addresses given to the HW. */
struct ixl_rx_rd_desc_16 {
	uint64_t		paddr; /* packet addr */
	uint64_t		haddr; /* header addr */
} __packed__attribute__((__packed__)) __aligned(16)__attribute__((__aligned__(16)));
831 | |
/* 32-byte variant of the receive "read" descriptor. */
struct ixl_rx_rd_desc_32 {
	uint64_t		paddr; /* packet addr */
	uint64_t		haddr; /* header addr */
	uint64_t		_reserved1;
	uint64_t		_reserved2;
} __packed__attribute__((__packed__)) __aligned(16)__attribute__((__aligned__(16)));
838 | |
/*
 * 16-byte receive "writeback" descriptor: overlays the read descriptor
 * once the hardware has filled a buffer.  All status/error/length
 * information is packed into qword1 per the defines below.
 */
struct ixl_rx_wb_desc_16 {
	uint32_t		_reserved1;
	uint32_t		filter_status;
	uint64_t		qword1;
	/* status bits */
#define IXL_RX_DESC_DD(1 << 0)		(1 << 0)
#define IXL_RX_DESC_EOP(1 << 1)		(1 << 1)
#define IXL_RX_DESC_L2TAG1P(1 << 2)	(1 << 2)
#define IXL_RX_DESC_L3L4P(1 << 3)	(1 << 3)
#define IXL_RX_DESC_CRCP(1 << 4)	(1 << 4)
#define IXL_RX_DESC_TSYNINDX_SHIFT5	5	/* TSYNINDX */
#define IXL_RX_DESC_TSYNINDX_MASK(7 << 5)	(7 << IXL_RX_DESC_TSYNINDX_SHIFT5)
#define IXL_RX_DESC_UMB_SHIFT9	9
#define IXL_RX_DESC_UMB_MASK(0x3 << 9)	(0x3 << IXL_RX_DESC_UMB_SHIFT9)
#define IXL_RX_DESC_UMB_UCAST(0x0 << 9)	(0x0 << IXL_RX_DESC_UMB_SHIFT9)
#define IXL_RX_DESC_UMB_MCAST(0x1 << 9)	(0x1 << IXL_RX_DESC_UMB_SHIFT9)
#define IXL_RX_DESC_UMB_BCAST(0x2 << 9)	(0x2 << IXL_RX_DESC_UMB_SHIFT9)
#define IXL_RX_DESC_UMB_MIRROR(0x3 << 9)	(0x3 << IXL_RX_DESC_UMB_SHIFT9)
#define IXL_RX_DESC_FLM(1 << 11)	(1 << 11)
#define IXL_RX_DESC_FLTSTAT_SHIFT12	12
#define IXL_RX_DESC_FLTSTAT_MASK(0x3 << 12)	(0x3 << IXL_RX_DESC_FLTSTAT_SHIFT12)
#define IXL_RX_DESC_FLTSTAT_NODATA(0x0 << 12)	(0x0 << IXL_RX_DESC_FLTSTAT_SHIFT12)
#define IXL_RX_DESC_FLTSTAT_FDFILTID(0x1 << 12)	(0x1 << IXL_RX_DESC_FLTSTAT_SHIFT12)
#define IXL_RX_DESC_FLTSTAT_RSS(0x3 << 12)	(0x3 << IXL_RX_DESC_FLTSTAT_SHIFT12)
#define IXL_RX_DESC_LPBK(1 << 14)	(1 << 14)
#define IXL_RX_DESC_IPV6EXTADD(1 << 15)	(1 << 15)
#define IXL_RX_DESC_INT_UDP_0(1 << 18)	(1 << 18)

	/* error bits */
#define IXL_RX_DESC_RXE(1 << 19)	(1 << 19)
#define IXL_RX_DESC_HBO(1 << 21)	(1 << 21)
#define IXL_RX_DESC_IPE(1 << 22)	(1 << 22)
#define IXL_RX_DESC_L4E(1 << 23)	(1 << 23)
#define IXL_RX_DESC_EIPE(1 << 24)	(1 << 24)
#define IXL_RX_DESC_OVERSIZE(1 << 25)	(1 << 25)

	/* packet type table index */
#define IXL_RX_DESC_PTYPE_SHIFT30	30
#define IXL_RX_DESC_PTYPE_MASK(0xffULL << 30)	(0xffULL << IXL_RX_DESC_PTYPE_SHIFT30)

	/* packet and header buffer lengths */
#define IXL_RX_DESC_PLEN_SHIFT38	38
#define IXL_RX_DESC_PLEN_MASK(0x3fffULL << 38)	(0x3fffULL << IXL_RX_DESC_PLEN_SHIFT38)
#define IXL_RX_DESC_HLEN_SHIFT42	42
#define IXL_RX_DESC_HLEN_MASK(0x7ffULL << 42)	(0x7ffULL << IXL_RX_DESC_HLEN_SHIFT42)
} __packed__attribute__((__packed__)) __aligned(16)__attribute__((__aligned__(16)));
881 | |
/* 32-byte receive writeback descriptor; qword1 layout matches the
 * 16-byte variant above. */
struct ixl_rx_wb_desc_32 {
	uint64_t		qword0;
	uint64_t		qword1;
	uint64_t		qword2;
	uint64_t		qword3;
} __packed__attribute__((__packed__)) __aligned(16)__attribute__((__aligned__(16)));
888 | |
889 | #define IXL_TX_PKT_DESCS8 8 |
890 | #define IXL_TX_QUEUE_ALIGN128 128 |
891 | #define IXL_RX_QUEUE_ALIGN128 128 |
892 | |
893 | #define IXL_HARDMTU9712 9712 /* 9726 - ETHER_HDR_LEN */ |
894 | |
895 | #define IXL_PCIREG0x10 PCI_MAPREG_START0x10 |
896 | |
897 | #define IXL_ITR00x0 0x0 |
898 | #define IXL_ITR10x1 0x1 |
899 | #define IXL_ITR20x2 0x2 |
900 | #define IXL_NOITR0x2 0x2 |
901 | |
902 | #define IXL_AQ_NUM256 256 |
903 | #define IXL_AQ_MASK(256 - 1) (IXL_AQ_NUM256 - 1) |
904 | #define IXL_AQ_ALIGN64 64 /* lol */ |
905 | #define IXL_AQ_BUFLEN4096 4096 |
906 | |
907 | /* Packet Classifier Types for filters */ |
908 | /* bits 0-28 are reserved for future use */ |
909 | #define IXL_PCT_NONF_IPV4_UDP_UCAST(1ULL << 29) (1ULL << 29) /* 722 */ |
910 | #define IXL_PCT_NONF_IPV4_UDP_MCAST(1ULL << 30) (1ULL << 30) /* 722 */ |
911 | #define IXL_PCT_NONF_IPV4_UDP(1ULL << 31) (1ULL << 31) |
912 | #define IXL_PCT_NONF_IPV4_TCP_SYN_NOACK(1ULL << 32) (1ULL << 32) /* 722 */ |
913 | #define IXL_PCT_NONF_IPV4_TCP(1ULL << 33) (1ULL << 33) |
914 | #define IXL_PCT_NONF_IPV4_SCTP(1ULL << 34) (1ULL << 34) |
915 | #define IXL_PCT_NONF_IPV4_OTHER(1ULL << 35) (1ULL << 35) |
916 | #define IXL_PCT_FRAG_IPV4(1ULL << 36) (1ULL << 36) |
917 | /* bits 37-38 are reserved for future use */ |
918 | #define IXL_PCT_NONF_IPV6_UDP_UCAST(1ULL << 39) (1ULL << 39) /* 722 */ |
919 | #define IXL_PCT_NONF_IPV6_UDP_MCAST(1ULL << 40) (1ULL << 40) /* 722 */ |
920 | #define IXL_PCT_NONF_IPV6_UDP(1ULL << 41) (1ULL << 41) |
921 | #define IXL_PCT_NONF_IPV6_TCP_SYN_NOACK(1ULL << 42) (1ULL << 42) /* 722 */ |
922 | #define IXL_PCT_NONF_IPV6_TCP(1ULL << 43) (1ULL << 43) |
923 | #define IXL_PCT_NONF_IPV6_SCTP(1ULL << 44) (1ULL << 44) |
924 | #define IXL_PCT_NONF_IPV6_OTHER(1ULL << 45) (1ULL << 45) |
925 | #define IXL_PCT_FRAG_IPV6(1ULL << 46) (1ULL << 46) |
926 | /* bit 47 is reserved for future use */ |
927 | #define IXL_PCT_FCOE_OX(1ULL << 48) (1ULL << 48) |
928 | #define IXL_PCT_FCOE_RX(1ULL << 49) (1ULL << 49) |
929 | #define IXL_PCT_FCOE_OTHER(1ULL << 50) (1ULL << 50) |
930 | /* bits 51-62 are reserved for future use */ |
931 | #define IXL_PCT_L2_PAYLOAD(1ULL << 63) (1ULL << 63) |
932 | |
933 | #define IXL_RSS_HENA_BASE_DEFAULT(1ULL << 31) | (1ULL << 33) | (1ULL << 34) | (1ULL << 35) | (1ULL << 36) | (1ULL << 41) | (1ULL << 43) | (1ULL << 44) | (1ULL << 45 ) | (1ULL << 46) | (1ULL << 63) \ |
934 | IXL_PCT_NONF_IPV4_UDP(1ULL << 31) | \ |
935 | IXL_PCT_NONF_IPV4_TCP(1ULL << 33) | \ |
936 | IXL_PCT_NONF_IPV4_SCTP(1ULL << 34) | \ |
937 | IXL_PCT_NONF_IPV4_OTHER(1ULL << 35) | \ |
938 | IXL_PCT_FRAG_IPV4(1ULL << 36) | \ |
939 | IXL_PCT_NONF_IPV6_UDP(1ULL << 41) | \ |
940 | IXL_PCT_NONF_IPV6_TCP(1ULL << 43) | \ |
941 | IXL_PCT_NONF_IPV6_SCTP(1ULL << 44) | \ |
942 | IXL_PCT_NONF_IPV6_OTHER(1ULL << 45) | \ |
943 | IXL_PCT_FRAG_IPV6(1ULL << 46) | \ |
944 | IXL_PCT_L2_PAYLOAD(1ULL << 63) |
945 | |
946 | #define IXL_RSS_HENA_BASE_710(1ULL << 31) | (1ULL << 33) | (1ULL << 34) | (1ULL << 35) | (1ULL << 36) | (1ULL << 41) | (1ULL << 43) | (1ULL << 44) | (1ULL << 45 ) | (1ULL << 46) | (1ULL << 63) IXL_RSS_HENA_BASE_DEFAULT(1ULL << 31) | (1ULL << 33) | (1ULL << 34) | (1ULL << 35) | (1ULL << 36) | (1ULL << 41) | (1ULL << 43) | (1ULL << 44) | (1ULL << 45 ) | (1ULL << 46) | (1ULL << 63) |
947 | #define IXL_RSS_HENA_BASE_722(1ULL << 31) | (1ULL << 33) | (1ULL << 34) | (1ULL << 35) | (1ULL << 36) | (1ULL << 41) | (1ULL << 43) | (1ULL << 44) | (1ULL << 45 ) | (1ULL << 46) | (1ULL << 63) | (1ULL << 29 ) | (1ULL << 30) | (1ULL << 39) | (1ULL << 40 ) | (1ULL << 32) | (1ULL << 42) IXL_RSS_HENA_BASE_DEFAULT(1ULL << 31) | (1ULL << 33) | (1ULL << 34) | (1ULL << 35) | (1ULL << 36) | (1ULL << 41) | (1ULL << 43) | (1ULL << 44) | (1ULL << 45 ) | (1ULL << 46) | (1ULL << 63) | \ |
948 | IXL_PCT_NONF_IPV4_UDP_UCAST(1ULL << 29) | \ |
949 | IXL_PCT_NONF_IPV4_UDP_MCAST(1ULL << 30) | \ |
950 | IXL_PCT_NONF_IPV6_UDP_UCAST(1ULL << 39) | \ |
951 | IXL_PCT_NONF_IPV6_UDP_MCAST(1ULL << 40) | \ |
952 | IXL_PCT_NONF_IPV4_TCP_SYN_NOACK(1ULL << 32) | \ |
953 | IXL_PCT_NONF_IPV6_TCP_SYN_NOACK(1ULL << 42) |
954 | |
955 | #define IXL_HMC_ROUNDUP512 512 |
956 | #define IXL_HMC_PGSIZE4096 4096 |
957 | #define IXL_HMC_DVASZsizeof(uint64_t) sizeof(uint64_t) |
958 | #define IXL_HMC_PGS(4096 / sizeof(uint64_t)) (IXL_HMC_PGSIZE4096 / IXL_HMC_DVASZsizeof(uint64_t)) |
959 | #define IXL_HMC_L2SZ(4096 * (4096 / sizeof(uint64_t))) (IXL_HMC_PGSIZE4096 * IXL_HMC_PGS(4096 / sizeof(uint64_t))) |
960 | #define IXL_HMC_PDVALID1ULL 1ULL |
961 | |
/*
 * Per-chip register layout for the admin queues: offsets of the
 * ATQ (admin transmit queue) and ARQ (admin receive queue) registers,
 * plus the chip-specific enable/mask bits for the len/tail/head
 * registers.  Filled in by the ixl_chip glue so the same code drives
 * both PF and other register layouts.
 */
struct ixl_aq_regs {
	bus_size_t		atq_tail;
	bus_size_t		atq_head;
	bus_size_t		atq_len;
	bus_size_t		atq_bal;	/* base address low */
	bus_size_t		atq_bah;	/* base address high */

	bus_size_t		arq_tail;
	bus_size_t		arq_head;
	bus_size_t		arq_len;
	bus_size_t		arq_bal;	/* base address low */
	bus_size_t		arq_bah;	/* base address high */

	uint32_t		atq_len_enable;
	uint32_t		atq_tail_mask;
	uint32_t		atq_head_mask;

	uint32_t		arq_len_enable;
	uint32_t		arq_tail_mask;
	uint32_t		arq_head_mask;
};
983 | |
/* Maps a set of hardware PHY type bits to an ifmedia type. */
struct ixl_phy_type {
	uint64_t	phy_type;	/* bitmask of IXL_PHY_TYPE_* bits */
	uint64_t	ifm_type;	/* corresponding IFM_* media word */
};
988 | |
/* Maps a device link-speed code to a network speed value. */
struct ixl_speed_type {
	uint8_t		dev_speed;	/* speed code reported by the device */
	uint64_t	net_speed;	/* speed for the network stack */
};
993 | |
/* A DMA-able buffer attached to an admin receive queue descriptor. */
struct ixl_aq_buf {
	SIMPLEQ_ENTRY(ixl_aq_buf)struct { struct ixl_aq_buf *sqe_next; }
				 aqb_entry;	/* idle/live list linkage */
	void			*aqb_data;	/* kva of the buffer */
	bus_dmamap_t		 aqb_map;	/* dma map for the buffer */
};
SIMPLEQ_HEAD(ixl_aq_bufs, ixl_aq_buf)struct ixl_aq_bufs { struct ixl_aq_buf *sqh_first; struct ixl_aq_buf **sqh_last; };
1001 | |
/*
 * A single contiguous DMA memory allocation (map + segment + kva),
 * allocated/freed by ixl_dmamem_alloc()/ixl_dmamem_free().  The
 * IXL_DMA_* macros below are the accessors used throughout the driver.
 */
struct ixl_dmamem {
	bus_dmamap_t		ixm_map;
	bus_dma_segment_t	ixm_seg;
	int			ixm_nsegs;
	size_t			ixm_size;
	caddr_t			ixm_kva;
};
#define IXL_DMA_MAP(_ixm)((_ixm)->ixm_map)	((_ixm)->ixm_map)
#define IXL_DMA_DVA(_ixm)((_ixm)->ixm_map->dm_segs[0].ds_addr)	((_ixm)->ixm_map->dm_segs[0].ds_addr)
#define IXL_DMA_KVA(_ixm)((void *)(_ixm)->ixm_kva)	((void *)(_ixm)->ixm_kva)
#define IXL_DMA_LEN(_ixm)((_ixm)->ixm_size)	((_ixm)->ixm_size)
1013 | |
/*
 * One Host Memory Cache (HMC) object type: where it lives in the HMC
 * address space, how many objects there are, and the size of each.
 * Indexed by the IXL_HMC_* object type defines below.
 */
struct ixl_hmc_entry {
	uint64_t		 hmc_base;	/* base offset in HMC space */
	uint32_t		 hmc_count;	/* number of objects */
	uint32_t		 hmc_size;	/* size of one object */
};

#define IXL_HMC_LAN_TX0		0
#define IXL_HMC_LAN_RX1		1
#define IXL_HMC_FCOE_CTX2	2
#define IXL_HMC_FCOE_FILTER3	3
#define IXL_HMC_COUNT4		4
1025 | |
/*
 * Describes one bitfield in a packed HMC object: which struct member
 * (byte offset) it comes from, how many bits it is, and where its
 * least significant bit lands in the packed object.  Consumed by
 * ixl_hmc_pack().
 */
struct ixl_hmc_pack {
	uint16_t offset;	/* byte offset of field in the C struct */
	uint16_t width;		/* field width in bits */
	uint16_t lsb;		/* bit position in the packed object */
};
1031 | |
1032 | /* |
1033 | * these hmc objects have weird sizes and alignments, so these are abstract |
1034 | * representations of them that are nice for c to populate. |
1035 | * |
1036 | * the packing code relies on little-endian values being stored in the fields, |
1037 | * no high bits in the fields being set, and the fields must be packed in the |
1038 | * same order as they are in the ctx structure. |
1039 | */ |
1040 | |
/*
 * C-friendly representation of the HMC receive queue context.  Fields
 * must be little-endian values with no bits above the packed field
 * width set, and must appear in the same order as ixl_hmc_pack_rxq[]
 * (see the comment above about the packing rules).
 */
struct ixl_hmc_rxq {
	uint16_t		 head;
	uint8_t			 cpuid;
	uint64_t		 base;
#define IXL_HMC_RXQ_BASE_UNIT128		128
	uint16_t		 qlen;
	uint16_t		 dbuff;		/* data buffer size, in units */
#define IXL_HMC_RXQ_DBUFF_UNIT128		128
	uint8_t			 hbuff;		/* header buffer size, in units */
#define IXL_HMC_RXQ_HBUFF_UNIT64		64
	uint8_t			 dtype;
#define IXL_HMC_RXQ_DTYPE_NOSPLIT0x0	0x0
#define IXL_HMC_RXQ_DTYPE_HSPLIT0x1	0x1
#define IXL_HMC_RXQ_DTYPE_SPLIT_ALWAYS0x2	0x2
	uint8_t			 dsize;		/* 16 or 32 byte descriptors */
#define IXL_HMC_RXQ_DSIZE_160		0
#define IXL_HMC_RXQ_DSIZE_321		1
	uint8_t			 crcstrip;
	uint8_t			 fc_ena;
	uint8_t			 l2sel;
	uint8_t			 hsplit_0;
	uint8_t			 hsplit_1;
	uint8_t			 showiv;
	uint16_t		 rxmax;
	uint8_t			 tphrdesc_ena;
	uint8_t			 tphwdesc_ena;
	uint8_t			 tphdata_ena;
	uint8_t			 tphhead_ena;
	uint8_t			 lrxqthresh;
	uint8_t			 prefena;
};
1072 | |
/*
 * Bit layout of the packed HMC rx queue context.  Entry order must
 * match the member order of struct ixl_hmc_rxq above; fields are
 * { struct offset, width in bits, lsb position in the context }.
 */
static const struct ixl_hmc_pack ixl_hmc_pack_rxq[] = {
	{ offsetof(struct ixl_hmc_rxq, head)__builtin_offsetof(struct ixl_hmc_rxq, head),	13,	0 },
	{ offsetof(struct ixl_hmc_rxq, cpuid)__builtin_offsetof(struct ixl_hmc_rxq, cpuid),	8,	13 },
	{ offsetof(struct ixl_hmc_rxq, base)__builtin_offsetof(struct ixl_hmc_rxq, base),	57,	32 },
	{ offsetof(struct ixl_hmc_rxq, qlen)__builtin_offsetof(struct ixl_hmc_rxq, qlen),	13,	89 },
	{ offsetof(struct ixl_hmc_rxq, dbuff)__builtin_offsetof(struct ixl_hmc_rxq, dbuff),	7,	102 },
	{ offsetof(struct ixl_hmc_rxq, hbuff)__builtin_offsetof(struct ixl_hmc_rxq, hbuff),	5,	109 },
	{ offsetof(struct ixl_hmc_rxq, dtype)__builtin_offsetof(struct ixl_hmc_rxq, dtype),	2,	114 },
	{ offsetof(struct ixl_hmc_rxq, dsize)__builtin_offsetof(struct ixl_hmc_rxq, dsize),	1,	116 },
	{ offsetof(struct ixl_hmc_rxq, crcstrip)__builtin_offsetof(struct ixl_hmc_rxq, crcstrip),	1,	117 },
	{ offsetof(struct ixl_hmc_rxq, fc_ena)__builtin_offsetof(struct ixl_hmc_rxq, fc_ena),	1,	118 },
	{ offsetof(struct ixl_hmc_rxq, l2sel)__builtin_offsetof(struct ixl_hmc_rxq, l2sel),	1,	119 },
	{ offsetof(struct ixl_hmc_rxq, hsplit_0)__builtin_offsetof(struct ixl_hmc_rxq, hsplit_0),	4,	120 },
	{ offsetof(struct ixl_hmc_rxq, hsplit_1)__builtin_offsetof(struct ixl_hmc_rxq, hsplit_1),	2,	124 },
	{ offsetof(struct ixl_hmc_rxq, showiv)__builtin_offsetof(struct ixl_hmc_rxq, showiv),	1,	127 },
	{ offsetof(struct ixl_hmc_rxq, rxmax)__builtin_offsetof(struct ixl_hmc_rxq, rxmax),	14,	174 },
	{ offsetof(struct ixl_hmc_rxq, tphrdesc_ena)__builtin_offsetof(struct ixl_hmc_rxq, tphrdesc_ena),	1,	193 },
	{ offsetof(struct ixl_hmc_rxq, tphwdesc_ena)__builtin_offsetof(struct ixl_hmc_rxq, tphwdesc_ena),	1,	194 },
	{ offsetof(struct ixl_hmc_rxq, tphdata_ena)__builtin_offsetof(struct ixl_hmc_rxq, tphdata_ena),	1,	195 },
	{ offsetof(struct ixl_hmc_rxq, tphhead_ena)__builtin_offsetof(struct ixl_hmc_rxq, tphhead_ena),	1,	196 },
	{ offsetof(struct ixl_hmc_rxq, lrxqthresh)__builtin_offsetof(struct ixl_hmc_rxq, lrxqthresh),	3,	198 },
	{ offsetof(struct ixl_hmc_rxq, prefena)__builtin_offsetof(struct ixl_hmc_rxq, prefena),	1,	201 },
};
1096 | |
1097 | #define IXL_HMC_RXQ_MINSIZE(201 + 1) (201 + 1) |
1098 | |
/*
 * C-friendly representation of the HMC transmit queue context.  Same
 * packing rules as ixl_hmc_rxq: little-endian fields, no high bits,
 * member order matching ixl_hmc_pack_txq[].
 */
struct ixl_hmc_txq {
	uint16_t		head;
	uint8_t			new_context;
	uint64_t		base;
#define IXL_HMC_TXQ_BASE_UNIT128		128
	uint8_t			fc_ena;
	uint8_t			timesync_ena;
	uint8_t			fd_ena;
	uint8_t			alt_vlan_ena;
	uint16_t		thead_wb;
	uint8_t			cpuid;
	uint8_t			head_wb_ena;	/* head writeback mode */
#define IXL_HMC_TXQ_DESC_WB0		0
#define IXL_HMC_TXQ_HEAD_WB1		1
	uint16_t		qlen;
	uint8_t			tphrdesc_ena;
	uint8_t			tphrpacket_ena;
	uint8_t			tphwdesc_ena;
	uint64_t		head_wb_addr;	/* DMA addr for head writeback */
	uint32_t		crc;
	uint16_t		rdylist;
	uint8_t			rdylist_act;
};
1122 | |
/*
 * Bit layout of the packed HMC tx queue context.  The context is
 * addressed in 128-bit "lines"; fields past the first line encode the
 * line number in their lsb (e.g. 0 + 128, 0 + (7*128)).  Entry order
 * must match struct ixl_hmc_txq above.
 */
static const struct ixl_hmc_pack ixl_hmc_pack_txq[] = {
	{ offsetof(struct ixl_hmc_txq, head)__builtin_offsetof(struct ixl_hmc_txq, head),	13,	0 },
	{ offsetof(struct ixl_hmc_txq, new_context)__builtin_offsetof(struct ixl_hmc_txq, new_context),	1,	30 },
	{ offsetof(struct ixl_hmc_txq, base)__builtin_offsetof(struct ixl_hmc_txq, base),	57,	32 },
	{ offsetof(struct ixl_hmc_txq, fc_ena)__builtin_offsetof(struct ixl_hmc_txq, fc_ena),	1,	89 },
	{ offsetof(struct ixl_hmc_txq, timesync_ena)__builtin_offsetof(struct ixl_hmc_txq, timesync_ena),	1,	90 },
	{ offsetof(struct ixl_hmc_txq, fd_ena)__builtin_offsetof(struct ixl_hmc_txq, fd_ena),	1,	91 },
	{ offsetof(struct ixl_hmc_txq, alt_vlan_ena)__builtin_offsetof(struct ixl_hmc_txq, alt_vlan_ena),	1,	92 },
	{ offsetof(struct ixl_hmc_txq, cpuid)__builtin_offsetof(struct ixl_hmc_txq, cpuid),	8,	96 },
	/* line 1 */
	{ offsetof(struct ixl_hmc_txq, thead_wb)__builtin_offsetof(struct ixl_hmc_txq, thead_wb),	13,	0 + 128 },
	{ offsetof(struct ixl_hmc_txq, head_wb_ena)__builtin_offsetof(struct ixl_hmc_txq, head_wb_ena),	1,	32 + 128 },
	{ offsetof(struct ixl_hmc_txq, qlen)__builtin_offsetof(struct ixl_hmc_txq, qlen),	13,	33 + 128 },
	{ offsetof(struct ixl_hmc_txq, tphrdesc_ena)__builtin_offsetof(struct ixl_hmc_txq, tphrdesc_ena),	1,	46 + 128 },
	{ offsetof(struct ixl_hmc_txq, tphrpacket_ena)__builtin_offsetof(struct ixl_hmc_txq, tphrpacket_ena),	1,	47 + 128 },
	{ offsetof(struct ixl_hmc_txq, tphwdesc_ena)__builtin_offsetof(struct ixl_hmc_txq, tphwdesc_ena),	1,	48 + 128 },
	{ offsetof(struct ixl_hmc_txq, head_wb_addr)__builtin_offsetof(struct ixl_hmc_txq, head_wb_addr),	64,	64 + 128 },
	/* line 7 */
	{ offsetof(struct ixl_hmc_txq, crc)__builtin_offsetof(struct ixl_hmc_txq, crc),	32,	0 + (7*128) },
	{ offsetof(struct ixl_hmc_txq, rdylist)__builtin_offsetof(struct ixl_hmc_txq, rdylist),	10,	84 + (7*128) },
	{ offsetof(struct ixl_hmc_txq, rdylist_act)__builtin_offsetof(struct ixl_hmc_txq, rdylist_act),	1,	94 + (7*128) },
};
1145 | |
1146 | #define IXL_HMC_TXQ_MINSIZE(94 + (7*128) + 1) (94 + (7*128) + 1) |
1147 | |
/* RSS hash key: 52 bytes, handled as 13 32-bit words. */
struct ixl_rss_key {
	uint32_t		 key[13];
};
1151 | |
/* 128-byte RSS lookup table. */
struct ixl_rss_lut_128 {
	uint32_t		 entries[128 / sizeof(uint32_t)];
};
1155 | |
/* 512-byte RSS lookup table. */
struct ixl_rss_lut_512 {
	uint32_t		 entries[512 / sizeof(uint32_t)];
};
1159 | |
1160 | /* driver structures */ |
1161 | |
1162 | struct ixl_vector; |
1163 | struct ixl_chip; |
1164 | |
/* Per-tx-descriptor software state: the mbuf and its dma map. */
struct ixl_tx_map {
	struct mbuf		*txm_m;		/* mbuf chain, NULL if free */
	bus_dmamap_t		 txm_map;	/* dma map for txm_m */
	unsigned int		 txm_eop;	/* index of the EOP descriptor */
};
1170 | |
/*
 * Software state for one hardware transmit ring.  Cache-line aligned
 * since each ring is serviced by its own interrupt vector.
 */
struct ixl_tx_ring {
	struct ixl_softc	*txr_sc;	/* back pointer */
	struct ixl_vector	*txr_vector;	/* owning interrupt vector */
	struct ifqueue		*txr_ifq;	/* stack queue feeding this ring */

	unsigned int		 txr_prod;	/* next descriptor to fill */
	unsigned int		 txr_cons;	/* next descriptor to reclaim */

	struct ixl_tx_map	*txr_maps;	/* per-descriptor state */
	struct ixl_dmamem	 txr_mem;	/* the descriptor ring itself */

	bus_size_t		 txr_tail;	/* tail register offset */
	unsigned int		 txr_qid;	/* hardware queue id */
} __aligned(CACHE_LINE_SIZE)__attribute__((__aligned__(64)));
1185 | |
/* Per-rx-descriptor software state: the mbuf and its dma map. */
struct ixl_rx_map {
	struct mbuf		*rxm_m;		/* mbuf, NULL if slot empty */
	bus_dmamap_t		 rxm_map;	/* dma map for rxm_m */
};
1190 | |
/*
 * Software state for one hardware receive ring.  Cache-line aligned
 * since each ring is serviced by its own interrupt vector.
 */
struct ixl_rx_ring {
	struct ixl_softc	*rxr_sc;	/* back pointer */
	struct ixl_vector	*rxr_vector;	/* owning interrupt vector */
	struct ifiqueue		*rxr_ifiq;	/* stack input queue */

	struct if_rxring	 rxr_acct;	/* rx buffer accounting */
	struct timeout		 rxr_refill;	/* retry when mbufs ran out */

	unsigned int		 rxr_prod;	/* next descriptor to fill */
	unsigned int		 rxr_cons;	/* next descriptor to harvest */

	struct ixl_rx_map	*rxr_maps;	/* per-descriptor state */
	struct ixl_dmamem	 rxr_mem;	/* the descriptor ring itself */

	/* partially reassembled multi-descriptor packet */
	struct mbuf		*rxr_m_head;
	struct mbuf		**rxr_m_tail;

	bus_size_t		 rxr_tail;	/* tail register offset */
	unsigned int		 rxr_qid;	/* hardware queue id */
} __aligned(CACHE_LINE_SIZE)__attribute__((__aligned__(64)));
1211 | |
/*
 * An asynchronous admin transmit queue command: the descriptor to
 * post plus a completion callback invoked with iatq_arg when the
 * firmware responds.
 */
struct ixl_atq {
	struct ixl_aq_desc	 iatq_desc;	/* command descriptor */
	void			*iatq_arg;	/* callback argument */
	void			(*iatq_fn)(struct ixl_softc *, void *);
};
SIMPLEQ_HEAD(ixl_atq_list, ixl_atq)struct ixl_atq_list { struct ixl_atq *sqh_first; struct ixl_atq **sqh_last; };
1218 | |
/*
 * One MSI-X interrupt vector and the tx/rx ring pair it services.
 * Cache-line aligned to keep per-vector state on separate lines.
 */
struct ixl_vector {
	struct ixl_softc	*iv_sc;		/* back pointer */
	struct ixl_rx_ring	*iv_rxr;	/* rx ring for this vector */
	struct ixl_tx_ring	*iv_txr;	/* tx ring for this vector */
	int			 iv_qid;	/* queue pair id */
	void			*iv_ihc;	/* interrupt handler cookie */
	char			 iv_name[16];	/* interrupt name for vmstat */
} __aligned(CACHE_LINE_SIZE)__attribute__((__aligned__(64)));
1227 | |
/*
 * Per-device softc.  Groups, in order: autoconf/network glue, PCI and
 * bus resources, identifiers learned from firmware, the admin transmit
 * (atq) and receive (arq) queues, link state tracking, HMC backing
 * memory, and the per-queue-pair ring/vector state.
 */
struct ixl_softc {
	struct device		 sc_dev;
	const struct ixl_chip	*sc_chip;	/* chip-specific glue */
	struct arpcom		 sc_ac;
	struct ifmedia		 sc_media;
	uint64_t		 sc_media_status;
	uint64_t		 sc_media_active;

	pci_chipset_tag_t	 sc_pc;
	pci_intr_handle_t	 sc_ih;
	void			*sc_ihc;
	pcitag_t		 sc_tag;

	bus_dma_tag_t		 sc_dmat;
	bus_space_tag_t		 sc_memt;
	bus_space_handle_t	 sc_memh;
	bus_size_t		 sc_mems;

	/* identifiers reported by the firmware at attach time */
	uint16_t		 sc_api_major;
	uint16_t		 sc_api_minor;
	uint8_t			 sc_pf_id;
	uint16_t		 sc_uplink_seid;	/* le */
	uint16_t		 sc_downlink_seid;	/* le */
	uint16_t		 sc_veb_seid;		/* le */
	uint16_t		 sc_vsi_number;		/* le */
	uint16_t		 sc_seid;
	unsigned int		 sc_base_queue;
	unsigned int		 sc_port;

	struct ixl_dmamem	 sc_scratch;	/* shared AQ bounce buffer */

	const struct ixl_aq_regs *
				 sc_aq_regs;	/* chip AQ register layout */

	/* admin transmit queue (commands to firmware) */
	struct ixl_dmamem	 sc_atq;
	unsigned int		 sc_atq_prod;
	unsigned int		 sc_atq_cons;

	/* admin receive queue (events from firmware) */
	struct ixl_dmamem	 sc_arq;
	struct task		 sc_arq_task;
	struct ixl_aq_bufs	 sc_arq_idle;
	struct ixl_aq_bufs	 sc_arq_live;
	struct if_rxring	 sc_arq_ring;
	unsigned int		 sc_arq_prod;
	unsigned int		 sc_arq_cons;

	struct mutex		 sc_link_state_mtx;
	struct task		 sc_link_state_task;
	struct ixl_atq		 sc_link_state_atq;

	/* host memory cache backing pages */
	struct ixl_dmamem	 sc_hmc_sd;
	struct ixl_dmamem	 sc_hmc_pd;
	struct ixl_hmc_entry	 sc_hmc_entries[IXL_HMC_COUNT4];

	unsigned int		 sc_tx_ring_ndescs;
	unsigned int		 sc_rx_ring_ndescs;
	unsigned int		 sc_nqueues;	/* 1 << sc_nqueues */

	struct intrmap		*sc_intrmap;
	struct ixl_vector	*sc_vectors;

	struct rwlock		 sc_cfg_lock;
	unsigned int		 sc_dead;	/* set when the HW is given up on */

	uint8_t			 sc_enaddr[ETHER_ADDR_LEN6];

#if NKSTAT0 > 0
	struct mutex		 sc_kstat_mtx;
	struct timeout		 sc_kstat_tmo;
	struct kstat		*sc_port_kstat;
	struct kstat		*sc_vsi_kstat;
#endif
};
1301 | #define DEVNAME(_sc)((_sc)->sc_dev.dv_xname) ((_sc)->sc_dev.dv_xname) |
1302 | |
1303 | #define delaymsec(_ms)(*delay_func)(1000 * (_ms)) delay(1000 * (_ms))(*delay_func)(1000 * (_ms)) |
1304 | |
1305 | static void ixl_clear_hw(struct ixl_softc *); |
1306 | static int ixl_pf_reset(struct ixl_softc *); |
1307 | |
1308 | static int ixl_dmamem_alloc(struct ixl_softc *, struct ixl_dmamem *, |
1309 | bus_size_t, u_int); |
1310 | static void ixl_dmamem_free(struct ixl_softc *, struct ixl_dmamem *); |
1311 | |
1312 | static int ixl_arq_fill(struct ixl_softc *); |
1313 | static void ixl_arq_unfill(struct ixl_softc *); |
1314 | |
1315 | static int ixl_atq_poll(struct ixl_softc *, struct ixl_aq_desc *, |
1316 | unsigned int); |
1317 | static void ixl_atq_set(struct ixl_atq *, |
1318 | void (*)(struct ixl_softc *, void *), void *); |
1319 | static void ixl_atq_post(struct ixl_softc *, struct ixl_atq *); |
1320 | static void ixl_atq_done(struct ixl_softc *); |
1321 | static void ixl_atq_exec(struct ixl_softc *, struct ixl_atq *, |
1322 | const char *); |
1323 | static int ixl_get_version(struct ixl_softc *); |
1324 | static int ixl_pxe_clear(struct ixl_softc *); |
1325 | static int ixl_lldp_shut(struct ixl_softc *); |
1326 | static int ixl_get_mac(struct ixl_softc *); |
1327 | static int ixl_get_switch_config(struct ixl_softc *); |
1328 | static int ixl_phy_mask_ints(struct ixl_softc *); |
1329 | static int ixl_get_phy_types(struct ixl_softc *, uint64_t *); |
1330 | static int ixl_restart_an(struct ixl_softc *); |
1331 | static int ixl_hmc(struct ixl_softc *); |
1332 | static void ixl_hmc_free(struct ixl_softc *); |
1333 | static int ixl_get_vsi(struct ixl_softc *); |
1334 | static int ixl_set_vsi(struct ixl_softc *); |
1335 | static int ixl_get_link_status(struct ixl_softc *); |
1336 | static int ixl_set_link_status(struct ixl_softc *, |
1337 | const struct ixl_aq_desc *); |
1338 | static int ixl_add_macvlan(struct ixl_softc *, uint8_t *, uint16_t, |
1339 | uint16_t); |
1340 | static int ixl_remove_macvlan(struct ixl_softc *, uint8_t *, uint16_t, |
1341 | uint16_t); |
1342 | static void ixl_link_state_update(void *); |
1343 | static void ixl_arq(void *); |
1344 | static void ixl_hmc_pack(void *, const void *, |
1345 | const struct ixl_hmc_pack *, unsigned int); |
1346 | |
1347 | static int ixl_get_sffpage(struct ixl_softc *, struct if_sffpage *); |
1348 | static int ixl_sff_get_byte(struct ixl_softc *, uint8_t, uint32_t, |
1349 | uint8_t *); |
1350 | static int ixl_sff_set_byte(struct ixl_softc *, uint8_t, uint32_t, |
1351 | uint8_t); |
1352 | |
1353 | static int ixl_match(struct device *, void *, void *); |
1354 | static void ixl_attach(struct device *, struct device *, void *); |
1355 | |
1356 | static void ixl_media_add(struct ixl_softc *, uint64_t); |
1357 | static int ixl_media_change(struct ifnet *); |
1358 | static void ixl_media_status(struct ifnet *, struct ifmediareq *); |
1359 | static void ixl_watchdog(struct ifnet *); |
1360 | static int ixl_ioctl(struct ifnet *, u_long, caddr_t); |
1361 | static void ixl_start(struct ifqueue *); |
1362 | static int ixl_intr0(void *); |
1363 | static int ixl_intr_vector(void *); |
1364 | static int ixl_up(struct ixl_softc *); |
1365 | static int ixl_down(struct ixl_softc *); |
1366 | static int ixl_iff(struct ixl_softc *); |
1367 | |
1368 | static struct ixl_tx_ring * |
1369 | ixl_txr_alloc(struct ixl_softc *, unsigned int); |
1370 | static void ixl_txr_qdis(struct ixl_softc *, struct ixl_tx_ring *, int); |
1371 | static void ixl_txr_config(struct ixl_softc *, struct ixl_tx_ring *); |
1372 | static int ixl_txr_enabled(struct ixl_softc *, struct ixl_tx_ring *); |
1373 | static int ixl_txr_disabled(struct ixl_softc *, struct ixl_tx_ring *); |
1374 | static void ixl_txr_unconfig(struct ixl_softc *, struct ixl_tx_ring *); |
1375 | static void ixl_txr_clean(struct ixl_softc *, struct ixl_tx_ring *); |
1376 | static void ixl_txr_free(struct ixl_softc *, struct ixl_tx_ring *); |
1377 | static int ixl_txeof(struct ixl_softc *, struct ixl_tx_ring *); |
1378 | |
1379 | static struct ixl_rx_ring * |
1380 | ixl_rxr_alloc(struct ixl_softc *, unsigned int); |
1381 | static void ixl_rxr_config(struct ixl_softc *, struct ixl_rx_ring *); |
1382 | static int ixl_rxr_enabled(struct ixl_softc *, struct ixl_rx_ring *); |
1383 | static int ixl_rxr_disabled(struct ixl_softc *, struct ixl_rx_ring *); |
1384 | static void ixl_rxr_unconfig(struct ixl_softc *, struct ixl_rx_ring *); |
1385 | static void ixl_rxr_clean(struct ixl_softc *, struct ixl_rx_ring *); |
1386 | static void ixl_rxr_free(struct ixl_softc *, struct ixl_rx_ring *); |
1387 | static int ixl_rxeof(struct ixl_softc *, struct ixl_rx_ring *); |
1388 | static void ixl_rxfill(struct ixl_softc *, struct ixl_rx_ring *); |
1389 | static void ixl_rxrefill(void *); |
1390 | static int ixl_rxrinfo(struct ixl_softc *, struct if_rxrinfo *); |
1391 | |
1392 | #if NKSTAT0 > 0 |
1393 | static void ixl_kstat_attach(struct ixl_softc *); |
1394 | #endif |
1395 | |
/* autoconf driver definition: device name and class */
struct cfdriver ixl_cd = {
	NULL((void *)0),
	"ixl",
	DV_IFNET,
};
1401 | |
/* autoconf attach glue: softc size, match and attach entry points */
struct cfattach ixl_ca = {
	sizeof(struct ixl_softc),
	ixl_match,
	ixl_attach,
};
1407 | |
1408 | static const struct ixl_phy_type ixl_phy_type_map[] = { |
1409 | { 1ULL << IXL_PHY_TYPE_SGMII0x00, IFM_1000_SGMII36 }, |
1410 | { 1ULL << IXL_PHY_TYPE_1000BASE_KX0x01, IFM_1000_KX28 }, |
1411 | { 1ULL << IXL_PHY_TYPE_10GBASE_KX40x02, IFM_10G_KX429 }, |
1412 | { 1ULL << IXL_PHY_TYPE_10GBASE_KR0x03, IFM_10G_KR30 }, |
1413 | { 1ULL << IXL_PHY_TYPE_40GBASE_KR40x04, IFM_40G_KR440 }, |
1414 | { 1ULL << IXL_PHY_TYPE_XAUI0x05 | |
1415 | 1ULL << IXL_PHY_TYPE_XFI0x06, IFM_10G_CX420 }, |
1416 | { 1ULL << IXL_PHY_TYPE_SFI0x07, IFM_10G_SFI37 }, |
1417 | { 1ULL << IXL_PHY_TYPE_XLAUI0x08 | |
1418 | 1ULL << IXL_PHY_TYPE_XLPPI0x09, IFM_40G_XLPPI38 }, |
1419 | { 1ULL << IXL_PHY_TYPE_40GBASE_CR4_CU0x0a | |
1420 | 1ULL << IXL_PHY_TYPE_40GBASE_CR40x18, IFM_40G_CR425 }, |
1421 | { 1ULL << IXL_PHY_TYPE_10GBASE_CR1_CU0x0b | |
1422 | 1ULL << IXL_PHY_TYPE_10GBASE_CR10x17, IFM_10G_CR131 }, |
1423 | { 1ULL << IXL_PHY_TYPE_10GBASE_AOC0x0c, IFM_10G_AOC54 }, |
1424 | { 1ULL << IXL_PHY_TYPE_40GBASE_AOC0x0d, IFM_40G_AOC56 }, |
1425 | { 1ULL << IXL_PHY_TYPE_100BASE_TX0x11, IFM_100_TX6 }, |
1426 | { 1ULL << IXL_PHY_TYPE_1000BASE_T_OPTICAL0x1d | |
1427 | 1ULL << IXL_PHY_TYPE_1000BASE_T0x12, IFM_1000_T16 }, |
1428 | { 1ULL << IXL_PHY_TYPE_10GBASE_T0x13, IFM_10G_T22 }, |
1429 | { 1ULL << IXL_PHY_TYPE_10GBASE_SR0x14, IFM_10G_SR19 }, |
1430 | { 1ULL << IXL_PHY_TYPE_10GBASE_LR0x15, IFM_10G_LR18 }, |
1431 | { 1ULL << IXL_PHY_TYPE_10GBASE_SFPP_CU0x16, IFM_10G_SFP_CU23 }, |
1432 | { 1ULL << IXL_PHY_TYPE_40GBASE_SR40x19, IFM_40G_SR426 }, |
1433 | { 1ULL << IXL_PHY_TYPE_40GBASE_LR40x1a, IFM_40G_LR427 }, |
1434 | { 1ULL << IXL_PHY_TYPE_1000BASE_SX0x1b, IFM_1000_SX11 }, |
1435 | { 1ULL << IXL_PHY_TYPE_1000BASE_LX0x1c, IFM_1000_LX14 }, |
1436 | { 1ULL << IXL_PHY_TYPE_20GBASE_KR20x1e, IFM_20G_KR232 }, |
1437 | { 1ULL << IXL_PHY_TYPE_25GBASE_KR0x1f, IFM_25G_KR48 }, |
1438 | { 1ULL << IXL_PHY_TYPE_25GBASE_CR0x20, IFM_25G_CR47 }, |
1439 | { 1ULL << IXL_PHY_TYPE_25GBASE_SR0x21, IFM_25G_SR49 }, |
1440 | { 1ULL << IXL_PHY_TYPE_25GBASE_LR0x22, IFM_25G_LR52 }, |
1441 | { 1ULL << IXL_PHY_TYPE_25GBASE_AOC0x23, IFM_25G_AOC55 }, |
1442 | { 1ULL << IXL_PHY_TYPE_25GBASE_ACC0x24, IFM_25G_CR47 }, |
1443 | }; |
1444 | |
1445 | static const struct ixl_speed_type ixl_speed_type_map[] = { |
1446 | { IXL_AQ_LINK_SPEED_40GB(1 << 4), IF_Gbps(40)((((((40) * 1000ULL) * 1000ULL) * 1000ULL))) }, |
1447 | { IXL_AQ_LINK_SPEED_25GB(1 << 6), IF_Gbps(25)((((((25) * 1000ULL) * 1000ULL) * 1000ULL))) }, |
1448 | { IXL_AQ_LINK_SPEED_10GB(1 << 3), IF_Gbps(10)((((((10) * 1000ULL) * 1000ULL) * 1000ULL))) }, |
1449 | { IXL_AQ_LINK_SPEED_1GB(1 << 2), IF_Gbps(1)((((((1) * 1000ULL) * 1000ULL) * 1000ULL))) }, |
1450 | }; |
1451 | |
1452 | static const struct ixl_aq_regs ixl_pf_aq_regs = { |
1453 | .atq_tail = I40E_PF_ATQT0x00080400, |
1454 | .atq_tail_mask = I40E_PF_ATQT_ATQT_MASK((0x3FF) << (0)), |
1455 | .atq_head = I40E_PF_ATQH0x00080300, |
1456 | .atq_head_mask = I40E_PF_ATQH_ATQH_MASK((0x3FF) << (0)), |
1457 | .atq_len = I40E_PF_ATQLEN0x00080200, |
1458 | .atq_bal = I40E_PF_ATQBAL0x00080000, |
1459 | .atq_bah = I40E_PF_ATQBAH0x00080100, |
1460 | .atq_len_enable = I40E_PF_ATQLEN_ATQENABLE_MASK((0x1) << (31)), |
1461 | |
1462 | .arq_tail = I40E_PF_ARQT0x00080480, |
1463 | .arq_tail_mask = I40E_PF_ARQT_ARQT_MASK((0x3FF) << (0)), |
1464 | .arq_head = I40E_PF_ARQH0x00080380, |
1465 | .arq_head_mask = I40E_PF_ARQH_ARQH_MASK((0x3FF) << (0)), |
1466 | .arq_len = I40E_PF_ARQLEN0x00080280, |
1467 | .arq_bal = I40E_PF_ARQBAL0x00080080, |
1468 | .arq_bah = I40E_PF_ARQBAH0x00080180, |
1469 | .arq_len_enable = I40E_PF_ARQLEN_ARQENABLE_MASK((0x1) << (31)), |
1470 | }; |
1471 | |
1472 | #define ixl_rd(_s, _r)(((_s)->sc_memt)->read_4(((_s)->sc_memh), ((_r)))) \ |
1473 | bus_space_read_4((_s)->sc_memt, (_s)->sc_memh, (_r))(((_s)->sc_memt)->read_4(((_s)->sc_memh), ((_r)))) |
1474 | #define ixl_wr(_s, _r, _v)(((_s)->sc_memt)->write_4(((_s)->sc_memh), ((_r)), ( (_v)))) \ |
1475 | bus_space_write_4((_s)->sc_memt, (_s)->sc_memh, (_r), (_v))(((_s)->sc_memt)->write_4(((_s)->sc_memh), ((_r)), ( (_v)))) |
1476 | #define ixl_barrier(_s, _r, _l, _o)bus_space_barrier((_s)->sc_memt, (_s)->sc_memh, (_r), ( _l), (_o)) \ |
1477 | bus_space_barrier((_s)->sc_memt, (_s)->sc_memh, (_r), (_l), (_o)) |
1478 | #define ixl_intr_enable(_s)((((_s))->sc_memt)->write_4((((_s))->sc_memh), ((0x00038480 )), ((((0x1) << (0)) | ((0x1) << (1)) | (0x2 << 3))))) \ |
1479 | ixl_wr((_s), I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_INTENA_MASK | \((((_s))->sc_memt)->write_4((((_s))->sc_memh), ((0x00038480 )), ((((0x1) << (0)) | ((0x1) << (1)) | (0x2 << 3))))) |
1480 | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK | \((((_s))->sc_memt)->write_4((((_s))->sc_memh), ((0x00038480 )), ((((0x1) << (0)) | ((0x1) << (1)) | (0x2 << 3))))) |
1481 | (IXL_NOITR << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT))((((_s))->sc_memt)->write_4((((_s))->sc_memh), ((0x00038480 )), ((((0x1) << (0)) | ((0x1) << (1)) | (0x2 << 3))))) |
1482 | |
1483 | #define ixl_nqueues(_sc)(1 << (_sc)->sc_nqueues) (1 << (_sc)->sc_nqueues) |
1484 | |
1485 | #ifdef __LP64__1 |
1486 | #define ixl_dmamem_hi(_ixm)(uint32_t)(((_ixm)->ixm_map->dm_segs[0].ds_addr) >> 32) (uint32_t)(IXL_DMA_DVA(_ixm)((_ixm)->ixm_map->dm_segs[0].ds_addr) >> 32) |
1487 | #else |
1488 | #define ixl_dmamem_hi(_ixm)(uint32_t)(((_ixm)->ixm_map->dm_segs[0].ds_addr) >> 32) 0 |
1489 | #endif |
1490 | |
1491 | #define ixl_dmamem_lo(_ixm)(uint32_t)((_ixm)->ixm_map->dm_segs[0].ds_addr) (uint32_t)IXL_DMA_DVA(_ixm)((_ixm)->ixm_map->dm_segs[0].ds_addr) |
1492 | |
1493 | static inline void |
1494 | ixl_aq_dva(struct ixl_aq_desc *iaq, bus_addr_t addr) |
1495 | { |
1496 | #ifdef __LP64__1 |
1497 | htolem32(&iaq->iaq_param[2], addr >> 32)(*(__uint32_t *)(&iaq->iaq_param[2]) = ((__uint32_t)(addr >> 32))); |
1498 | #else |
1499 | iaq->iaq_param[2] = htole32(0)((__uint32_t)(0)); |
1500 | #endif |
1501 | htolem32(&iaq->iaq_param[3], addr)(*(__uint32_t *)(&iaq->iaq_param[3]) = ((__uint32_t)(addr ))); |
1502 | } |
1503 | |
1504 | #if _BYTE_ORDER1234 == _BIG_ENDIAN4321 |
1505 | #define HTOLE16(_x)(_x) (uint16_t)(((_x) & 0xff) << 8 | ((_x) & 0xff00) >> 8) |
1506 | #else |
1507 | #define HTOLE16(_x)(_x) (_x) |
1508 | #endif |
1509 | |
1510 | static struct rwlock ixl_sff_lock = RWLOCK_INITIALIZER("ixlsff"){ 0, "ixlsff" }; |
1511 | |
1512 | /* deal with differences between chips */ |
1513 | |
1514 | struct ixl_chip { |
1515 | uint64_t ic_rss_hena; |
1516 | uint32_t (*ic_rd_ctl)(struct ixl_softc *, uint32_t); |
1517 | void (*ic_wr_ctl)(struct ixl_softc *, uint32_t, |
1518 | uint32_t); |
1519 | |
1520 | int (*ic_set_rss_key)(struct ixl_softc *, |
1521 | const struct ixl_rss_key *); |
1522 | int (*ic_set_rss_lut)(struct ixl_softc *, |
1523 | const struct ixl_rss_lut_128 *); |
1524 | }; |
1525 | |
1526 | static inline uint64_t |
1527 | ixl_rss_hena(struct ixl_softc *sc) |
1528 | { |
1529 | return (sc->sc_chip->ic_rss_hena); |
1530 | } |
1531 | |
1532 | static inline uint32_t |
1533 | ixl_rd_ctl(struct ixl_softc *sc, uint32_t r) |
1534 | { |
1535 | return ((*sc->sc_chip->ic_rd_ctl)(sc, r)); |
1536 | } |
1537 | |
1538 | static inline void |
1539 | ixl_wr_ctl(struct ixl_softc *sc, uint32_t r, uint32_t v) |
1540 | { |
1541 | (*sc->sc_chip->ic_wr_ctl)(sc, r, v); |
1542 | } |
1543 | |
1544 | static inline int |
1545 | ixl_set_rss_key(struct ixl_softc *sc, const struct ixl_rss_key *rsskey) |
1546 | { |
1547 | return ((*sc->sc_chip->ic_set_rss_key)(sc, rsskey)); |
1548 | } |
1549 | |
1550 | static inline int |
1551 | ixl_set_rss_lut(struct ixl_softc *sc, const struct ixl_rss_lut_128 *lut) |
1552 | { |
1553 | return ((*sc->sc_chip->ic_set_rss_lut)(sc, lut)); |
1554 | } |
1555 | |
1556 | /* 710 chip specifics */ |
1557 | |
1558 | static uint32_t ixl_710_rd_ctl(struct ixl_softc *, uint32_t); |
1559 | static void ixl_710_wr_ctl(struct ixl_softc *, uint32_t, uint32_t); |
1560 | static int ixl_710_set_rss_key(struct ixl_softc *, |
1561 | const struct ixl_rss_key *); |
1562 | static int ixl_710_set_rss_lut(struct ixl_softc *, |
1563 | const struct ixl_rss_lut_128 *); |
1564 | |
1565 | static const struct ixl_chip ixl_710 = { |
1566 | .ic_rss_hena = IXL_RSS_HENA_BASE_710(1ULL << 31) | (1ULL << 33) | (1ULL << 34) | (1ULL << 35) | (1ULL << 36) | (1ULL << 41) | (1ULL << 43) | (1ULL << 44) | (1ULL << 45 ) | (1ULL << 46) | (1ULL << 63), |
1567 | .ic_rd_ctl = ixl_710_rd_ctl, |
1568 | .ic_wr_ctl = ixl_710_wr_ctl, |
1569 | .ic_set_rss_key = ixl_710_set_rss_key, |
1570 | .ic_set_rss_lut = ixl_710_set_rss_lut, |
1571 | }; |
1572 | |
1573 | /* 722 chip specifics */ |
1574 | |
1575 | static uint32_t ixl_722_rd_ctl(struct ixl_softc *, uint32_t); |
1576 | static void ixl_722_wr_ctl(struct ixl_softc *, uint32_t, uint32_t); |
1577 | static int ixl_722_set_rss_key(struct ixl_softc *, |
1578 | const struct ixl_rss_key *); |
1579 | static int ixl_722_set_rss_lut(struct ixl_softc *, |
1580 | const struct ixl_rss_lut_128 *); |
1581 | |
1582 | static const struct ixl_chip ixl_722 = { |
1583 | .ic_rss_hena = IXL_RSS_HENA_BASE_722(1ULL << 31) | (1ULL << 33) | (1ULL << 34) | (1ULL << 35) | (1ULL << 36) | (1ULL << 41) | (1ULL << 43) | (1ULL << 44) | (1ULL << 45 ) | (1ULL << 46) | (1ULL << 63) | (1ULL << 29 ) | (1ULL << 30) | (1ULL << 39) | (1ULL << 40 ) | (1ULL << 32) | (1ULL << 42), |
1584 | .ic_rd_ctl = ixl_722_rd_ctl, |
1585 | .ic_wr_ctl = ixl_722_wr_ctl, |
1586 | .ic_set_rss_key = ixl_722_set_rss_key, |
1587 | .ic_set_rss_lut = ixl_722_set_rss_lut, |
1588 | }; |
1589 | |
/*
 * 710 chips running an older firmware/API use the same ctl ops as
 * 722 chips — or, seen the other way around, 722 chips kept the ctl
 * ops that 710 chips used in early firmware/API versions.
 */
1595 | |
1596 | static const struct ixl_chip ixl_710_decrepit = { |
1597 | .ic_rss_hena = IXL_RSS_HENA_BASE_710(1ULL << 31) | (1ULL << 33) | (1ULL << 34) | (1ULL << 35) | (1ULL << 36) | (1ULL << 41) | (1ULL << 43) | (1ULL << 44) | (1ULL << 45 ) | (1ULL << 46) | (1ULL << 63), |
1598 | .ic_rd_ctl = ixl_722_rd_ctl, |
1599 | .ic_wr_ctl = ixl_722_wr_ctl, |
1600 | .ic_set_rss_key = ixl_710_set_rss_key, |
1601 | .ic_set_rss_lut = ixl_710_set_rss_lut, |
1602 | }; |
1603 | |
1604 | /* driver code */ |
1605 | |
1606 | struct ixl_device { |
1607 | const struct ixl_chip *id_chip; |
1608 | pci_vendor_id_t id_vid; |
1609 | pci_product_id_t id_pid; |
1610 | }; |
1611 | |
1612 | static const struct ixl_device ixl_devices[] = { |
1613 | { &ixl_710, PCI_VENDOR_INTEL0x8086, PCI_PRODUCT_INTEL_X710_10G_SFP0x1572 }, |
1614 | { &ixl_710, PCI_VENDOR_INTEL0x8086, PCI_PRODUCT_INTEL_X710_10G_SFP_20x104e }, |
1615 | { &ixl_710, PCI_VENDOR_INTEL0x8086, PCI_PRODUCT_INTEL_XL710_40G_BP0x1580 }, |
1616 | { &ixl_710, PCI_VENDOR_INTEL0x8086, PCI_PRODUCT_INTEL_X710_10G_BP0x1581, }, |
1617 | { &ixl_710, PCI_VENDOR_INTEL0x8086, PCI_PRODUCT_INTEL_XL710_QSFP_10x1583 }, |
1618 | { &ixl_710, PCI_VENDOR_INTEL0x8086, PCI_PRODUCT_INTEL_XL710_QSFP_20x1584 }, |
1619 | { &ixl_710, PCI_VENDOR_INTEL0x8086, PCI_PRODUCT_INTEL_X710_10G_QSFP0x1585 }, |
1620 | { &ixl_710, PCI_VENDOR_INTEL0x8086, PCI_PRODUCT_INTEL_X710_10G_BASET0x1586 }, |
1621 | { &ixl_710, PCI_VENDOR_INTEL0x8086, PCI_PRODUCT_INTEL_XL710_20G_BP_10x1587 }, |
1622 | { &ixl_710, PCI_VENDOR_INTEL0x8086, PCI_PRODUCT_INTEL_XL710_20G_BP_20x1588 }, |
1623 | { &ixl_710, PCI_VENDOR_INTEL0x8086, PCI_PRODUCT_INTEL_X710_T4_10G0x1589 }, |
1624 | { &ixl_710, PCI_VENDOR_INTEL0x8086, PCI_PRODUCT_INTEL_XXV710_25G_BP0x158a }, |
1625 | { &ixl_710, PCI_VENDOR_INTEL0x8086, PCI_PRODUCT_INTEL_XXV710_25G_SFP280x158b, }, |
1626 | { &ixl_710, PCI_VENDOR_INTEL0x8086, PCI_PRODUCT_INTEL_X710_10G_T0x15ff, }, |
1627 | { &ixl_722, PCI_VENDOR_INTEL0x8086, PCI_PRODUCT_INTEL_X722_10G_KX0x37ce }, |
1628 | { &ixl_722, PCI_VENDOR_INTEL0x8086, PCI_PRODUCT_INTEL_X722_10G_QSFP0x37cf }, |
1629 | { &ixl_722, PCI_VENDOR_INTEL0x8086, PCI_PRODUCT_INTEL_X722_10G_SFP_10x37d0 }, |
1630 | { &ixl_722, PCI_VENDOR_INTEL0x8086, PCI_PRODUCT_INTEL_X722_1G0x37d1 }, |
1631 | { &ixl_722, PCI_VENDOR_INTEL0x8086, PCI_PRODUCT_INTEL_X722_10G_T0x37d2 }, |
1632 | { &ixl_722, PCI_VENDOR_INTEL0x8086, PCI_PRODUCT_INTEL_X722_10G_SFP_20x37d3 }, |
1633 | }; |
1634 | |
1635 | static const struct ixl_device * |
1636 | ixl_device_lookup(struct pci_attach_args *pa) |
1637 | { |
1638 | pci_vendor_id_t vid = PCI_VENDOR(pa->pa_id)(((pa->pa_id) >> 0) & 0xffff); |
1639 | pci_product_id_t pid = PCI_PRODUCT(pa->pa_id)(((pa->pa_id) >> 16) & 0xffff); |
1640 | const struct ixl_device *id; |
1641 | unsigned int i; |
1642 | |
1643 | for (i = 0; i < nitems(ixl_devices)(sizeof((ixl_devices)) / sizeof((ixl_devices)[0])); i++) { |
1644 | id = &ixl_devices[i]; |
1645 | if (id->id_vid == vid && id->id_pid == pid) |
1646 | return (id); |
1647 | } |
1648 | |
1649 | return (NULL((void *)0)); |
1650 | } |
1651 | |
1652 | static int |
1653 | ixl_match(struct device *parent, void *match, void *aux) |
1654 | { |
1655 | return (ixl_device_lookup(aux) != NULL((void *)0)); |
1656 | } |
1657 | |
1658 | void |
1659 | ixl_attach(struct device *parent, struct device *self, void *aux) |
1660 | { |
1661 | struct ixl_softc *sc = (struct ixl_softc *)self; |
1662 | struct ifnet *ifp = &sc->sc_ac.ac_if; |
1663 | struct pci_attach_args *pa = aux; |
1664 | pcireg_t memtype; |
1665 | uint32_t port, ari, func; |
1666 | uint64_t phy_types = 0; |
1667 | unsigned int nqueues, i; |
1668 | int tries; |
1669 | |
1670 | rw_init(&sc->sc_cfg_lock, "ixlcfg")_rw_init_flags(&sc->sc_cfg_lock, "ixlcfg", 0, ((void * )0)); |
1671 | |
1672 | sc->sc_chip = ixl_device_lookup(pa)->id_chip; |
1673 | sc->sc_pc = pa->pa_pc; |
1674 | sc->sc_tag = pa->pa_tag; |
1675 | sc->sc_dmat = pa->pa_dmat; |
1676 | sc->sc_aq_regs = &ixl_pf_aq_regs; |
1677 | |
1678 | sc->sc_nqueues = 0; /* 1 << 0 is 1 queue */ |
1679 | sc->sc_tx_ring_ndescs = 1024; |
1680 | sc->sc_rx_ring_ndescs = 1024; |
1681 | |
1682 | memtype = pci_mapreg_type(sc->sc_pc, sc->sc_tag, IXL_PCIREG0x10); |
1683 | if (pci_mapreg_map(pa, IXL_PCIREG0x10, memtype, 0, |
1684 | &sc->sc_memt, &sc->sc_memh, NULL((void *)0), &sc->sc_mems, 0)) { |
1685 | printf(": unable to map registers\n"); |
1686 | return; |
1687 | } |
1688 | |
1689 | sc->sc_base_queue = (ixl_rd(sc, I40E_PFLAN_QALLOC)(((sc)->sc_memt)->read_4(((sc)->sc_memh), ((0x001C0400 )))) & |
1690 | I40E_PFLAN_QALLOC_FIRSTQ_MASK((0x7FF) << (0))) >> |
1691 | I40E_PFLAN_QALLOC_FIRSTQ_SHIFT0; |
1692 | |
1693 | ixl_clear_hw(sc); |
1694 | if (ixl_pf_reset(sc) == -1) { |
1695 | /* error printed by ixl_pf_reset */ |
1696 | goto unmap; |
1697 | } |
1698 | |
1699 | port = ixl_rd(sc, I40E_PFGEN_PORTNUM)(((sc)->sc_memt)->read_4(((sc)->sc_memh), ((0x001C0480 )))); |
1700 | port &= I40E_PFGEN_PORTNUM_PORT_NUM_MASK((0x3) << (0)); |
1701 | port >>= I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT0; |
1702 | sc->sc_port = port; |
1703 | printf(": port %u", port); |
1704 | |
1705 | ari = ixl_rd(sc, I40E_GLPCI_CAPSUP)(((sc)->sc_memt)->read_4(((sc)->sc_memh), ((0x000BE4A8 )))); |
1706 | ari &= I40E_GLPCI_CAPSUP_ARI_EN_MASK((0x1) << (4)); |
1707 | ari >>= I40E_GLPCI_CAPSUP_ARI_EN_SHIFT4; |
1708 | |
1709 | func = ixl_rd(sc, I40E_PF_FUNC_RID)(((sc)->sc_memt)->read_4(((sc)->sc_memh), ((0x0009C000 )))); |
1710 | sc->sc_pf_id = func & (ari ? 0xff : 0x7); |
1711 | |
1712 | /* initialise the adminq */ |
1713 | |
1714 | if (ixl_dmamem_alloc(sc, &sc->sc_atq, |
1715 | sizeof(struct ixl_aq_desc) * IXL_AQ_NUM256, IXL_AQ_ALIGN64) != 0) { |
1716 | printf("\n" "%s: unable to allocate atq\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
1717 | goto unmap; |
1718 | } |
1719 | |
1720 | SIMPLEQ_INIT(&sc->sc_arq_idle)do { (&sc->sc_arq_idle)->sqh_first = ((void *)0); ( &sc->sc_arq_idle)->sqh_last = &(&sc->sc_arq_idle )->sqh_first; } while (0); |
1721 | SIMPLEQ_INIT(&sc->sc_arq_live)do { (&sc->sc_arq_live)->sqh_first = ((void *)0); ( &sc->sc_arq_live)->sqh_last = &(&sc->sc_arq_live )->sqh_first; } while (0); |
1722 | if_rxr_init(&sc->sc_arq_ring, 2, IXL_AQ_NUM256 - 1); |
1723 | task_set(&sc->sc_arq_task, ixl_arq, sc); |
1724 | sc->sc_arq_cons = 0; |
1725 | sc->sc_arq_prod = 0; |
1726 | |
1727 | if (ixl_dmamem_alloc(sc, &sc->sc_arq, |
1728 | sizeof(struct ixl_aq_desc) * IXL_AQ_NUM256, IXL_AQ_ALIGN64) != 0) { |
1729 | printf("\n" "%s: unable to allocate arq\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
1730 | goto free_atq; |
1731 | } |
1732 | |
1733 | if (!ixl_arq_fill(sc)) { |
1734 | printf("\n" "%s: unable to fill arq descriptors\n", |
1735 | DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
1736 | goto free_arq; |
1737 | } |
1738 | |
1739 | bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& sc->sc_atq)->ixm_map)), (0), (((&sc->sc_atq)-> ixm_size)), (0x01|0x04)) |
1740 | 0, IXL_DMA_LEN(&sc->sc_atq),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& sc->sc_atq)->ixm_map)), (0), (((&sc->sc_atq)-> ixm_size)), (0x01|0x04)) |
1741 | BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& sc->sc_atq)->ixm_map)), (0), (((&sc->sc_atq)-> ixm_size)), (0x01|0x04)); |
1742 | |
1743 | bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& sc->sc_arq)->ixm_map)), (0), (((&sc->sc_arq)-> ixm_size)), (0x01|0x04)) |
1744 | 0, IXL_DMA_LEN(&sc->sc_arq),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& sc->sc_arq)->ixm_map)), (0), (((&sc->sc_arq)-> ixm_size)), (0x01|0x04)) |
1745 | BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& sc->sc_arq)->ixm_map)), (0), (((&sc->sc_arq)-> ixm_size)), (0x01|0x04)); |
1746 | |
1747 | for (tries = 0; tries < 10; tries++) { |
1748 | int rv; |
1749 | |
1750 | sc->sc_atq_cons = 0; |
1751 | sc->sc_atq_prod = 0; |
1752 | |
1753 | ixl_wr(sc, sc->sc_aq_regs->atq_head, 0)(((sc)->sc_memt)->write_4(((sc)->sc_memh), ((sc-> sc_aq_regs->atq_head)), ((0)))); |
1754 | ixl_wr(sc, sc->sc_aq_regs->arq_head, 0)(((sc)->sc_memt)->write_4(((sc)->sc_memh), ((sc-> sc_aq_regs->arq_head)), ((0)))); |
1755 | ixl_wr(sc, sc->sc_aq_regs->atq_tail, 0)(((sc)->sc_memt)->write_4(((sc)->sc_memh), ((sc-> sc_aq_regs->atq_tail)), ((0)))); |
1756 | ixl_wr(sc, sc->sc_aq_regs->arq_tail, 0)(((sc)->sc_memt)->write_4(((sc)->sc_memh), ((sc-> sc_aq_regs->arq_tail)), ((0)))); |
1757 | |
1758 | ixl_barrier(sc, 0, sc->sc_mems, BUS_SPACE_BARRIER_WRITE)bus_space_barrier((sc)->sc_memt, (sc)->sc_memh, (0), (sc ->sc_mems), (0x02)); |
1759 | |
1760 | ixl_wr(sc, sc->sc_aq_regs->atq_bal,(((sc)->sc_memt)->write_4(((sc)->sc_memh), ((sc-> sc_aq_regs->atq_bal)), (((uint32_t)((&sc->sc_atq)-> ixm_map->dm_segs[0].ds_addr))))) |
1761 | ixl_dmamem_lo(&sc->sc_atq))(((sc)->sc_memt)->write_4(((sc)->sc_memh), ((sc-> sc_aq_regs->atq_bal)), (((uint32_t)((&sc->sc_atq)-> ixm_map->dm_segs[0].ds_addr))))); |
1762 | ixl_wr(sc, sc->sc_aq_regs->atq_bah,(((sc)->sc_memt)->write_4(((sc)->sc_memh), ((sc-> sc_aq_regs->atq_bah)), (((uint32_t)(((&sc->sc_atq)-> ixm_map->dm_segs[0].ds_addr) >> 32))))) |
1763 | ixl_dmamem_hi(&sc->sc_atq))(((sc)->sc_memt)->write_4(((sc)->sc_memh), ((sc-> sc_aq_regs->atq_bah)), (((uint32_t)(((&sc->sc_atq)-> ixm_map->dm_segs[0].ds_addr) >> 32))))); |
1764 | ixl_wr(sc, sc->sc_aq_regs->atq_len,(((sc)->sc_memt)->write_4(((sc)->sc_memh), ((sc-> sc_aq_regs->atq_len)), ((sc->sc_aq_regs->atq_len_enable | 256)))) |
1765 | sc->sc_aq_regs->atq_len_enable | IXL_AQ_NUM)(((sc)->sc_memt)->write_4(((sc)->sc_memh), ((sc-> sc_aq_regs->atq_len)), ((sc->sc_aq_regs->atq_len_enable | 256)))); |
1766 | |
1767 | ixl_wr(sc, sc->sc_aq_regs->arq_bal,(((sc)->sc_memt)->write_4(((sc)->sc_memh), ((sc-> sc_aq_regs->arq_bal)), (((uint32_t)((&sc->sc_arq)-> ixm_map->dm_segs[0].ds_addr))))) |
1768 | ixl_dmamem_lo(&sc->sc_arq))(((sc)->sc_memt)->write_4(((sc)->sc_memh), ((sc-> sc_aq_regs->arq_bal)), (((uint32_t)((&sc->sc_arq)-> ixm_map->dm_segs[0].ds_addr))))); |
1769 | ixl_wr(sc, sc->sc_aq_regs->arq_bah,(((sc)->sc_memt)->write_4(((sc)->sc_memh), ((sc-> sc_aq_regs->arq_bah)), (((uint32_t)(((&sc->sc_arq)-> ixm_map->dm_segs[0].ds_addr) >> 32))))) |
1770 | ixl_dmamem_hi(&sc->sc_arq))(((sc)->sc_memt)->write_4(((sc)->sc_memh), ((sc-> sc_aq_regs->arq_bah)), (((uint32_t)(((&sc->sc_arq)-> ixm_map->dm_segs[0].ds_addr) >> 32))))); |
1771 | ixl_wr(sc, sc->sc_aq_regs->arq_len,(((sc)->sc_memt)->write_4(((sc)->sc_memh), ((sc-> sc_aq_regs->arq_len)), ((sc->sc_aq_regs->arq_len_enable | 256)))) |
1772 | sc->sc_aq_regs->arq_len_enable | IXL_AQ_NUM)(((sc)->sc_memt)->write_4(((sc)->sc_memh), ((sc-> sc_aq_regs->arq_len)), ((sc->sc_aq_regs->arq_len_enable | 256)))); |
1773 | |
1774 | rv = ixl_get_version(sc); |
1775 | if (rv == 0) |
1776 | break; |
1777 | if (rv != ETIMEDOUT60) { |
1778 | printf(", unable to get firmware version\n"); |
1779 | goto shutdown; |
1780 | } |
1781 | |
1782 | delaymsec(100)(*delay_func)(1000 * (100)); |
1783 | } |
1784 | |
1785 | ixl_wr(sc, sc->sc_aq_regs->arq_tail, sc->sc_arq_prod)(((sc)->sc_memt)->write_4(((sc)->sc_memh), ((sc-> sc_aq_regs->arq_tail)), ((sc->sc_arq_prod)))); |
1786 | |
1787 | if (ixl_pxe_clear(sc) != 0) { |
1788 | /* error printed by ixl_pxe_clear */ |
1789 | goto shutdown; |
1790 | } |
1791 | |
1792 | if (ixl_get_mac(sc) != 0) { |
1793 | /* error printed by ixl_get_mac */ |
1794 | goto shutdown; |
1795 | } |
1796 | |
1797 | if (pci_intr_map_msix(pa, 0, &sc->sc_ih) == 0) { |
1798 | int nmsix = pci_intr_msix_count(pa); |
1799 | if (nmsix > 1) { /* we used 1 (the 0th) for the adminq */ |
1800 | nmsix--; |
1801 | |
1802 | sc->sc_intrmap = intrmap_create(&sc->sc_dev, |
1803 | nmsix, IXL_MAX_VECTORS8, INTRMAP_POWEROF2(1 << 0)); |
1804 | nqueues = intrmap_count(sc->sc_intrmap); |
1805 | KASSERT(nqueues > 0)((nqueues > 0) ? (void)0 : __assert("diagnostic ", "/usr/src/sys/dev/pci/if_ixl.c" , 1805, "nqueues > 0")); |
1806 | KASSERT(powerof2(nqueues))((((((nqueues)-1)&(nqueues))==0)) ? (void)0 : __assert("diagnostic " , "/usr/src/sys/dev/pci/if_ixl.c", 1806, "powerof2(nqueues)") ); |
1807 | sc->sc_nqueues = fls(nqueues) - 1; |
1808 | } |
1809 | } else { |
1810 | if (pci_intr_map_msi(pa, &sc->sc_ih) != 0 && |
1811 | pci_intr_map(pa, &sc->sc_ih) != 0) { |
1812 | printf(", unable to map interrupt\n"); |
1813 | goto shutdown; |
1814 | } |
1815 | } |
1816 | |
1817 | nqueues = ixl_nqueues(sc)(1 << (sc)->sc_nqueues); |
1818 | |
1819 | printf(", %s, %d queue%s, address %s\n", |
1820 | pci_intr_string(sc->sc_pc, sc->sc_ih), ixl_nqueues(sc)(1 << (sc)->sc_nqueues), |
1821 | (nqueues > 1 ? "s" : ""), |
1822 | ether_sprintf(sc->sc_ac.ac_enaddr)); |
1823 | |
1824 | if (ixl_hmc(sc) != 0) { |
1825 | /* error printed by ixl_hmc */ |
1826 | goto shutdown; |
1827 | } |
1828 | |
1829 | if (ixl_lldp_shut(sc) != 0) { |
1830 | /* error printed by ixl_lldp_shut */ |
1831 | goto free_hmc; |
1832 | } |
1833 | |
1834 | if (ixl_phy_mask_ints(sc) != 0) { |
1835 | /* error printed by ixl_phy_mask_ints */ |
1836 | goto free_hmc; |
1837 | } |
1838 | |
1839 | if (ixl_restart_an(sc) != 0) { |
1840 | /* error printed by ixl_restart_an */ |
1841 | goto free_hmc; |
1842 | } |
1843 | |
1844 | if (ixl_get_switch_config(sc) != 0) { |
1845 | /* error printed by ixl_get_switch_config */ |
1846 | goto free_hmc; |
1847 | } |
1848 | |
1849 | if (ixl_get_phy_types(sc, &phy_types) != 0) { |
1850 | /* error printed by ixl_get_phy_abilities */ |
1851 | goto free_hmc; |
1852 | } |
1853 | |
1854 | if (ixl_get_link_status(sc) != 0) { |
1855 | /* error printed by ixl_get_link_status */ |
1856 | goto free_hmc; |
1857 | } |
1858 | |
1859 | if (ixl_dmamem_alloc(sc, &sc->sc_scratch, |
1860 | sizeof(struct ixl_aq_vsi_data), 8) != 0) { |
1861 | printf("%s: unable to allocate scratch buffer\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
1862 | goto free_hmc; |
1863 | } |
1864 | |
1865 | if (ixl_get_vsi(sc) != 0) { |
1866 | /* error printed by ixl_get_vsi */ |
1867 | goto free_hmc; |
1868 | } |
1869 | |
1870 | if (ixl_set_vsi(sc) != 0) { |
1871 | /* error printed by ixl_set_vsi */ |
1872 | goto free_scratch; |
1873 | } |
1874 | |
1875 | sc->sc_ihc = pci_intr_establish(sc->sc_pc, sc->sc_ih, |
1876 | IPL_NET0x7 | IPL_MPSAFE0x100, ixl_intr0, sc, DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
1877 | if (sc->sc_ihc == NULL((void *)0)) { |
1878 | printf("%s: unable to establish interrupt handler\n", |
1879 | DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
1880 | goto free_scratch; |
1881 | } |
1882 | |
1883 | sc->sc_vectors = mallocarray(sizeof(*sc->sc_vectors), nqueues, |
1884 | M_DEVBUF2, M_WAITOK0x0001|M_CANFAIL0x0004|M_ZERO0x0008); |
1885 | if (sc->sc_vectors == NULL((void *)0)) { |
1886 | printf("%s: unable to allocate vectors\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
1887 | goto free_scratch; |
1888 | } |
1889 | |
1890 | for (i = 0; i < nqueues; i++) { |
1891 | struct ixl_vector *iv = &sc->sc_vectors[i]; |
1892 | iv->iv_sc = sc; |
1893 | iv->iv_qid = i; |
1894 | snprintf(iv->iv_name, sizeof(iv->iv_name), |
1895 | "%s:%u", DEVNAME(sc)((sc)->sc_dev.dv_xname), i); /* truncated? */ |
1896 | } |
1897 | |
1898 | if (sc->sc_intrmap) { |
1899 | for (i = 0; i < nqueues; i++) { |
1900 | struct ixl_vector *iv = &sc->sc_vectors[i]; |
1901 | pci_intr_handle_t ih; |
1902 | int v = i + 1; /* 0 is used for adminq */ |
1903 | |
1904 | if (pci_intr_map_msix(pa, v, &ih)) { |
1905 | printf("%s: unable to map msi-x vector %d\n", |
1906 | DEVNAME(sc)((sc)->sc_dev.dv_xname), v); |
1907 | goto free_vectors; |
1908 | } |
1909 | |
1910 | iv->iv_ihc = pci_intr_establish_cpu(sc->sc_pc, ih, |
1911 | IPL_NET0x7 | IPL_MPSAFE0x100, |
1912 | intrmap_cpu(sc->sc_intrmap, i), |
1913 | ixl_intr_vector, iv, iv->iv_name); |
1914 | if (iv->iv_ihc == NULL((void *)0)) { |
1915 | printf("%s: unable to establish interrupt %d\n", |
1916 | DEVNAME(sc)((sc)->sc_dev.dv_xname), v); |
1917 | goto free_vectors; |
1918 | } |
1919 | |
1920 | ixl_wr(sc, I40E_PFINT_DYN_CTLN(i),(((sc)->sc_memt)->write_4(((sc)->sc_memh), (((0x00034800 + ((i) * 4)))), ((((0x1) << (0)) | ((0x1) << (1) ) | (0x2 << 3))))) |
1921 | I40E_PFINT_DYN_CTLN_INTENA_MASK |(((sc)->sc_memt)->write_4(((sc)->sc_memh), (((0x00034800 + ((i) * 4)))), ((((0x1) << (0)) | ((0x1) << (1) ) | (0x2 << 3))))) |
1922 | I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |(((sc)->sc_memt)->write_4(((sc)->sc_memh), (((0x00034800 + ((i) * 4)))), ((((0x1) << (0)) | ((0x1) << (1) ) | (0x2 << 3))))) |
1923 | (IXL_NOITR << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT))(((sc)->sc_memt)->write_4(((sc)->sc_memh), (((0x00034800 + ((i) * 4)))), ((((0x1) << (0)) | ((0x1) << (1) ) | (0x2 << 3))))); |
1924 | } |
1925 | } |
1926 | |
1927 | /* fixup the chip ops for older fw releases */ |
1928 | if (sc->sc_chip == &ixl_710 && |
1929 | sc->sc_api_major == 1 && sc->sc_api_minor < 5) |
1930 | sc->sc_chip = &ixl_710_decrepit; |
1931 | |
1932 | ifp->if_softc = sc; |
1933 | ifp->if_flags = IFF_BROADCAST0x2 | IFF_SIMPLEX0x800 | IFF_MULTICAST0x8000; |
1934 | ifp->if_xflags = IFXF_MPSAFE0x1; |
1935 | ifp->if_ioctl = ixl_ioctl; |
1936 | ifp->if_qstart = ixl_start; |
1937 | ifp->if_watchdog = ixl_watchdog; |
1938 | ifp->if_hardmtu = IXL_HARDMTU9712; |
1939 | strlcpy(ifp->if_xname, DEVNAME(sc)((sc)->sc_dev.dv_xname), IFNAMSIZ16); |
1940 | ifq_set_maxlen(&ifp->if_snd, sc->sc_tx_ring_ndescs)((&ifp->if_snd)->ifq_maxlen = (sc->sc_tx_ring_ndescs )); |
1941 | |
1942 | ifp->if_capabilitiesif_data.ifi_capabilities = IFCAP_VLAN_MTU0x00000010; |
1943 | #if 0 |
1944 | ifp->if_capabilitiesif_data.ifi_capabilities |= IFCAP_VLAN_HWTAGGING0x00000020; |
1945 | ifp->if_capabilitiesif_data.ifi_capabilities |= IFCAP_CSUM_IPv40x00000001 | IFCAP_CSUM_TCPv40x00000002 | |
1946 | IFCAP_CSUM_UDPv40x00000004; |
1947 | #endif |
1948 | |
1949 | ifmedia_init(&sc->sc_media, 0, ixl_media_change, ixl_media_status); |
1950 | |
1951 | ixl_media_add(sc, phy_types); |
1952 | ifmedia_add(&sc->sc_media, IFM_ETHER0x0000000000000100ULL | IFM_AUTO0ULL, 0, NULL((void *)0)); |
1953 | ifmedia_set(&sc->sc_media, IFM_ETHER0x0000000000000100ULL | IFM_AUTO0ULL); |
1954 | |
1955 | if_attach(ifp); |
1956 | ether_ifattach(ifp); |
1957 | |
1958 | if_attach_queues(ifp, nqueues); |
1959 | if_attach_iqueues(ifp, nqueues); |
1960 | |
1961 | mtx_init(&sc->sc_link_state_mtx, IPL_NET)do { (void)(((void *)0)); (void)(0); __mtx_init((&sc-> sc_link_state_mtx), ((((0x7)) > 0x0 && ((0x7)) < 0x9) ? 0x9 : ((0x7)))); } while (0); |
1962 | task_set(&sc->sc_link_state_task, ixl_link_state_update, sc); |
1963 | ixl_wr(sc, I40E_PFINT_ICR0_ENA,(((sc)->sc_memt)->write_4(((sc)->sc_memh), ((0x00038800 )), ((((0x1) << (25)) | ((0x1) << (30)))))) |
1964 | I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK |(((sc)->sc_memt)->write_4(((sc)->sc_memh), ((0x00038800 )), ((((0x1) << (25)) | ((0x1) << (30)))))) |
1965 | I40E_PFINT_ICR0_ENA_ADMINQ_MASK)(((sc)->sc_memt)->write_4(((sc)->sc_memh), ((0x00038800 )), ((((0x1) << (25)) | ((0x1) << (30)))))); |
1966 | ixl_wr(sc, I40E_PFINT_STAT_CTL0,(((sc)->sc_memt)->write_4(((sc)->sc_memh), ((0x00038400 )), ((0x2 << 2)))) |
1967 | IXL_NOITR << I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT)(((sc)->sc_memt)->write_4(((sc)->sc_memh), ((0x00038400 )), ((0x2 << 2)))); |
1968 | |
1969 | /* remove default mac filter and replace it so we can see vlans */ |
1970 | ixl_remove_macvlan(sc, sc->sc_ac.ac_enaddr, 0, 0); |
1971 | ixl_remove_macvlan(sc, sc->sc_ac.ac_enaddr, 0, |
1972 | IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN0x0008); |
1973 | ixl_add_macvlan(sc, sc->sc_ac.ac_enaddr, 0, |
1974 | IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN0x0004); |
1975 | ixl_add_macvlan(sc, etherbroadcastaddr, 0, |
1976 | IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN0x0004); |
1977 | memcpy(sc->sc_enaddr, sc->sc_ac.ac_enaddr, ETHER_ADDR_LEN)__builtin_memcpy((sc->sc_enaddr), (sc->sc_ac.ac_enaddr) , (6)); |
1978 | |
1979 | ixl_intr_enable(sc)((((sc))->sc_memt)->write_4((((sc))->sc_memh), ((0x00038480 )), ((((0x1) << (0)) | ((0x1) << (1)) | (0x2 << 3))))); |
1980 | |
1981 | #if NKSTAT0 > 0 |
1982 | ixl_kstat_attach(sc); |
1983 | #endif |
1984 | |
1985 | return; |
1986 | free_vectors: |
1987 | if (sc->sc_intrmap != NULL((void *)0)) { |
1988 | for (i = 0; i < nqueues; i++) { |
1989 | struct ixl_vector *iv = &sc->sc_vectors[i]; |
1990 | if (iv->iv_ihc == NULL((void *)0)) |
1991 | continue; |
1992 | pci_intr_disestablish(sc->sc_pc, iv->iv_ihc); |
1993 | } |
1994 | } |
1995 | free(sc->sc_vectors, M_DEVBUF2, nqueues * sizeof(*sc->sc_vectors)); |
1996 | free_scratch: |
1997 | ixl_dmamem_free(sc, &sc->sc_scratch); |
1998 | free_hmc: |
1999 | ixl_hmc_free(sc); |
2000 | shutdown: |
2001 | ixl_wr(sc, sc->sc_aq_regs->atq_head, 0)(((sc)->sc_memt)->write_4(((sc)->sc_memh), ((sc-> sc_aq_regs->atq_head)), ((0)))); |
2002 | ixl_wr(sc, sc->sc_aq_regs->arq_head, 0)(((sc)->sc_memt)->write_4(((sc)->sc_memh), ((sc-> sc_aq_regs->arq_head)), ((0)))); |
2003 | ixl_wr(sc, sc->sc_aq_regs->atq_tail, 0)(((sc)->sc_memt)->write_4(((sc)->sc_memh), ((sc-> sc_aq_regs->atq_tail)), ((0)))); |
2004 | ixl_wr(sc, sc->sc_aq_regs->arq_tail, 0)(((sc)->sc_memt)->write_4(((sc)->sc_memh), ((sc-> sc_aq_regs->arq_tail)), ((0)))); |
2005 | |
2006 | ixl_wr(sc, sc->sc_aq_regs->atq_bal, 0)(((sc)->sc_memt)->write_4(((sc)->sc_memh), ((sc-> sc_aq_regs->atq_bal)), ((0)))); |
2007 | ixl_wr(sc, sc->sc_aq_regs->atq_bah, 0)(((sc)->sc_memt)->write_4(((sc)->sc_memh), ((sc-> sc_aq_regs->atq_bah)), ((0)))); |
2008 | ixl_wr(sc, sc->sc_aq_regs->atq_len, 0)(((sc)->sc_memt)->write_4(((sc)->sc_memh), ((sc-> sc_aq_regs->atq_len)), ((0)))); |
2009 | |
2010 | ixl_wr(sc, sc->sc_aq_regs->arq_bal, 0)(((sc)->sc_memt)->write_4(((sc)->sc_memh), ((sc-> sc_aq_regs->arq_bal)), ((0)))); |
2011 | ixl_wr(sc, sc->sc_aq_regs->arq_bah, 0)(((sc)->sc_memt)->write_4(((sc)->sc_memh), ((sc-> sc_aq_regs->arq_bah)), ((0)))); |
2012 | ixl_wr(sc, sc->sc_aq_regs->arq_len, 0)(((sc)->sc_memt)->write_4(((sc)->sc_memh), ((sc-> sc_aq_regs->arq_len)), ((0)))); |
2013 | |
2014 | bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& sc->sc_arq)->ixm_map)), (0), (((&sc->sc_arq)-> ixm_size)), (0x02)) |
2015 | 0, IXL_DMA_LEN(&sc->sc_arq),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& sc->sc_arq)->ixm_map)), (0), (((&sc->sc_arq)-> ixm_size)), (0x02)) |
2016 | BUS_DMASYNC_POSTREAD)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& sc->sc_arq)->ixm_map)), (0), (((&sc->sc_arq)-> ixm_size)), (0x02)); |
2017 | bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& sc->sc_atq)->ixm_map)), (0), (((&sc->sc_atq)-> ixm_size)), (0x02|0x08)) |
2018 | 0, IXL_DMA_LEN(&sc->sc_atq),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& sc->sc_atq)->ixm_map)), (0), (((&sc->sc_atq)-> ixm_size)), (0x02|0x08)) |
2019 | BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& sc->sc_atq)->ixm_map)), (0), (((&sc->sc_atq)-> ixm_size)), (0x02|0x08)); |
2020 | |
2021 | ixl_arq_unfill(sc); |
2022 | |
2023 | free_arq: |
2024 | ixl_dmamem_free(sc, &sc->sc_arq); |
2025 | free_atq: |
2026 | ixl_dmamem_free(sc, &sc->sc_atq); |
2027 | unmap: |
2028 | bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems); |
2029 | sc->sc_mems = 0; |
2030 | |
2031 | if (sc->sc_intrmap != NULL((void *)0)) |
2032 | intrmap_destroy(sc->sc_intrmap); |
2033 | } |
2034 | |
2035 | static void |
2036 | ixl_media_add(struct ixl_softc *sc, uint64_t phy_types) |
2037 | { |
2038 | struct ifmedia *ifm = &sc->sc_media; |
2039 | const struct ixl_phy_type *itype; |
2040 | unsigned int i; |
2041 | |
2042 | for (i = 0; i < nitems(ixl_phy_type_map)(sizeof((ixl_phy_type_map)) / sizeof((ixl_phy_type_map)[0])); i++) { |
2043 | itype = &ixl_phy_type_map[i]; |
2044 | |
2045 | if (ISSET(phy_types, itype->phy_type)((phy_types) & (itype->phy_type))) |
2046 | ifmedia_add(ifm, IFM_ETHER0x0000000000000100ULL | itype->ifm_type, 0, NULL((void *)0)); |
2047 | } |
2048 | } |
2049 | |
static int
ixl_media_change(struct ifnet *ifp)
{
	/*
	 * Media selection is not supported by this driver; reject the
	 * request outright.
	 */
	return (EOPNOTSUPP);
}
2056 | |
2057 | static void |
2058 | ixl_media_status(struct ifnet *ifp, struct ifmediareq *ifm) |
2059 | { |
2060 | struct ixl_softc *sc = ifp->if_softc; |
2061 | |
2062 | NET_ASSERT_LOCKED()do { int _s = rw_status(&netlock); if ((splassert_ctl > 0) && (_s != 0x0001UL && _s != 0x0002UL)) splassert_fail (0x0002UL, _s, __func__); } while (0); |
2063 | |
2064 | ifm->ifm_status = sc->sc_media_status; |
2065 | ifm->ifm_active = sc->sc_media_active; |
2066 | } |
2067 | |
static void
ixl_watchdog(struct ifnet *ifp)
{
	/* Intentionally empty: no watchdog handling is performed. */
}
2073 | |
2074 | int |
2075 | ixl_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) |
2076 | { |
2077 | struct ixl_softc *sc = (struct ixl_softc *)ifp->if_softc; |
2078 | struct ifreq *ifr = (struct ifreq *)data; |
2079 | uint8_t addrhi[ETHER_ADDR_LEN6], addrlo[ETHER_ADDR_LEN6]; |
2080 | int aqerror, error = 0; |
2081 | |
2082 | switch (cmd) { |
2083 | case SIOCSIFADDR((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff ) << 16) | ((('i')) << 8) | ((12))): |
2084 | ifp->if_flags |= IFF_UP0x1; |
2085 | /* FALLTHROUGH */ |
2086 | |
2087 | case SIOCSIFFLAGS((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff ) << 16) | ((('i')) << 8) | ((16))): |
2088 | if (ISSET(ifp->if_flags, IFF_UP)((ifp->if_flags) & (0x1))) { |
2089 | if (ISSET(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) & (0x40))) |
2090 | error = ENETRESET52; |
2091 | else |
2092 | error = ixl_up(sc); |
2093 | } else { |
2094 | if (ISSET(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) & (0x40))) |
2095 | error = ixl_down(sc); |
2096 | } |
2097 | break; |
2098 | |
2099 | case SIOCGIFMEDIA(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof (struct ifmediareq) & 0x1fff) << 16) | ((('i')) << 8) | ((56))): |
2100 | case SIOCSIFMEDIA(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof (struct ifreq) & 0x1fff) << 16) | ((('i')) << 8) | ((55))): |
2101 | error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd); |
2102 | break; |
2103 | |
2104 | case SIOCGIFRXR((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff ) << 16) | ((('i')) << 8) | ((170))): |
2105 | error = ixl_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_dataifr_ifru.ifru_data); |
2106 | break; |
2107 | |
2108 | case SIOCADDMULTI((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff ) << 16) | ((('i')) << 8) | ((49))): |
2109 | if (ether_addmulti(ifr, &sc->sc_ac) == ENETRESET52) { |
2110 | error = ether_multiaddr(&ifr->ifr_addrifr_ifru.ifru_addr, addrlo, addrhi); |
2111 | if (error != 0) |
2112 | return (error); |
2113 | |
2114 | aqerror = ixl_add_macvlan(sc, addrlo, 0, |
2115 | IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN0x0004); |
2116 | if (aqerror == IXL_AQ_RC_ENOSPC16) { |
2117 | ether_delmulti(ifr, &sc->sc_ac); |
2118 | error = ENOSPC28; |
2119 | } |
2120 | |
2121 | if (sc->sc_ac.ac_multirangecnt > 0) { |
2122 | SET(ifp->if_flags, IFF_ALLMULTI)((ifp->if_flags) |= (0x200)); |
2123 | error = ENETRESET52; |
2124 | } |
2125 | } |
2126 | break; |
2127 | |
2128 | case SIOCDELMULTI((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff ) << 16) | ((('i')) << 8) | ((50))): |
2129 | if (ether_delmulti(ifr, &sc->sc_ac) == ENETRESET52) { |
2130 | error = ether_multiaddr(&ifr->ifr_addrifr_ifru.ifru_addr, addrlo, addrhi); |
2131 | if (error != 0) |
2132 | return (error); |
2133 | |
2134 | ixl_remove_macvlan(sc, addrlo, 0, |
2135 | IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN0x0008); |
2136 | |
2137 | if (ISSET(ifp->if_flags, IFF_ALLMULTI)((ifp->if_flags) & (0x200)) && |
2138 | sc->sc_ac.ac_multirangecnt == 0) { |
2139 | CLR(ifp->if_flags, IFF_ALLMULTI)((ifp->if_flags) &= ~(0x200)); |
2140 | error = ENETRESET52; |
2141 | } |
2142 | } |
2143 | break; |
2144 | |
2145 | case SIOCGIFSFFPAGE(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof (struct if_sffpage) & 0x1fff) << 16) | ((('i')) << 8) | ((57))): |
2146 | error = rw_enter(&ixl_sff_lock, RW_WRITE0x0001UL|RW_INTR0x0010UL); |
2147 | if (error != 0) |
2148 | break; |
2149 | |
2150 | error = ixl_get_sffpage(sc, (struct if_sffpage *)data); |
2151 | rw_exit(&ixl_sff_lock); |
2152 | break; |
2153 | |
2154 | default: |
2155 | error = ether_ioctl(ifp, &sc->sc_ac, cmd, data); |
2156 | break; |
2157 | } |
2158 | |
2159 | if (error == ENETRESET52) |
2160 | error = ixl_iff(sc); |
2161 | |
2162 | return (error); |
2163 | } |
2164 | |
2165 | static inline void * |
2166 | ixl_hmc_kva(struct ixl_softc *sc, unsigned int type, unsigned int i) |
2167 | { |
2168 | uint8_t *kva = IXL_DMA_KVA(&sc->sc_hmc_pd)((void *)(&sc->sc_hmc_pd)->ixm_kva); |
2169 | struct ixl_hmc_entry *e = &sc->sc_hmc_entries[type]; |
2170 | |
2171 | if (i >= e->hmc_count) |
2172 | return (NULL((void *)0)); |
2173 | |
2174 | kva += e->hmc_base; |
2175 | kva += i * e->hmc_size; |
2176 | |
2177 | return (kva); |
2178 | } |
2179 | |
2180 | static inline size_t |
2181 | ixl_hmc_len(struct ixl_softc *sc, unsigned int type) |
2182 | { |
2183 | struct ixl_hmc_entry *e = &sc->sc_hmc_entries[type]; |
2184 | |
2185 | return (e->hmc_size); |
2186 | } |
2187 | |
2188 | static int |
2189 | ixl_configure_rss(struct ixl_softc *sc) |
2190 | { |
2191 | struct ixl_rss_key rsskey; |
2192 | struct ixl_rss_lut_128 lut; |
2193 | uint8_t *lute = (uint8_t *)&lut; |
2194 | uint64_t rss_hena; |
2195 | unsigned int i, nqueues; |
2196 | int error; |
2197 | |
2198 | #if 0 |
2199 | /* if we want to do a 512 entry LUT, do this. */ |
2200 | uint32_t v = ixl_rd_ctl(sc, I40E_PFQF_CTL_00x001C0AC0); |
2201 | SET(v, I40E_PFQF_CTL_0_HASHLUTSIZE_MASK)((v) |= (((0x1) << (16)))); |
2202 | ixl_wr_ctl(sc, I40E_PFQF_CTL_00x001C0AC0, v); |
2203 | #endif |
2204 | |
2205 | stoeplitz_to_key(&rsskey, sizeof(rsskey)); |
2206 | |
2207 | nqueues = ixl_nqueues(sc)(1 << (sc)->sc_nqueues); |
2208 | for (i = 0; i < sizeof(lut); i++) { |
2209 | /* |
2210 | * ixl must have a power of 2 rings, so using mod |
2211 | * to populate the table is fine. |
2212 | */ |
2213 | lute[i] = i % nqueues; |
2214 | } |
2215 | |
2216 | error = ixl_set_rss_key(sc, &rsskey); |
2217 | if (error != 0) |
2218 | return (error); |
2219 | |
2220 | rss_hena = (uint64_t)ixl_rd_ctl(sc, I40E_PFQF_HENA(0)(0x00245900 + ((0) * 128))); |
2221 | rss_hena |= (uint64_t)ixl_rd_ctl(sc, I40E_PFQF_HENA(1)(0x00245900 + ((1) * 128))) << 32; |
2222 | rss_hena |= ixl_rss_hena(sc); |
2223 | ixl_wr_ctl(sc, I40E_PFQF_HENA(0)(0x00245900 + ((0) * 128)), rss_hena); |
2224 | ixl_wr_ctl(sc, I40E_PFQF_HENA(1)(0x00245900 + ((1) * 128)), rss_hena >> 32); |
2225 | |
2226 | error = ixl_set_rss_lut(sc, &lut); |
2227 | if (error != 0) |
2228 | return (error); |
2229 | |
2230 | /* nothing to clena up :( */ |
2231 | |
2232 | return (0); |
2233 | } |
2234 | |
2235 | static int |
2236 | ixl_up(struct ixl_softc *sc) |
2237 | { |
2238 | struct ifnet *ifp = &sc->sc_ac.ac_if; |
2239 | struct ifqueue *ifq; |
2240 | struct ifiqueue *ifiq; |
2241 | struct ixl_vector *iv; |
2242 | struct ixl_rx_ring *rxr; |
2243 | struct ixl_tx_ring *txr; |
2244 | unsigned int nqueues, i; |
2245 | uint32_t reg; |
2246 | int rv = ENOMEM12; |
2247 | |
2248 | nqueues = ixl_nqueues(sc)(1 << (sc)->sc_nqueues); |
2249 | |
2250 | rw_enter_write(&sc->sc_cfg_lock); |
2251 | if (sc->sc_dead) { |
2252 | rw_exit_write(&sc->sc_cfg_lock); |
2253 | return (ENXIO6); |
2254 | } |
2255 | |
2256 | /* allocation is the only thing that can fail, so do it up front */ |
2257 | for (i = 0; i < nqueues; i++) { |
2258 | rxr = ixl_rxr_alloc(sc, i); |
2259 | if (rxr == NULL((void *)0)) |
2260 | goto free; |
2261 | |
2262 | txr = ixl_txr_alloc(sc, i); |
2263 | if (txr == NULL((void *)0)) { |
2264 | ixl_rxr_free(sc, rxr); |
2265 | goto free; |
2266 | } |
2267 | |
2268 | /* wire everything together */ |
2269 | iv = &sc->sc_vectors[i]; |
2270 | iv->iv_rxr = rxr; |
2271 | iv->iv_txr = txr; |
2272 | |
2273 | ifq = ifp->if_ifqs[i]; |
2274 | ifq->ifq_softc_ifq_ptr._ifq_softc = txr; |
2275 | txr->txr_ifq = ifq; |
2276 | |
2277 | ifiq = ifp->if_iqs[i]; |
2278 | ifiq->ifiq_softc_ifiq_ptr._ifiq_softc = rxr; |
2279 | rxr->rxr_ifiq = ifiq; |
2280 | } |
2281 | |
2282 | /* XXX wait 50ms from completion of last RX queue disable */ |
2283 | |
2284 | for (i = 0; i < nqueues; i++) { |
2285 | iv = &sc->sc_vectors[i]; |
2286 | rxr = iv->iv_rxr; |
2287 | txr = iv->iv_txr; |
2288 | |
2289 | ixl_txr_qdis(sc, txr, 1); |
2290 | |
2291 | ixl_rxr_config(sc, rxr); |
2292 | ixl_txr_config(sc, txr); |
2293 | |
2294 | ixl_wr(sc, I40E_QTX_CTL(i), I40E_QTX_CTL_PF_QUEUE |(((sc)->sc_memt)->write_4(((sc)->sc_memh), (((0x00104000 + ((i) * 4)))), ((0x2 | (sc->sc_pf_id << 2))))) |
2295 | (sc->sc_pf_id << I40E_QTX_CTL_PF_INDX_SHIFT))(((sc)->sc_memt)->write_4(((sc)->sc_memh), (((0x00104000 + ((i) * 4)))), ((0x2 | (sc->sc_pf_id << 2))))); |
2296 | |
2297 | ixl_wr(sc, rxr->rxr_tail, 0)(((sc)->sc_memt)->write_4(((sc)->sc_memh), ((rxr-> rxr_tail)), ((0)))); |
2298 | ixl_rxfill(sc, rxr); |
2299 | |
2300 | reg = ixl_rd(sc, I40E_QRX_ENA(i))(((sc)->sc_memt)->read_4(((sc)->sc_memh), (((0x00120000 + ((i) * 4)))))); |
2301 | SET(reg, I40E_QRX_ENA_QENA_REQ_MASK)((reg) |= (((0x1) << (0)))); |
2302 | ixl_wr(sc, I40E_QRX_ENA(i), reg)(((sc)->sc_memt)->write_4(((sc)->sc_memh), (((0x00120000 + ((i) * 4)))), ((reg)))); |
2303 | |
2304 | reg = ixl_rd(sc, I40E_QTX_ENA(i))(((sc)->sc_memt)->read_4(((sc)->sc_memh), (((0x00100000 + ((i) * 4)))))); |
2305 | SET(reg, I40E_QTX_ENA_QENA_REQ_MASK)((reg) |= (((0x1) << (0)))); |
2306 | ixl_wr(sc, I40E_QTX_ENA(i), reg)(((sc)->sc_memt)->write_4(((sc)->sc_memh), (((0x00100000 + ((i) * 4)))), ((reg)))); |
2307 | } |
2308 | |
2309 | for (i = 0; i < nqueues; i++) { |
2310 | iv = &sc->sc_vectors[i]; |
2311 | rxr = iv->iv_rxr; |
2312 | txr = iv->iv_txr; |
2313 | |
2314 | if (ixl_rxr_enabled(sc, rxr) != 0) |
2315 | goto down; |
2316 | |
2317 | if (ixl_txr_enabled(sc, txr) != 0) |
2318 | goto down; |
2319 | } |
2320 | |
2321 | ixl_configure_rss(sc); |
2322 | |
2323 | SET(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) |= (0x40)); |
2324 | |
2325 | if (sc->sc_intrmap == NULL((void *)0)) { |
2326 | ixl_wr(sc, I40E_PFINT_LNKLST0,(((sc)->sc_memt)->write_4(((sc)->sc_memh), ((0x00038500 )), (((0 << 0) | (0x0 << 11))))) |
2327 | (I40E_INTR_NOTX_QUEUE <<(((sc)->sc_memt)->write_4(((sc)->sc_memh), ((0x00038500 )), (((0 << 0) | (0x0 << 11))))) |
2328 | I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT) |(((sc)->sc_memt)->write_4(((sc)->sc_memh), ((0x00038500 )), (((0 << 0) | (0x0 << 11))))) |
2329 | (I40E_QUEUE_TYPE_RX <<(((sc)->sc_memt)->write_4(((sc)->sc_memh), ((0x00038500 )), (((0 << 0) | (0x0 << 11))))) |
2330 | I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT))(((sc)->sc_memt)->write_4(((sc)->sc_memh), ((0x00038500 )), (((0 << 0) | (0x0 << 11))))); |
2331 | |
2332 | ixl_wr(sc, I40E_QINT_RQCTL(I40E_INTR_NOTX_QUEUE),(((sc)->sc_memt)->write_4(((sc)->sc_memh), (((0x0003A000 + ((0) * 4)))), (((0 << 0) | (0x0 << 11) | (0 << 13) | (0 << 16) | (0x1 << 27) | ((0x1) << ( 30)))))) |
2333 | (I40E_INTR_NOTX_INTR << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |(((sc)->sc_memt)->write_4(((sc)->sc_memh), (((0x0003A000 + ((0) * 4)))), (((0 << 0) | (0x0 << 11) | (0 << 13) | (0 << 16) | (0x1 << 27) | ((0x1) << ( 30)))))) |
2334 | (I40E_ITR_INDEX_RX << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |(((sc)->sc_memt)->write_4(((sc)->sc_memh), (((0x0003A000 + ((0) * 4)))), (((0 << 0) | (0x0 << 11) | (0 << 13) | (0 << 16) | (0x1 << 27) | ((0x1) << ( 30)))))) |
2335 | (I40E_INTR_NOTX_RX_QUEUE <<(((sc)->sc_memt)->write_4(((sc)->sc_memh), (((0x0003A000 + ((0) * 4)))), (((0 << 0) | (0x0 << 11) | (0 << 13) | (0 << 16) | (0x1 << 27) | ((0x1) << ( 30)))))) |
2336 | I40E_QINT_RQCTL_MSIX0_INDX_SHIFT) |(((sc)->sc_memt)->write_4(((sc)->sc_memh), (((0x0003A000 + ((0) * 4)))), (((0 << 0) | (0x0 << 11) | (0 << 13) | (0 << 16) | (0x1 << 27) | ((0x1) << ( 30)))))) |
2337 | (I40E_INTR_NOTX_QUEUE << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |(((sc)->sc_memt)->write_4(((sc)->sc_memh), (((0x0003A000 + ((0) * 4)))), (((0 << 0) | (0x0 << 11) | (0 << 13) | (0 << 16) | (0x1 << 27) | ((0x1) << ( 30)))))) |
2338 | (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |(((sc)->sc_memt)->write_4(((sc)->sc_memh), (((0x0003A000 + ((0) * 4)))), (((0 << 0) | (0x0 << 11) | (0 << 13) | (0 << 16) | (0x1 << 27) | ((0x1) << ( 30)))))) |
2339 | I40E_QINT_RQCTL_CAUSE_ENA_MASK)(((sc)->sc_memt)->write_4(((sc)->sc_memh), (((0x0003A000 + ((0) * 4)))), (((0 << 0) | (0x0 << 11) | (0 << 13) | (0 << 16) | (0x1 << 27) | ((0x1) << ( 30)))))); |
2340 | |
2341 | ixl_wr(sc, I40E_QINT_TQCTL(I40E_INTR_NOTX_QUEUE),(((sc)->sc_memt)->write_4(((sc)->sc_memh), (((0x0003C000 + ((0) * 4)))), (((0 << 0) | (0x1 << 11) | (1 << 13) | (0x7ff << 16) | (0x0 << 27) | ((0x1) << (30)))))) |
2342 | (I40E_INTR_NOTX_INTR << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |(((sc)->sc_memt)->write_4(((sc)->sc_memh), (((0x0003C000 + ((0) * 4)))), (((0 << 0) | (0x1 << 11) | (1 << 13) | (0x7ff << 16) | (0x0 << 27) | ((0x1) << (30)))))) |
2343 | (I40E_ITR_INDEX_TX << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |(((sc)->sc_memt)->write_4(((sc)->sc_memh), (((0x0003C000 + ((0) * 4)))), (((0 << 0) | (0x1 << 11) | (1 << 13) | (0x7ff << 16) | (0x0 << 27) | ((0x1) << (30)))))) |
2344 | (I40E_INTR_NOTX_TX_QUEUE <<(((sc)->sc_memt)->write_4(((sc)->sc_memh), (((0x0003C000 + ((0) * 4)))), (((0 << 0) | (0x1 << 11) | (1 << 13) | (0x7ff << 16) | (0x0 << 27) | ((0x1) << (30)))))) |
2345 | I40E_QINT_TQCTL_MSIX0_INDX_SHIFT) |(((sc)->sc_memt)->write_4(((sc)->sc_memh), (((0x0003C000 + ((0) * 4)))), (((0 << 0) | (0x1 << 11) | (1 << 13) | (0x7ff << 16) | (0x0 << 27) | ((0x1) << (30)))))) |
2346 | (I40E_QUEUE_TYPE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |(((sc)->sc_memt)->write_4(((sc)->sc_memh), (((0x0003C000 + ((0) * 4)))), (((0 << 0) | (0x1 << 11) | (1 << 13) | (0x7ff << 16) | (0x0 << 27) | ((0x1) << (30)))))) |
2347 | (I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT) |(((sc)->sc_memt)->write_4(((sc)->sc_memh), (((0x0003C000 + ((0) * 4)))), (((0 << 0) | (0x1 << 11) | (1 << 13) | (0x7ff << 16) | (0x0 << 27) | ((0x1) << (30)))))) |
2348 | I40E_QINT_TQCTL_CAUSE_ENA_MASK)(((sc)->sc_memt)->write_4(((sc)->sc_memh), (((0x0003C000 + ((0) * 4)))), (((0 << 0) | (0x1 << 11) | (1 << 13) | (0x7ff << 16) | (0x0 << 27) | ((0x1) << (30)))))); |
2349 | } else { |
2350 | /* vector 0 has no queues */ |
2351 | ixl_wr(sc, I40E_PFINT_LNKLST0,(((sc)->sc_memt)->write_4(((sc)->sc_memh), ((0x00038500 )), ((0x7ff << 0)))) |
2352 | I40E_QUEUE_TYPE_EOL <<(((sc)->sc_memt)->write_4(((sc)->sc_memh), ((0x00038500 )), ((0x7ff << 0)))) |
2353 | I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT)(((sc)->sc_memt)->write_4(((sc)->sc_memh), ((0x00038500 )), ((0x7ff << 0)))); |
2354 | |
2355 | /* queue n is mapped to vector n+1 */ |
2356 | for (i = 0; i < nqueues; i++) { |
2357 | /* LNKLSTN(i) configures vector i+1 */ |
2358 | ixl_wr(sc, I40E_PFINT_LNKLSTN(i),(((sc)->sc_memt)->write_4(((sc)->sc_memh), (((0x00035000 + ((i) * 4)))), (((i << 0) | (0x0 << 11))))) |
2359 | (i << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |(((sc)->sc_memt)->write_4(((sc)->sc_memh), (((0x00035000 + ((i) * 4)))), (((i << 0) | (0x0 << 11))))) |
2360 | (I40E_QUEUE_TYPE_RX <<(((sc)->sc_memt)->write_4(((sc)->sc_memh), (((0x00035000 + ((i) * 4)))), (((i << 0) | (0x0 << 11))))) |
2361 | I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT))(((sc)->sc_memt)->write_4(((sc)->sc_memh), (((0x00035000 + ((i) * 4)))), (((i << 0) | (0x0 << 11))))); |
2362 | ixl_wr(sc, I40E_QINT_RQCTL(i),(((sc)->sc_memt)->write_4(((sc)->sc_memh), (((0x0003A000 + ((i) * 4)))), ((((i+1) << 0) | (0x0 << 11) | ( i << 16) | (0x1 << 27) | ((0x1) << (30))))) ) |
2363 | ((i+1) << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |(((sc)->sc_memt)->write_4(((sc)->sc_memh), (((0x0003A000 + ((i) * 4)))), ((((i+1) << 0) | (0x0 << 11) | ( i << 16) | (0x1 << 27) | ((0x1) << (30))))) ) |
2364 | (I40E_ITR_INDEX_RX <<(((sc)->sc_memt)->write_4(((sc)->sc_memh), (((0x0003A000 + ((i) * 4)))), ((((i+1) << 0) | (0x0 << 11) | ( i << 16) | (0x1 << 27) | ((0x1) << (30))))) ) |
2365 | I40E_QINT_RQCTL_ITR_INDX_SHIFT) |(((sc)->sc_memt)->write_4(((sc)->sc_memh), (((0x0003A000 + ((i) * 4)))), ((((i+1) << 0) | (0x0 << 11) | ( i << 16) | (0x1 << 27) | ((0x1) << (30))))) ) |
2366 | (i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |(((sc)->sc_memt)->write_4(((sc)->sc_memh), (((0x0003A000 + ((i) * 4)))), ((((i+1) << 0) | (0x0 << 11) | ( i << 16) | (0x1 << 27) | ((0x1) << (30))))) ) |
2367 | (I40E_QUEUE_TYPE_TX <<(((sc)->sc_memt)->write_4(((sc)->sc_memh), (((0x0003A000 + ((i) * 4)))), ((((i+1) << 0) | (0x0 << 11) | ( i << 16) | (0x1 << 27) | ((0x1) << (30))))) ) |
2368 | I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |(((sc)->sc_memt)->write_4(((sc)->sc_memh), (((0x0003A000 + ((i) * 4)))), ((((i+1) << 0) | (0x0 << 11) | ( i << 16) | (0x1 << 27) | ((0x1) << (30))))) ) |
2369 | I40E_QINT_RQCTL_CAUSE_ENA_MASK)(((sc)->sc_memt)->write_4(((sc)->sc_memh), (((0x0003A000 + ((i) * 4)))), ((((i+1) << 0) | (0x0 << 11) | ( i << 16) | (0x1 << 27) | ((0x1) << (30))))) ); |
2370 | ixl_wr(sc, I40E_QINT_TQCTL(i),(((sc)->sc_memt)->write_4(((sc)->sc_memh), (((0x0003C000 + ((i) * 4)))), ((((i+1) << 0) | (0x1 << 11) | ( 0x7ff << 16) | (0x0 << 27) | ((0x1) << (30) ))))) |
2371 | ((i+1) << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |(((sc)->sc_memt)->write_4(((sc)->sc_memh), (((0x0003C000 + ((i) * 4)))), ((((i+1) << 0) | (0x1 << 11) | ( 0x7ff << 16) | (0x0 << 27) | ((0x1) << (30) ))))) |
2372 | (I40E_ITR_INDEX_TX <<(((sc)->sc_memt)->write_4(((sc)->sc_memh), (((0x0003C000 + ((i) * 4)))), ((((i+1) << 0) | (0x1 << 11) | ( 0x7ff << 16) | (0x0 << 27) | ((0x1) << (30) ))))) |
2373 | I40E_QINT_TQCTL_ITR_INDX_SHIFT) |(((sc)->sc_memt)->write_4(((sc)->sc_memh), (((0x0003C000 + ((i) * 4)))), ((((i+1) << 0) | (0x1 << 11) | ( 0x7ff << 16) | (0x0 << 27) | ((0x1) << (30) ))))) |
2374 | (I40E_QUEUE_TYPE_EOL <<(((sc)->sc_memt)->write_4(((sc)->sc_memh), (((0x0003C000 + ((i) * 4)))), ((((i+1) << 0) | (0x1 << 11) | ( 0x7ff << 16) | (0x0 << 27) | ((0x1) << (30) ))))) |
2375 | I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |(((sc)->sc_memt)->write_4(((sc)->sc_memh), (((0x0003C000 + ((i) * 4)))), ((((i+1) << 0) | (0x1 << 11) | ( 0x7ff << 16) | (0x0 << 27) | ((0x1) << (30) ))))) |
2376 | (I40E_QUEUE_TYPE_RX <<(((sc)->sc_memt)->write_4(((sc)->sc_memh), (((0x0003C000 + ((i) * 4)))), ((((i+1) << 0) | (0x1 << 11) | ( 0x7ff << 16) | (0x0 << 27) | ((0x1) << (30) ))))) |
2377 | I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT) |(((sc)->sc_memt)->write_4(((sc)->sc_memh), (((0x0003C000 + ((i) * 4)))), ((((i+1) << 0) | (0x1 << 11) | ( 0x7ff << 16) | (0x0 << 27) | ((0x1) << (30) ))))) |
2378 | I40E_QINT_TQCTL_CAUSE_ENA_MASK)(((sc)->sc_memt)->write_4(((sc)->sc_memh), (((0x0003C000 + ((i) * 4)))), ((((i+1) << 0) | (0x1 << 11) | ( 0x7ff << 16) | (0x0 << 27) | ((0x1) << (30) ))))); |
2379 | |
2380 | ixl_wr(sc, I40E_PFINT_ITRN(0, i), 0x7a)(((sc)->sc_memt)->write_4(((sc)->sc_memh), (((0x00030000 + ((0) * 2048 + (i) * 4)))), ((0x7a)))); |
2381 | ixl_wr(sc, I40E_PFINT_ITRN(1, i), 0x7a)(((sc)->sc_memt)->write_4(((sc)->sc_memh), (((0x00030000 + ((1) * 2048 + (i) * 4)))), ((0x7a)))); |
2382 | ixl_wr(sc, I40E_PFINT_ITRN(2, i), 0)(((sc)->sc_memt)->write_4(((sc)->sc_memh), (((0x00030000 + ((2) * 2048 + (i) * 4)))), ((0)))); |
2383 | } |
2384 | } |
2385 | |
2386 | ixl_wr(sc, I40E_PFINT_ITR0(0), 0x7a)(((sc)->sc_memt)->write_4(((sc)->sc_memh), (((0x00038000 + ((0) * 128)))), ((0x7a)))); |
2387 | ixl_wr(sc, I40E_PFINT_ITR0(1), 0x7a)(((sc)->sc_memt)->write_4(((sc)->sc_memh), (((0x00038000 + ((1) * 128)))), ((0x7a)))); |
2388 | ixl_wr(sc, I40E_PFINT_ITR0(2), 0)(((sc)->sc_memt)->write_4(((sc)->sc_memh), (((0x00038000 + ((2) * 128)))), ((0)))); |
2389 | |
2390 | rw_exit_write(&sc->sc_cfg_lock); |
2391 | |
2392 | return (ENETRESET52); |
2393 | |
2394 | free: |
2395 | for (i = 0; i < nqueues; i++) { |
2396 | iv = &sc->sc_vectors[i]; |
2397 | rxr = iv->iv_rxr; |
2398 | txr = iv->iv_txr; |
2399 | |
2400 | if (rxr == NULL((void *)0)) { |
2401 | /* |
2402 | * tx and rx get set at the same time, so if one |
2403 | * is NULL, the other is too. |
2404 | */ |
2405 | continue; |
2406 | } |
2407 | |
2408 | ixl_txr_free(sc, txr); |
2409 | ixl_rxr_free(sc, rxr); |
2410 | } |
2411 | rw_exit_write(&sc->sc_cfg_lock); |
2412 | return (rv); |
2413 | down: |
2414 | rw_exit_write(&sc->sc_cfg_lock); |
2415 | ixl_down(sc); |
2416 | return (ETIMEDOUT60); |
2417 | } |
2418 | |
2419 | static int |
2420 | ixl_iff(struct ixl_softc *sc) |
2421 | { |
2422 | struct ifnet *ifp = &sc->sc_ac.ac_if; |
2423 | struct ixl_atq iatq; |
2424 | struct ixl_aq_desc *iaq; |
2425 | struct ixl_aq_vsi_promisc_param *param; |
2426 | |
2427 | if (!ISSET(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) & (0x40))) |
2428 | return (0); |
2429 | |
2430 | memset(&iatq, 0, sizeof(iatq))__builtin_memset((&iatq), (0), (sizeof(iatq))); |
2431 | |
2432 | iaq = &iatq.iatq_desc; |
2433 | iaq->iaq_opcode = htole16(IXL_AQ_OP_SET_VSI_PROMISC)((__uint16_t)(0x0254)); |
2434 | |
2435 | param = (struct ixl_aq_vsi_promisc_param *)&iaq->iaq_param; |
2436 | param->flags = htole16(IXL_AQ_VSI_PROMISC_FLAG_BCAST |((__uint16_t)((1 << 2) | (1 << 4))) |
2437 | IXL_AQ_VSI_PROMISC_FLAG_VLAN)((__uint16_t)((1 << 2) | (1 << 4))); |
2438 | if (ISSET(ifp->if_flags, IFF_PROMISC)((ifp->if_flags) & (0x100))) { |
2439 | param->flags |= htole16(IXL_AQ_VSI_PROMISC_FLAG_UCAST |((__uint16_t)((1 << 0) | (1 << 1))) |
2440 | IXL_AQ_VSI_PROMISC_FLAG_MCAST)((__uint16_t)((1 << 0) | (1 << 1))); |
2441 | } else if (ISSET(ifp->if_flags, IFF_ALLMULTI)((ifp->if_flags) & (0x200))) { |
2442 | param->flags |= htole16(IXL_AQ_VSI_PROMISC_FLAG_MCAST)((__uint16_t)((1 << 1))); |
2443 | } |
2444 | param->valid_flags = htole16(IXL_AQ_VSI_PROMISC_FLAG_UCAST |((__uint16_t)((1 << 0) | (1 << 1) | (1 << 2 ) | (1 << 4))) |
2445 | IXL_AQ_VSI_PROMISC_FLAG_MCAST | IXL_AQ_VSI_PROMISC_FLAG_BCAST |((__uint16_t)((1 << 0) | (1 << 1) | (1 << 2 ) | (1 << 4))) |
2446 | IXL_AQ_VSI_PROMISC_FLAG_VLAN)((__uint16_t)((1 << 0) | (1 << 1) | (1 << 2 ) | (1 << 4))); |
2447 | param->seid = sc->sc_seid; |
2448 | |
2449 | ixl_atq_exec(sc, &iatq, "ixliff"); |
2450 | |
2451 | if (iaq->iaq_retval != htole16(IXL_AQ_RC_OK)((__uint16_t)(0))) |
2452 | return (EIO5); |
2453 | |
2454 | if (memcmp(sc->sc_enaddr, sc->sc_ac.ac_enaddr, ETHER_ADDR_LEN)__builtin_memcmp((sc->sc_enaddr), (sc->sc_ac.ac_enaddr) , (6)) != 0) { |
2455 | ixl_remove_macvlan(sc, sc->sc_enaddr, 0, |
2456 | IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN0x0008); |
2457 | ixl_add_macvlan(sc, sc->sc_ac.ac_enaddr, 0, |
2458 | IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN0x0004); |
2459 | memcpy(sc->sc_enaddr, sc->sc_ac.ac_enaddr, ETHER_ADDR_LEN)__builtin_memcpy((sc->sc_enaddr), (sc->sc_ac.ac_enaddr) , (6)); |
2460 | } |
2461 | return (0); |
2462 | } |
2463 | |
2464 | static int |
2465 | ixl_down(struct ixl_softc *sc) |
2466 | { |
2467 | struct ifnet *ifp = &sc->sc_ac.ac_if; |
2468 | struct ixl_vector *iv; |
2469 | struct ixl_rx_ring *rxr; |
2470 | struct ixl_tx_ring *txr; |
2471 | unsigned int nqueues, i; |
2472 | uint32_t reg; |
2473 | int error = 0; |
2474 | |
2475 | nqueues = ixl_nqueues(sc)(1 << (sc)->sc_nqueues); |
2476 | |
2477 | rw_enter_write(&sc->sc_cfg_lock); |
2478 | |
2479 | CLR(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) &= ~(0x40)); |
2480 | |
2481 | NET_UNLOCK()do { rw_exit_write(&netlock); } while (0); |
2482 | |
2483 | /* mask interrupts */ |
2484 | reg = ixl_rd(sc, I40E_QINT_RQCTL(I40E_INTR_NOTX_QUEUE))(((sc)->sc_memt)->read_4(((sc)->sc_memh), (((0x0003A000 + ((0) * 4)))))); |
2485 | CLR(reg, I40E_QINT_RQCTL_CAUSE_ENA_MASK)((reg) &= ~(((0x1) << (30)))); |
2486 | ixl_wr(sc, I40E_QINT_RQCTL(I40E_INTR_NOTX_QUEUE), reg)(((sc)->sc_memt)->write_4(((sc)->sc_memh), (((0x0003A000 + ((0) * 4)))), ((reg)))); |
2487 | |
2488 | reg = ixl_rd(sc, I40E_QINT_TQCTL(I40E_INTR_NOTX_QUEUE))(((sc)->sc_memt)->read_4(((sc)->sc_memh), (((0x0003C000 + ((0) * 4)))))); |
2489 | CLR(reg, I40E_QINT_TQCTL_CAUSE_ENA_MASK)((reg) &= ~(((0x1) << (30)))); |
2490 | ixl_wr(sc, I40E_QINT_TQCTL(I40E_INTR_NOTX_QUEUE), reg)(((sc)->sc_memt)->write_4(((sc)->sc_memh), (((0x0003C000 + ((0) * 4)))), ((reg)))); |
2491 | |
2492 | ixl_wr(sc, I40E_PFINT_LNKLST0, I40E_QUEUE_TYPE_EOL)(((sc)->sc_memt)->write_4(((sc)->sc_memh), ((0x00038500 )), ((0x7ff)))); |
2493 | |
2494 | /* make sure the no hw generated work is still in flight */ |
2495 | intr_barrier(sc->sc_ihc); |
2496 | if (sc->sc_intrmap != NULL((void *)0)) { |
2497 | for (i = 0; i < nqueues; i++) { |
2498 | iv = &sc->sc_vectors[i]; |
2499 | rxr = iv->iv_rxr; |
2500 | txr = iv->iv_txr; |
2501 | |
2502 | ixl_txr_qdis(sc, txr, 0); |
2503 | |
2504 | ifq_barrier(txr->txr_ifq); |
2505 | |
2506 | timeout_del_barrier(&rxr->rxr_refill); |
2507 | |
2508 | intr_barrier(iv->iv_ihc); |
2509 | } |
2510 | } |
2511 | |
2512 | /* XXX wait at least 400 usec for all tx queues in one go */ |
2513 | delay(500)(*delay_func)(500); |
2514 | |
2515 | for (i = 0; i < nqueues; i++) { |
2516 | reg = ixl_rd(sc, I40E_QTX_ENA(i))(((sc)->sc_memt)->read_4(((sc)->sc_memh), (((0x00100000 + ((i) * 4)))))); |
2517 | CLR(reg, I40E_QTX_ENA_QENA_REQ_MASK)((reg) &= ~(((0x1) << (0)))); |
2518 | ixl_wr(sc, I40E_QTX_ENA(i), reg)(((sc)->sc_memt)->write_4(((sc)->sc_memh), (((0x00100000 + ((i) * 4)))), ((reg)))); |
2519 | |
2520 | reg = ixl_rd(sc, I40E_QRX_ENA(i))(((sc)->sc_memt)->read_4(((sc)->sc_memh), (((0x00120000 + ((i) * 4)))))); |
2521 | CLR(reg, I40E_QRX_ENA_QENA_REQ_MASK)((reg) &= ~(((0x1) << (0)))); |
2522 | ixl_wr(sc, I40E_QRX_ENA(i), reg)(((sc)->sc_memt)->write_4(((sc)->sc_memh), (((0x00120000 + ((i) * 4)))), ((reg)))); |
2523 | } |
2524 | |
2525 | for (i = 0; i < nqueues; i++) { |
2526 | iv = &sc->sc_vectors[i]; |
2527 | rxr = iv->iv_rxr; |
2528 | txr = iv->iv_txr; |
2529 | |
2530 | if (ixl_txr_disabled(sc, txr) != 0) |
2531 | goto die; |
2532 | |
2533 | if (ixl_rxr_disabled(sc, rxr) != 0) |
2534 | goto die; |
2535 | } |
2536 | |
2537 | for (i = 0; i < nqueues; i++) { |
2538 | iv = &sc->sc_vectors[i]; |
2539 | rxr = iv->iv_rxr; |
2540 | txr = iv->iv_txr; |
2541 | |
2542 | ixl_txr_unconfig(sc, txr); |
2543 | ixl_rxr_unconfig(sc, rxr); |
2544 | |
2545 | ixl_txr_clean(sc, txr); |
2546 | ixl_rxr_clean(sc, rxr); |
2547 | |
2548 | ixl_txr_free(sc, txr); |
2549 | ixl_rxr_free(sc, rxr); |
2550 | |
2551 | ifp->if_iqs[i]->ifiq_softc_ifiq_ptr._ifiq_softc = NULL((void *)0); |
2552 | ifp->if_ifqs[i]->ifq_softc_ifq_ptr._ifq_softc = NULL((void *)0); |
2553 | } |
2554 | |
2555 | out: |
2556 | rw_exit_write(&sc->sc_cfg_lock); |
2557 | NET_LOCK()do { rw_enter_write(&netlock); } while (0); |
2558 | return (error); |
2559 | die: |
2560 | sc->sc_dead = 1; |
2561 | log(LOG_CRIT2, "%s: failed to shut down rings", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
2562 | error = ETIMEDOUT60; |
2563 | goto out; |
2564 | } |
2565 | |
2566 | static struct ixl_tx_ring * |
2567 | ixl_txr_alloc(struct ixl_softc *sc, unsigned int qid) |
2568 | { |
2569 | struct ixl_tx_ring *txr; |
2570 | struct ixl_tx_map *maps, *txm; |
2571 | unsigned int i; |
2572 | |
2573 | txr = malloc(sizeof(*txr), M_DEVBUF2, M_WAITOK0x0001|M_CANFAIL0x0004); |
2574 | if (txr == NULL((void *)0)) |
2575 | return (NULL((void *)0)); |
2576 | |
2577 | maps = mallocarray(sizeof(*maps), |
2578 | sc->sc_tx_ring_ndescs, M_DEVBUF2, M_WAITOK0x0001|M_CANFAIL0x0004|M_ZERO0x0008); |
2579 | if (maps == NULL((void *)0)) |
2580 | goto free; |
2581 | |
2582 | if (ixl_dmamem_alloc(sc, &txr->txr_mem, |
2583 | sizeof(struct ixl_tx_desc) * sc->sc_tx_ring_ndescs, |
2584 | IXL_TX_QUEUE_ALIGN128) != 0) |
2585 | goto freemap; |
2586 | |
2587 | for (i = 0; i < sc->sc_tx_ring_ndescs; i++) { |
2588 | txm = &maps[i]; |
2589 | |
2590 | if (bus_dmamap_create(sc->sc_dmat,(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), (9712 ), (8), (9712), (0), (0x0000 | 0x0002 | 0x2000), (&txm-> txm_map)) |
2591 | IXL_HARDMTU, IXL_TX_PKT_DESCS, IXL_HARDMTU, 0,(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), (9712 ), (8), (9712), (0), (0x0000 | 0x0002 | 0x2000), (&txm-> txm_map)) |
2592 | BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), (9712 ), (8), (9712), (0), (0x0000 | 0x0002 | 0x2000), (&txm-> txm_map)) |
2593 | &txm->txm_map)(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), (9712 ), (8), (9712), (0), (0x0000 | 0x0002 | 0x2000), (&txm-> txm_map)) != 0) |
2594 | goto uncreate; |
2595 | |
2596 | txm->txm_eop = -1; |
2597 | txm->txm_m = NULL((void *)0); |
2598 | } |
2599 | |
2600 | txr->txr_cons = txr->txr_prod = 0; |
2601 | txr->txr_maps = maps; |
2602 | |
2603 | txr->txr_tail = I40E_QTX_TAIL(qid)(0x00108000 + ((qid) * 4)); |
2604 | txr->txr_qid = qid; |
2605 | |
2606 | return (txr); |
2607 | |
2608 | uncreate: |
2609 | for (i = 0; i < sc->sc_tx_ring_ndescs; i++) { |
2610 | txm = &maps[i]; |
2611 | |
2612 | if (txm->txm_map == NULL((void *)0)) |
2613 | continue; |
2614 | |
2615 | bus_dmamap_destroy(sc->sc_dmat, txm->txm_map)(*(sc->sc_dmat)->_dmamap_destroy)((sc->sc_dmat), (txm ->txm_map)); |
2616 | } |
2617 | |
2618 | ixl_dmamem_free(sc, &txr->txr_mem); |
2619 | freemap: |
2620 | free(maps, M_DEVBUF2, sizeof(*maps) * sc->sc_tx_ring_ndescs); |
2621 | free: |
2622 | free(txr, M_DEVBUF2, sizeof(*txr)); |
2623 | return (NULL((void *)0)); |
2624 | } |
2625 | |
2626 | static void |
2627 | ixl_txr_qdis(struct ixl_softc *sc, struct ixl_tx_ring *txr, int enable) |
2628 | { |
2629 | unsigned int qid; |
2630 | bus_size_t reg; |
2631 | uint32_t r; |
2632 | |
2633 | qid = txr->txr_qid + sc->sc_base_queue; |
2634 | reg = I40E_GLLAN_TXPRE_QDIS(qid / 128)(0x000e6500 + ((qid / 128) * 4)); |
2635 | qid %= 128; |
2636 | |
2637 | r = ixl_rd(sc, reg)(((sc)->sc_memt)->read_4(((sc)->sc_memh), ((reg)))); |
2638 | CLR(r, I40E_GLLAN_TXPRE_QDIS_QINDX_MASK)((r) &= ~(((0x7FF) << (0)))); |
2639 | SET(r, qid << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT)((r) |= (qid << 0)); |
2640 | SET(r, enable ? I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK :((r) |= (enable ? ((0x1) << (31)) : ((0x1) << (30 )))) |
2641 | I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK)((r) |= (enable ? ((0x1) << (31)) : ((0x1) << (30 )))); |
2642 | ixl_wr(sc, reg, r)(((sc)->sc_memt)->write_4(((sc)->sc_memh), ((reg)), ( (r)))); |
2643 | } |
2644 | |
2645 | static void |
2646 | ixl_txr_config(struct ixl_softc *sc, struct ixl_tx_ring *txr) |
2647 | { |
2648 | struct ixl_hmc_txq txq; |
2649 | struct ixl_aq_vsi_data *data = IXL_DMA_KVA(&sc->sc_scratch)((void *)(&sc->sc_scratch)->ixm_kva); |
2650 | void *hmc; |
2651 | |
2652 | memset(&txq, 0, sizeof(txq))__builtin_memset((&txq), (0), (sizeof(txq))); |
2653 | txq.head = htole16(0)((__uint16_t)(0)); |
2654 | txq.new_context = 1; |
2655 | htolem64(&txq.base,(*(__uint64_t *)(&txq.base) = ((__uint64_t)(((&txr-> txr_mem)->ixm_map->dm_segs[0].ds_addr) / 128))) |
2656 | IXL_DMA_DVA(&txr->txr_mem) / IXL_HMC_TXQ_BASE_UNIT)(*(__uint64_t *)(&txq.base) = ((__uint64_t)(((&txr-> txr_mem)->ixm_map->dm_segs[0].ds_addr) / 128))); |
2657 | txq.head_wb_ena = IXL_HMC_TXQ_DESC_WB0; |
2658 | htolem16(&txq.qlen, sc->sc_tx_ring_ndescs)(*(__uint16_t *)(&txq.qlen) = ((__uint16_t)(sc->sc_tx_ring_ndescs ))); |
2659 | txq.tphrdesc_ena = 0; |
2660 | txq.tphrpacket_ena = 0; |
2661 | txq.tphwdesc_ena = 0; |
2662 | txq.rdylist = data->qs_handle[0]; |
2663 | |
2664 | hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_TX0, txr->txr_qid); |
2665 | memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_TX))__builtin_memset((hmc), (0), (ixl_hmc_len(sc, 0))); |
2666 | ixl_hmc_pack(hmc, &txq, ixl_hmc_pack_txq, nitems(ixl_hmc_pack_txq)(sizeof((ixl_hmc_pack_txq)) / sizeof((ixl_hmc_pack_txq)[0]))); |
2667 | } |
2668 | |
2669 | static void |
2670 | ixl_txr_unconfig(struct ixl_softc *sc, struct ixl_tx_ring *txr) |
2671 | { |
2672 | void *hmc; |
2673 | |
2674 | hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_TX0, txr->txr_qid); |
2675 | memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_TX))__builtin_memset((hmc), (0), (ixl_hmc_len(sc, 0))); |
2676 | } |
2677 | |
2678 | static void |
2679 | ixl_txr_clean(struct ixl_softc *sc, struct ixl_tx_ring *txr) |
2680 | { |
2681 | struct ixl_tx_map *maps, *txm; |
2682 | bus_dmamap_t map; |
2683 | unsigned int i; |
2684 | |
2685 | maps = txr->txr_maps; |
2686 | for (i = 0; i < sc->sc_tx_ring_ndescs; i++) { |
2687 | txm = &maps[i]; |
2688 | |
2689 | if (txm->txm_m == NULL((void *)0)) |
2690 | continue; |
2691 | |
2692 | map = txm->txm_map; |
2693 | bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (map), (0), (map->dm_mapsize), (0x08)) |
2694 | BUS_DMASYNC_POSTWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (map), (0), (map->dm_mapsize), (0x08)); |
2695 | bus_dmamap_unload(sc->sc_dmat, map)(*(sc->sc_dmat)->_dmamap_unload)((sc->sc_dmat), (map )); |
2696 | |
2697 | m_freem(txm->txm_m); |
2698 | txm->txm_m = NULL((void *)0); |
2699 | } |
2700 | } |
2701 | |
2702 | static int |
2703 | ixl_txr_enabled(struct ixl_softc *sc, struct ixl_tx_ring *txr) |
2704 | { |
2705 | bus_size_t ena = I40E_QTX_ENA(txr->txr_qid)(0x00100000 + ((txr->txr_qid) * 4)); |
2706 | uint32_t reg; |
2707 | int i; |
2708 | |
2709 | for (i = 0; i < 10; i++) { |
2710 | reg = ixl_rd(sc, ena)(((sc)->sc_memt)->read_4(((sc)->sc_memh), ((ena)))); |
2711 | if (ISSET(reg, I40E_QTX_ENA_QENA_STAT_MASK)((reg) & (((0x1) << (2))))) |
2712 | return (0); |
2713 | |
2714 | delaymsec(10)(*delay_func)(1000 * (10)); |
2715 | } |
2716 | |
2717 | return (ETIMEDOUT60); |
2718 | } |
2719 | |
2720 | static int |
2721 | ixl_txr_disabled(struct ixl_softc *sc, struct ixl_tx_ring *txr) |
2722 | { |
2723 | bus_size_t ena = I40E_QTX_ENA(txr->txr_qid)(0x00100000 + ((txr->txr_qid) * 4)); |
2724 | uint32_t reg; |
2725 | int i; |
2726 | |
2727 | for (i = 0; i < 20; i++) { |
2728 | reg = ixl_rd(sc, ena)(((sc)->sc_memt)->read_4(((sc)->sc_memh), ((ena)))); |
2729 | if (ISSET(reg, I40E_QTX_ENA_QENA_STAT_MASK)((reg) & (((0x1) << (2)))) == 0) |
2730 | return (0); |
2731 | |
2732 | delaymsec(10)(*delay_func)(1000 * (10)); |
2733 | } |
2734 | |
2735 | return (ETIMEDOUT60); |
2736 | } |
2737 | |
2738 | static void |
2739 | ixl_txr_free(struct ixl_softc *sc, struct ixl_tx_ring *txr) |
2740 | { |
2741 | struct ixl_tx_map *maps, *txm; |
2742 | unsigned int i; |
2743 | |
2744 | maps = txr->txr_maps; |
2745 | for (i = 0; i < sc->sc_tx_ring_ndescs; i++) { |
2746 | txm = &maps[i]; |
2747 | |
2748 | bus_dmamap_destroy(sc->sc_dmat, txm->txm_map)(*(sc->sc_dmat)->_dmamap_destroy)((sc->sc_dmat), (txm ->txm_map)); |
2749 | } |
2750 | |
2751 | ixl_dmamem_free(sc, &txr->txr_mem); |
2752 | free(maps, M_DEVBUF2, sizeof(*maps) * sc->sc_tx_ring_ndescs); |
2753 | free(txr, M_DEVBUF2, sizeof(*txr)); |
2754 | } |
2755 | |
2756 | static inline int |
2757 | ixl_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m) |
2758 | { |
2759 | int error; |
2760 | |
2761 | error = bus_dmamap_load_mbuf(dmat, map, m,(*(dmat)->_dmamap_load_mbuf)((dmat), (map), (m), (0x0100 | 0x0001)) |
2762 | BUS_DMA_STREAMING | BUS_DMA_NOWAIT)(*(dmat)->_dmamap_load_mbuf)((dmat), (map), (m), (0x0100 | 0x0001)); |
2763 | if (error != EFBIG27) |
2764 | return (error); |
2765 | |
2766 | error = m_defrag(m, M_DONTWAIT0x0002); |
2767 | if (error != 0) |
2768 | return (error); |
2769 | |
2770 | return (bus_dmamap_load_mbuf(dmat, map, m,(*(dmat)->_dmamap_load_mbuf)((dmat), (map), (m), (0x0100 | 0x0001)) |
2771 | BUS_DMA_STREAMING | BUS_DMA_NOWAIT)(*(dmat)->_dmamap_load_mbuf)((dmat), (map), (m), (0x0100 | 0x0001))); |
2772 | } |
2773 | |
2774 | static void |
2775 | ixl_start(struct ifqueue *ifq) |
2776 | { |
2777 | struct ifnet *ifp = ifq->ifq_if; |
2778 | struct ixl_softc *sc = ifp->if_softc; |
2779 | struct ixl_tx_ring *txr = ifq->ifq_softc_ifq_ptr._ifq_softc; |
2780 | struct ixl_tx_desc *ring, *txd; |
2781 | struct ixl_tx_map *txm; |
2782 | bus_dmamap_t map; |
2783 | struct mbuf *m; |
2784 | uint64_t cmd; |
2785 | unsigned int prod, free, last, i; |
2786 | unsigned int mask; |
2787 | int post = 0; |
2788 | #if NBPFILTER1 > 0 |
2789 | caddr_t if_bpf; |
2790 | #endif |
2791 | |
2792 | if (!LINK_STATE_IS_UP(ifp->if_link_state)((ifp->if_data.ifi_link_state) >= 4 || (ifp->if_data .ifi_link_state) == 0)) { |
2793 | ifq_purge(ifq); |
2794 | return; |
2795 | } |
2796 | |
2797 | prod = txr->txr_prod; |
2798 | free = txr->txr_cons; |
2799 | if (free <= prod) |
2800 | free += sc->sc_tx_ring_ndescs; |
2801 | free -= prod; |
2802 | |
2803 | bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& txr->txr_mem)->ixm_map)), (0), (((&txr->txr_mem) ->ixm_size)), (0x08)) |
2804 | 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_POSTWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& txr->txr_mem)->ixm_map)), (0), (((&txr->txr_mem) ->ixm_size)), (0x08)); |
2805 | |
2806 | ring = IXL_DMA_KVA(&txr->txr_mem)((void *)(&txr->txr_mem)->ixm_kva); |
2807 | mask = sc->sc_tx_ring_ndescs - 1; |
2808 | |
2809 | for (;;) { |
2810 | if (free <= IXL_TX_PKT_DESCS8) { |
2811 | ifq_set_oactive(ifq); |
2812 | break; |
2813 | } |
2814 | |
2815 | m = ifq_dequeue(ifq); |
2816 | if (m == NULL((void *)0)) |
2817 | break; |
2818 | |
2819 | txm = &txr->txr_maps[prod]; |
2820 | map = txm->txm_map; |
2821 | |
2822 | if (ixl_load_mbuf(sc->sc_dmat, map, m) != 0) { |
2823 | ifq->ifq_errors++; |
2824 | m_freem(m); |
2825 | continue; |
2826 | } |
2827 | |
2828 | bus_dmamap_sync(sc->sc_dmat, map, 0,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (map), (0), (map->dm_mapsize), (0x04)) |
2829 | map->dm_mapsize, BUS_DMASYNC_PREWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (map), (0), (map->dm_mapsize), (0x04)); |
2830 | |
2831 | for (i = 0; i < map->dm_nsegs; i++) { |
2832 | txd = &ring[prod]; |
2833 | |
2834 | cmd = (uint64_t)map->dm_segs[i].ds_len << |
2835 | IXL_TX_DESC_BSIZE_SHIFT34; |
2836 | cmd |= IXL_TX_DESC_DTYPE_DATA(0x0ULL << 0) | IXL_TX_DESC_CMD_ICRC(0x004 << 4); |
2837 | |
2838 | htolem64(&txd->addr, map->dm_segs[i].ds_addr)(*(__uint64_t *)(&txd->addr) = ((__uint64_t)(map->dm_segs [i].ds_addr))); |
2839 | htolem64(&txd->cmd, cmd)(*(__uint64_t *)(&txd->cmd) = ((__uint64_t)(cmd))); |
2840 | |
2841 | last = prod; |
2842 | |
2843 | prod++; |
2844 | prod &= mask; |
2845 | } |
2846 | cmd |= IXL_TX_DESC_CMD_EOP(0x001 << 4) | IXL_TX_DESC_CMD_RS(0x002 << 4); |
2847 | htolem64(&txd->cmd, cmd)(*(__uint64_t *)(&txd->cmd) = ((__uint64_t)(cmd))); |
2848 | |
2849 | txm->txm_m = m; |
2850 | txm->txm_eop = last; |
2851 | |
2852 | #if NBPFILTER1 > 0 |
2853 | if_bpf = ifp->if_bpf; |
2854 | if (if_bpf) |
2855 | bpf_mtap_ether(if_bpf, m, BPF_DIRECTION_OUT(1 << 1)); |
2856 | #endif |
2857 | |
2858 | free -= i; |
2859 | post = 1; |
2860 | } |
2861 | |
2862 | bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& txr->txr_mem)->ixm_map)), (0), (((&txr->txr_mem) ->ixm_size)), (0x04)) |
2863 | 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_PREWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& txr->txr_mem)->ixm_map)), (0), (((&txr->txr_mem) ->ixm_size)), (0x04)); |
2864 | |
2865 | if (post) { |
2866 | txr->txr_prod = prod; |
2867 | ixl_wr(sc, txr->txr_tail, prod)(((sc)->sc_memt)->write_4(((sc)->sc_memh), ((txr-> txr_tail)), ((prod)))); |
2868 | } |
2869 | } |
2870 | |
2871 | static int |
2872 | ixl_txeof(struct ixl_softc *sc, struct ixl_tx_ring *txr) |
2873 | { |
2874 | struct ifqueue *ifq = txr->txr_ifq; |
2875 | struct ixl_tx_desc *ring, *txd; |
2876 | struct ixl_tx_map *txm; |
2877 | bus_dmamap_t map; |
2878 | unsigned int cons, prod, last; |
2879 | unsigned int mask; |
2880 | uint64_t dtype; |
2881 | int done = 0; |
2882 | |
2883 | prod = txr->txr_prod; |
2884 | cons = txr->txr_cons; |
2885 | |
2886 | if (cons == prod) |
2887 | return (0); |
2888 | |
2889 | bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& txr->txr_mem)->ixm_map)), (0), (((&txr->txr_mem) ->ixm_size)), (0x02)) |
2890 | 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_POSTREAD)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& txr->txr_mem)->ixm_map)), (0), (((&txr->txr_mem) ->ixm_size)), (0x02)); |
2891 | |
2892 | ring = IXL_DMA_KVA(&txr->txr_mem)((void *)(&txr->txr_mem)->ixm_kva); |
2893 | mask = sc->sc_tx_ring_ndescs - 1; |
2894 | |
2895 | do { |
2896 | txm = &txr->txr_maps[cons]; |
2897 | last = txm->txm_eop; |
2898 | txd = &ring[last]; |
2899 | |
2900 | dtype = txd->cmd & htole64(IXL_TX_DESC_DTYPE_MASK)((__uint64_t)((0xfULL << 0))); |
2901 | if (dtype != htole64(IXL_TX_DESC_DTYPE_DONE)((__uint64_t)((0xfULL << 0)))) |
2902 | break; |
2903 | |
2904 | map = txm->txm_map; |
2905 | |
2906 | bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (map), (0), (map->dm_mapsize), (0x08)) |
2907 | BUS_DMASYNC_POSTWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (map), (0), (map->dm_mapsize), (0x08)); |
2908 | bus_dmamap_unload(sc->sc_dmat, map)(*(sc->sc_dmat)->_dmamap_unload)((sc->sc_dmat), (map )); |
2909 | m_freem(txm->txm_m); |
2910 | |
2911 | txm->txm_m = NULL((void *)0); |
2912 | txm->txm_eop = -1; |
2913 | |
2914 | cons = last + 1; |
2915 | cons &= mask; |
2916 | |
2917 | done = 1; |
2918 | } while (cons != prod); |
2919 | |
2920 | bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& txr->txr_mem)->ixm_map)), (0), (((&txr->txr_mem) ->ixm_size)), (0x01)) |
2921 | 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_PREREAD)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& txr->txr_mem)->ixm_map)), (0), (((&txr->txr_mem) ->ixm_size)), (0x01)); |
2922 | |
2923 | txr->txr_cons = cons; |
2924 | |
2925 | //ixl_enable(sc, txr->txr_msix); |
2926 | |
2927 | if (ifq_is_oactive(ifq)) |
2928 | ifq_restart(ifq); |
2929 | |
2930 | return (done); |
2931 | } |
2932 | |
2933 | static struct ixl_rx_ring * |
2934 | ixl_rxr_alloc(struct ixl_softc *sc, unsigned int qid) |
2935 | { |
2936 | struct ixl_rx_ring *rxr; |
2937 | struct ixl_rx_map *maps, *rxm; |
2938 | unsigned int i; |
2939 | |
2940 | rxr = malloc(sizeof(*rxr), M_DEVBUF2, M_WAITOK0x0001|M_CANFAIL0x0004); |
2941 | if (rxr == NULL((void *)0)) |
2942 | return (NULL((void *)0)); |
2943 | |
2944 | maps = mallocarray(sizeof(*maps), |
2945 | sc->sc_rx_ring_ndescs, M_DEVBUF2, M_WAITOK0x0001|M_CANFAIL0x0004|M_ZERO0x0008); |
2946 | if (maps == NULL((void *)0)) |
2947 | goto free; |
2948 | |
2949 | if (ixl_dmamem_alloc(sc, &rxr->rxr_mem, |
2950 | sizeof(struct ixl_rx_rd_desc_16) * sc->sc_rx_ring_ndescs, |
2951 | IXL_RX_QUEUE_ALIGN128) != 0) |
2952 | goto freemap; |
2953 | |
2954 | for (i = 0; i < sc->sc_rx_ring_ndescs; i++) { |
2955 | rxm = &maps[i]; |
2956 | |
2957 | if (bus_dmamap_create(sc->sc_dmat,(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), (9712 ), (1), (9712), (0), (0x0000 | 0x0002 | 0x2000), (&rxm-> rxm_map)) |
2958 | IXL_HARDMTU, 1, IXL_HARDMTU, 0,(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), (9712 ), (1), (9712), (0), (0x0000 | 0x0002 | 0x2000), (&rxm-> rxm_map)) |
2959 | BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), (9712 ), (1), (9712), (0), (0x0000 | 0x0002 | 0x2000), (&rxm-> rxm_map)) |
2960 | &rxm->rxm_map)(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), (9712 ), (1), (9712), (0), (0x0000 | 0x0002 | 0x2000), (&rxm-> rxm_map)) != 0) |
2961 | goto uncreate; |
2962 | |
2963 | rxm->rxm_m = NULL((void *)0); |
2964 | } |
2965 | |
2966 | rxr->rxr_sc = sc; |
2967 | if_rxr_init(&rxr->rxr_acct, 17, sc->sc_rx_ring_ndescs - 1); |
2968 | timeout_set(&rxr->rxr_refill, ixl_rxrefill, rxr); |
2969 | rxr->rxr_cons = rxr->rxr_prod = 0; |
2970 | rxr->rxr_m_head = NULL((void *)0); |
2971 | rxr->rxr_m_tail = &rxr->rxr_m_head; |
2972 | rxr->rxr_maps = maps; |
2973 | |
2974 | rxr->rxr_tail = I40E_QRX_TAIL(qid)(0x00128000 + ((qid) * 4)); |
2975 | rxr->rxr_qid = qid; |
2976 | |
2977 | return (rxr); |
2978 | |
2979 | uncreate: |
2980 | for (i = 0; i < sc->sc_rx_ring_ndescs; i++) { |
2981 | rxm = &maps[i]; |
2982 | |
2983 | if (rxm->rxm_map == NULL((void *)0)) |
2984 | continue; |
2985 | |
2986 | bus_dmamap_destroy(sc->sc_dmat, rxm->rxm_map)(*(sc->sc_dmat)->_dmamap_destroy)((sc->sc_dmat), (rxm ->rxm_map)); |
2987 | } |
2988 | |
2989 | ixl_dmamem_free(sc, &rxr->rxr_mem); |
2990 | freemap: |
2991 | free(maps, M_DEVBUF2, sizeof(*maps) * sc->sc_rx_ring_ndescs); |
2992 | free: |
2993 | free(rxr, M_DEVBUF2, sizeof(*rxr)); |
2994 | return (NULL((void *)0)); |
2995 | } |
2996 | |
2997 | static void |
2998 | ixl_rxr_clean(struct ixl_softc *sc, struct ixl_rx_ring *rxr) |
2999 | { |
3000 | struct ixl_rx_map *maps, *rxm; |
3001 | bus_dmamap_t map; |
3002 | unsigned int i; |
3003 | |
3004 | timeout_del_barrier(&rxr->rxr_refill); |
3005 | |
3006 | maps = rxr->rxr_maps; |
3007 | for (i = 0; i < sc->sc_rx_ring_ndescs; i++) { |
3008 | rxm = &maps[i]; |
3009 | |
3010 | if (rxm->rxm_m == NULL((void *)0)) |
3011 | continue; |
3012 | |
3013 | map = rxm->rxm_map; |
3014 | bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (map), (0), (map->dm_mapsize), (0x08)) |
3015 | BUS_DMASYNC_POSTWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (map), (0), (map->dm_mapsize), (0x08)); |
3016 | bus_dmamap_unload(sc->sc_dmat, map)(*(sc->sc_dmat)->_dmamap_unload)((sc->sc_dmat), (map )); |
3017 | |
3018 | m_freem(rxm->rxm_m); |
3019 | rxm->rxm_m = NULL((void *)0); |
3020 | } |
3021 | |
3022 | m_freem(rxr->rxr_m_head); |
3023 | rxr->rxr_m_head = NULL((void *)0); |
3024 | rxr->rxr_m_tail = &rxr->rxr_m_head; |
3025 | |
3026 | rxr->rxr_prod = rxr->rxr_cons = 0; |
3027 | } |
3028 | |
3029 | static int |
3030 | ixl_rxr_enabled(struct ixl_softc *sc, struct ixl_rx_ring *rxr) |
3031 | { |
3032 | bus_size_t ena = I40E_QRX_ENA(rxr->rxr_qid)(0x00120000 + ((rxr->rxr_qid) * 4)); |
3033 | uint32_t reg; |
3034 | int i; |
3035 | |
3036 | for (i = 0; i < 10; i++) { |
3037 | reg = ixl_rd(sc, ena)(((sc)->sc_memt)->read_4(((sc)->sc_memh), ((ena)))); |
3038 | if (ISSET(reg, I40E_QRX_ENA_QENA_STAT_MASK)((reg) & (((0x1) << (2))))) |
3039 | return (0); |
3040 | |
3041 | delaymsec(10)(*delay_func)(1000 * (10)); |
3042 | } |
3043 | |
3044 | return (ETIMEDOUT60); |
3045 | } |
3046 | |
3047 | static int |
3048 | ixl_rxr_disabled(struct ixl_softc *sc, struct ixl_rx_ring *rxr) |
3049 | { |
3050 | bus_size_t ena = I40E_QRX_ENA(rxr->rxr_qid)(0x00120000 + ((rxr->rxr_qid) * 4)); |
3051 | uint32_t reg; |
3052 | int i; |
3053 | |
3054 | for (i = 0; i < 20; i++) { |
3055 | reg = ixl_rd(sc, ena)(((sc)->sc_memt)->read_4(((sc)->sc_memh), ((ena)))); |
3056 | if (ISSET(reg, I40E_QRX_ENA_QENA_STAT_MASK)((reg) & (((0x1) << (2)))) == 0) |
3057 | return (0); |
3058 | |
3059 | delaymsec(10)(*delay_func)(1000 * (10)); |
3060 | } |
3061 | |
3062 | return (ETIMEDOUT60); |
3063 | } |
3064 | |
3065 | static void |
3066 | ixl_rxr_config(struct ixl_softc *sc, struct ixl_rx_ring *rxr) |
3067 | { |
3068 | struct ixl_hmc_rxq rxq; |
3069 | void *hmc; |
3070 | |
3071 | memset(&rxq, 0, sizeof(rxq))__builtin_memset((&rxq), (0), (sizeof(rxq))); |
3072 | |
3073 | rxq.head = htole16(0)((__uint16_t)(0)); |
3074 | htolem64(&rxq.base,(*(__uint64_t *)(&rxq.base) = ((__uint64_t)(((&rxr-> rxr_mem)->ixm_map->dm_segs[0].ds_addr) / 128))) |
3075 | IXL_DMA_DVA(&rxr->rxr_mem) / IXL_HMC_RXQ_BASE_UNIT)(*(__uint64_t *)(&rxq.base) = ((__uint64_t)(((&rxr-> rxr_mem)->ixm_map->dm_segs[0].ds_addr) / 128))); |
3076 | htolem16(&rxq.qlen, sc->sc_rx_ring_ndescs)(*(__uint16_t *)(&rxq.qlen) = ((__uint16_t)(sc->sc_rx_ring_ndescs ))); |
3077 | rxq.dbuff = htole16(MCLBYTES / IXL_HMC_RXQ_DBUFF_UNIT)((__uint16_t)((1 << 11) / 128)); |
3078 | rxq.hbuff = 0; |
3079 | rxq.dtype = IXL_HMC_RXQ_DTYPE_NOSPLIT0x0; |
3080 | rxq.dsize = IXL_HMC_RXQ_DSIZE_160; |
3081 | rxq.crcstrip = 1; |
3082 | rxq.l2sel = 0; |
3083 | rxq.showiv = 0; |
3084 | rxq.rxmax = htole16(IXL_HARDMTU)((__uint16_t)(9712)); |
3085 | rxq.tphrdesc_ena = 0; |
3086 | rxq.tphwdesc_ena = 0; |
3087 | rxq.tphdata_ena = 0; |
3088 | rxq.tphhead_ena = 0; |
3089 | rxq.lrxqthresh = 0; |
3090 | rxq.prefena = 1; |
3091 | |
3092 | hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_RX1, rxr->rxr_qid); |
3093 | memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_RX))__builtin_memset((hmc), (0), (ixl_hmc_len(sc, 1))); |
3094 | ixl_hmc_pack(hmc, &rxq, ixl_hmc_pack_rxq, nitems(ixl_hmc_pack_rxq)(sizeof((ixl_hmc_pack_rxq)) / sizeof((ixl_hmc_pack_rxq)[0]))); |
3095 | } |
3096 | |
3097 | static void |
3098 | ixl_rxr_unconfig(struct ixl_softc *sc, struct ixl_rx_ring *rxr) |
3099 | { |
3100 | void *hmc; |
3101 | |
3102 | hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_RX1, rxr->rxr_qid); |
3103 | memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_RX))__builtin_memset((hmc), (0), (ixl_hmc_len(sc, 1))); |
3104 | } |
3105 | |
3106 | static void |
3107 | ixl_rxr_free(struct ixl_softc *sc, struct ixl_rx_ring *rxr) |
3108 | { |
3109 | struct ixl_rx_map *maps, *rxm; |
3110 | unsigned int i; |
3111 | |
3112 | maps = rxr->rxr_maps; |
3113 | for (i = 0; i < sc->sc_rx_ring_ndescs; i++) { |
3114 | rxm = &maps[i]; |
3115 | |
3116 | bus_dmamap_destroy(sc->sc_dmat, rxm->rxm_map)(*(sc->sc_dmat)->_dmamap_destroy)((sc->sc_dmat), (rxm ->rxm_map)); |
3117 | } |
3118 | |
3119 | ixl_dmamem_free(sc, &rxr->rxr_mem); |
3120 | free(maps, M_DEVBUF2, sizeof(*maps) * sc->sc_rx_ring_ndescs); |
3121 | free(rxr, M_DEVBUF2, sizeof(*rxr)); |
3122 | } |
3123 | |
3124 | static int |
3125 | ixl_rxeof(struct ixl_softc *sc, struct ixl_rx_ring *rxr) |
3126 | { |
3127 | struct ifiqueue *ifiq = rxr->rxr_ifiq; |
3128 | struct ifnet *ifp = &sc->sc_ac.ac_if; |
3129 | struct ixl_rx_wb_desc_16 *ring, *rxd; |
3130 | struct ixl_rx_map *rxm; |
3131 | bus_dmamap_t map; |
3132 | unsigned int cons, prod; |
3133 | struct mbuf_list ml = MBUF_LIST_INITIALIZER(){ ((void *)0), ((void *)0), 0 }; |
3134 | struct mbuf *m; |
3135 | uint64_t word; |
3136 | unsigned int len; |
3137 | unsigned int mask; |
3138 | int done = 0; |
3139 | |
3140 | prod = rxr->rxr_prod; |
3141 | cons = rxr->rxr_cons; |
3142 | |
3143 | if (cons == prod) |
3144 | return (0); |
3145 | |
3146 | bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&rxr->rxr_mem),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& rxr->rxr_mem)->ixm_map)), (0), (((&rxr->rxr_mem) ->ixm_size)), (0x02|0x08)) |
3147 | 0, IXL_DMA_LEN(&rxr->rxr_mem),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& rxr->rxr_mem)->ixm_map)), (0), (((&rxr->rxr_mem) ->ixm_size)), (0x02|0x08)) |
3148 | BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& rxr->rxr_mem)->ixm_map)), (0), (((&rxr->rxr_mem) ->ixm_size)), (0x02|0x08)); |
3149 | |
3150 | ring = IXL_DMA_KVA(&rxr->rxr_mem)((void *)(&rxr->rxr_mem)->ixm_kva); |
3151 | mask = sc->sc_rx_ring_ndescs - 1; |
3152 | |
3153 | do { |
3154 | rxd = &ring[cons]; |
3155 | |
3156 | word = lemtoh64(&rxd->qword1)((__uint64_t)(*(__uint64_t *)(&rxd->qword1))); |
3157 | if (!ISSET(word, IXL_RX_DESC_DD)((word) & ((1 << 0)))) |
3158 | break; |
3159 | |
3160 | if_rxr_put(&rxr->rxr_acct, 1)do { (&rxr->rxr_acct)->rxr_alive -= (1); } while (0 ); |
3161 | |
3162 | rxm = &rxr->rxr_maps[cons]; |
3163 | |
3164 | map = rxm->rxm_map; |
3165 | bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (map), (0), (map->dm_mapsize), (0x02)) |
3166 | BUS_DMASYNC_POSTREAD)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (map), (0), (map->dm_mapsize), (0x02)); |
3167 | bus_dmamap_unload(sc->sc_dmat, map)(*(sc->sc_dmat)->_dmamap_unload)((sc->sc_dmat), (map )); |
3168 | |
3169 | m = rxm->rxm_m; |
3170 | rxm->rxm_m = NULL((void *)0); |
3171 | |
3172 | len = (word & IXL_RX_DESC_PLEN_MASK(0x3fffULL << 38)) >> IXL_RX_DESC_PLEN_SHIFT38; |
3173 | m->m_lenm_hdr.mh_len = len; |
3174 | m->m_pkthdrM_dat.MH.MH_pkthdr.len = 0; |
3175 | |
3176 | m->m_nextm_hdr.mh_next = NULL((void *)0); |
3177 | *rxr->rxr_m_tail = m; |
3178 | rxr->rxr_m_tail = &m->m_nextm_hdr.mh_next; |
3179 | |
3180 | m = rxr->rxr_m_head; |
3181 | m->m_pkthdrM_dat.MH.MH_pkthdr.len += len; |
3182 | |
3183 | if (ISSET(word, IXL_RX_DESC_EOP)((word) & ((1 << 1)))) { |
3184 | if (!ISSET(word,((word) & ((1 << 19) | (1 << 25))) |
3185 | IXL_RX_DESC_RXE | IXL_RX_DESC_OVERSIZE)((word) & ((1 << 19) | (1 << 25)))) { |
3186 | if ((word & IXL_RX_DESC_FLTSTAT_MASK(0x3 << 12)) == |
3187 | IXL_RX_DESC_FLTSTAT_RSS(0x3 << 12)) { |
3188 | m->m_pkthdrM_dat.MH.MH_pkthdr.ph_flowid = |
3189 | lemtoh32(&rxd->filter_status)((__uint32_t)(*(__uint32_t *)(&rxd->filter_status))); |
3190 | m->m_pkthdrM_dat.MH.MH_pkthdr.csum_flags |= M_FLOWID0x4000; |
3191 | } |
3192 | |
3193 | ml_enqueue(&ml, m); |
3194 | } else { |
3195 | ifp->if_ierrorsif_data.ifi_ierrors++; /* XXX */ |
3196 | m_freem(m); |
3197 | } |
3198 | |
3199 | rxr->rxr_m_head = NULL((void *)0); |
3200 | rxr->rxr_m_tail = &rxr->rxr_m_head; |
3201 | } |
3202 | |
3203 | cons++; |
3204 | cons &= mask; |
3205 | |
3206 | done = 1; |
3207 | } while (cons != prod); |
3208 | |
3209 | if (done) { |
3210 | rxr->rxr_cons = cons; |
3211 | if (ifiq_input(ifiq, &ml)) |
3212 | if_rxr_livelocked(&rxr->rxr_acct); |
3213 | ixl_rxfill(sc, rxr); |
3214 | } |
3215 | |
3216 | bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&rxr->rxr_mem),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& rxr->rxr_mem)->ixm_map)), (0), (((&rxr->rxr_mem) ->ixm_size)), (0x01|0x04)) |
3217 | 0, IXL_DMA_LEN(&rxr->rxr_mem),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& rxr->rxr_mem)->ixm_map)), (0), (((&rxr->rxr_mem) ->ixm_size)), (0x01|0x04)) |
3218 | BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& rxr->rxr_mem)->ixm_map)), (0), (((&rxr->rxr_mem) ->ixm_size)), (0x01|0x04)); |
3219 | |
3220 | return (done); |
3221 | } |
3222 | |
3223 | static void |
3224 | ixl_rxfill(struct ixl_softc *sc, struct ixl_rx_ring *rxr) |
3225 | { |
3226 | struct ixl_rx_rd_desc_16 *ring, *rxd; |
3227 | struct ixl_rx_map *rxm; |
3228 | bus_dmamap_t map; |
3229 | struct mbuf *m; |
3230 | unsigned int prod; |
3231 | unsigned int slots; |
3232 | unsigned int mask; |
3233 | int post = 0; |
3234 | |
3235 | slots = if_rxr_get(&rxr->rxr_acct, sc->sc_rx_ring_ndescs); |
3236 | if (slots == 0) |
3237 | return; |
3238 | |
3239 | prod = rxr->rxr_prod; |
3240 | |
3241 | ring = IXL_DMA_KVA(&rxr->rxr_mem)((void *)(&rxr->rxr_mem)->ixm_kva); |
3242 | mask = sc->sc_rx_ring_ndescs - 1; |
3243 | |
3244 | do { |
3245 | rxm = &rxr->rxr_maps[prod]; |
3246 | |
3247 | m = MCLGETL(NULL, M_DONTWAIT, MCLBYTES + ETHER_ALIGN)m_clget((((void *)0)), (0x0002), ((1 << 11) + 2)); |
3248 | if (m == NULL((void *)0)) |
3249 | break; |
3250 | m->m_datam_hdr.mh_data += (m->m_extM_dat.MH.MH_dat.MH_ext.ext_size - (MCLBYTES(1 << 11) + ETHER_ALIGN2)); |
3251 | m->m_lenm_hdr.mh_len = m->m_pkthdrM_dat.MH.MH_pkthdr.len = MCLBYTES(1 << 11) + ETHER_ALIGN2; |
3252 | |
3253 | map = rxm->rxm_map; |
3254 | |
3255 | if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m,(*(sc->sc_dmat)->_dmamap_load_mbuf)((sc->sc_dmat), ( map), (m), (0x0001)) |
3256 | BUS_DMA_NOWAIT)(*(sc->sc_dmat)->_dmamap_load_mbuf)((sc->sc_dmat), ( map), (m), (0x0001)) != 0) { |
3257 | m_freem(m); |
3258 | break; |
3259 | } |
3260 | |
3261 | rxm->rxm_m = m; |
3262 | |
3263 | bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (map), (0), (map->dm_mapsize), (0x01)) |
3264 | BUS_DMASYNC_PREREAD)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (map), (0), (map->dm_mapsize), (0x01)); |
3265 | |
3266 | rxd = &ring[prod]; |
3267 | |
3268 | htolem64(&rxd->paddr, map->dm_segs[0].ds_addr)(*(__uint64_t *)(&rxd->paddr) = ((__uint64_t)(map-> dm_segs[0].ds_addr))); |
3269 | rxd->haddr = htole64(0)((__uint64_t)(0)); |
3270 | |
3271 | prod++; |
3272 | prod &= mask; |
3273 | |
3274 | post = 1; |
3275 | } while (--slots); |
3276 | |
3277 | if_rxr_put(&rxr->rxr_acct, slots)do { (&rxr->rxr_acct)->rxr_alive -= (slots); } while (0); |
3278 | |
3279 | if (if_rxr_inuse(&rxr->rxr_acct)((&rxr->rxr_acct)->rxr_alive) == 0) |
3280 | timeout_add(&rxr->rxr_refill, 1); |
3281 | else if (post) { |
3282 | rxr->rxr_prod = prod; |
3283 | ixl_wr(sc, rxr->rxr_tail, prod)(((sc)->sc_memt)->write_4(((sc)->sc_memh), ((rxr-> rxr_tail)), ((prod)))); |
3284 | } |
3285 | } |
3286 | |
3287 | void |
3288 | ixl_rxrefill(void *arg) |
3289 | { |
3290 | struct ixl_rx_ring *rxr = arg; |
3291 | struct ixl_softc *sc = rxr->rxr_sc; |
3292 | |
3293 | ixl_rxfill(sc, rxr); |
3294 | } |
3295 | |
3296 | static int |
3297 | ixl_rxrinfo(struct ixl_softc *sc, struct if_rxrinfo *ifri) |
3298 | { |
3299 | struct ifnet *ifp = &sc->sc_ac.ac_if; |
3300 | struct if_rxring_info *ifr; |
3301 | struct ixl_rx_ring *ring; |
3302 | int i, rv; |
3303 | |
3304 | if (!ISSET(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) & (0x40))) |
3305 | return (ENOTTY25); |
3306 | |
3307 | ifr = mallocarray(sizeof(*ifr), ixl_nqueues(sc)(1 << (sc)->sc_nqueues), M_TEMP127, |
3308 | M_WAITOK0x0001|M_CANFAIL0x0004|M_ZERO0x0008); |
3309 | if (ifr == NULL((void *)0)) |
3310 | return (ENOMEM12); |
3311 | |
3312 | for (i = 0; i < ixl_nqueues(sc)(1 << (sc)->sc_nqueues); i++) { |
3313 | ring = ifp->if_iqs[i]->ifiq_softc_ifiq_ptr._ifiq_softc; |
3314 | ifr[i].ifr_size = MCLBYTES(1 << 11); |
3315 | snprintf(ifr[i].ifr_name, sizeof(ifr[i].ifr_name), "%d", i); |
3316 | ifr[i].ifr_info = ring->rxr_acct; |
3317 | } |
3318 | |
3319 | rv = if_rxr_info_ioctl(ifri, ixl_nqueues(sc)(1 << (sc)->sc_nqueues), ifr); |
3320 | free(ifr, M_TEMP127, ixl_nqueues(sc)(1 << (sc)->sc_nqueues) * sizeof(*ifr)); |
3321 | |
3322 | return (rv); |
3323 | } |
3324 | |
3325 | static int |
3326 | ixl_intr0(void *xsc) |
3327 | { |
3328 | struct ixl_softc *sc = xsc; |
3329 | struct ifnet *ifp = &sc->sc_ac.ac_if; |
3330 | uint32_t icr; |
3331 | int rv = 0; |
3332 | |
3333 | ixl_intr_enable(sc)((((sc))->sc_memt)->write_4((((sc))->sc_memh), ((0x00038480 )), ((((0x1) << (0)) | ((0x1) << (1)) | (0x2 << 3))))); |
3334 | icr = ixl_rd(sc, I40E_PFINT_ICR0)(((sc)->sc_memt)->read_4(((sc)->sc_memh), ((0x00038780 )))); |
3335 | |
3336 | if (ISSET(icr, I40E_PFINT_ICR0_ADMINQ_MASK)((icr) & (((0x1) << (30))))) { |
3337 | ixl_atq_done(sc); |
3338 | task_add(systq, &sc->sc_arq_task); |
3339 | rv = 1; |
3340 | } |
3341 | |
3342 | if (ISSET(icr, I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK)((icr) & (((0x1) << (25))))) { |
3343 | task_add(systq, &sc->sc_link_state_task); |
3344 | rv = 1; |
3345 | } |
3346 | |
3347 | if (ISSET(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) & (0x40))) { |
3348 | struct ixl_vector *iv = sc->sc_vectors; |
3349 | if (ISSET(icr, I40E_INTR_NOTX_RX_MASK)((icr) & (((0x1) << (1))))) |
3350 | rv |= ixl_rxeof(sc, iv->iv_rxr); |
3351 | if (ISSET(icr, I40E_INTR_NOTX_TX_MASK)((icr) & (((0x1) << (2))))) |
3352 | rv |= ixl_txeof(sc, iv->iv_txr); |
3353 | } |
3354 | |
3355 | return (rv); |
3356 | } |
3357 | |
3358 | static int |
3359 | ixl_intr_vector(void *v) |
3360 | { |
3361 | struct ixl_vector *iv = v; |
3362 | struct ixl_softc *sc = iv->iv_sc; |
3363 | struct ifnet *ifp = &sc->sc_ac.ac_if; |
3364 | int rv = 0; |
3365 | |
3366 | if (ISSET(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) & (0x40))) { |
3367 | rv |= ixl_rxeof(sc, iv->iv_rxr); |
3368 | rv |= ixl_txeof(sc, iv->iv_txr); |
3369 | } |
3370 | |
3371 | ixl_wr(sc, I40E_PFINT_DYN_CTLN(iv->iv_qid),(((sc)->sc_memt)->write_4(((sc)->sc_memh), (((0x00034800 + ((iv->iv_qid) * 4)))), ((((0x1) << (0)) | ((0x1) << (1)) | (0x2 << 3))))) |
3372 | I40E_PFINT_DYN_CTLN_INTENA_MASK |(((sc)->sc_memt)->write_4(((sc)->sc_memh), (((0x00034800 + ((iv->iv_qid) * 4)))), ((((0x1) << (0)) | ((0x1) << (1)) | (0x2 << 3))))) |
3373 | I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |(((sc)->sc_memt)->write_4(((sc)->sc_memh), (((0x00034800 + ((iv->iv_qid) * 4)))), ((((0x1) << (0)) | ((0x1) << (1)) | (0x2 << 3))))) |
3374 | (IXL_NOITR << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT))(((sc)->sc_memt)->write_4(((sc)->sc_memh), (((0x00034800 + ((iv->iv_qid) * 4)))), ((((0x1) << (0)) | ((0x1) << (1)) | (0x2 << 3))))); |
3375 | |
3376 | return (rv); |
3377 | } |
3378 | |
3379 | static void |
3380 | ixl_link_state_update_iaq(struct ixl_softc *sc, void *arg) |
3381 | { |
3382 | struct ifnet *ifp = &sc->sc_ac.ac_if; |
3383 | struct ixl_aq_desc *iaq = arg; |
3384 | uint16_t retval; |
3385 | int link_state; |
3386 | int change = 0; |
3387 | |
3388 | retval = lemtoh16(&iaq->iaq_retval)((__uint16_t)(*(__uint16_t *)(&iaq->iaq_retval))); |
3389 | if (retval != IXL_AQ_RC_OK0) { |
3390 | printf("%s: LINK STATUS error %u\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), retval); |
3391 | return; |
3392 | } |
3393 | |
3394 | link_state = ixl_set_link_status(sc, iaq); |
3395 | mtx_enter(&sc->sc_link_state_mtx); |
3396 | if (ifp->if_link_stateif_data.ifi_link_state != link_state) { |
3397 | ifp->if_link_stateif_data.ifi_link_state = link_state; |
3398 | change = 1; |
3399 | } |
3400 | mtx_leave(&sc->sc_link_state_mtx); |
3401 | |
3402 | if (change) |
3403 | if_link_state_change(ifp); |
3404 | } |
3405 | |
3406 | static void |
3407 | ixl_link_state_update(void *xsc) |
3408 | { |
3409 | struct ixl_softc *sc = xsc; |
3410 | struct ixl_aq_desc *iaq; |
3411 | struct ixl_aq_link_param *param; |
3412 | |
3413 | memset(&sc->sc_link_state_atq, 0, sizeof(sc->sc_link_state_atq))__builtin_memset((&sc->sc_link_state_atq), (0), (sizeof (sc->sc_link_state_atq))); |
3414 | iaq = &sc->sc_link_state_atq.iatq_desc; |
3415 | iaq->iaq_opcode = htole16(IXL_AQ_OP_PHY_LINK_STATUS)((__uint16_t)(0x0607)); |
3416 | param = (struct ixl_aq_link_param *)iaq->iaq_param; |
3417 | param->notify = IXL_AQ_LINK_NOTIFY0x03; |
3418 | |
3419 | ixl_atq_set(&sc->sc_link_state_atq, ixl_link_state_update_iaq, iaq); |
3420 | ixl_atq_post(sc, &sc->sc_link_state_atq); |
3421 | } |
3422 | |
#if 0
/* Debug helper: dump an admin queue descriptor (disabled by default). */
static void
ixl_aq_dump(const struct ixl_softc *sc, const struct ixl_aq_desc *iaq)
{
	printf("%s: flags %b opcode %04x\n", DEVNAME(sc),
	    lemtoh16(&iaq->iaq_flags), IXL_AQ_FLAGS_FMT,
	    lemtoh16(&iaq->iaq_opcode));
	printf("%s: datalen %u retval %u\n", DEVNAME(sc),
	    lemtoh16(&iaq->iaq_datalen), lemtoh16(&iaq->iaq_retval));
	printf("%s: cookie %016llx\n", DEVNAME(sc), iaq->iaq_cookie);
	printf("%s: %08x %08x %08x %08x\n", DEVNAME(sc),
	    lemtoh32(&iaq->iaq_param[0]), lemtoh32(&iaq->iaq_param[1]),
	    lemtoh32(&iaq->iaq_param[2]), lemtoh32(&iaq->iaq_param[3]));
}
#endif
3438 | |
3439 | static void |
3440 | ixl_arq(void *xsc) |
3441 | { |
3442 | struct ixl_softc *sc = xsc; |
3443 | struct ixl_aq_desc *arq, *iaq; |
3444 | struct ixl_aq_buf *aqb; |
3445 | unsigned int cons = sc->sc_arq_cons; |
3446 | unsigned int prod; |
3447 | int done = 0; |
3448 | |
3449 | prod = ixl_rd(sc, sc->sc_aq_regs->arq_head)(((sc)->sc_memt)->read_4(((sc)->sc_memh), ((sc->sc_aq_regs ->arq_head)))) & |
3450 | sc->sc_aq_regs->arq_head_mask; |
3451 | |
3452 | if (cons == prod) |
3453 | goto done; |
3454 | |
3455 | arq = IXL_DMA_KVA(&sc->sc_arq)((void *)(&sc->sc_arq)->ixm_kva); |
3456 | |
3457 | bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& sc->sc_arq)->ixm_map)), (0), (((&sc->sc_arq)-> ixm_size)), (0x02|0x08)) |
3458 | 0, IXL_DMA_LEN(&sc->sc_arq),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& sc->sc_arq)->ixm_map)), (0), (((&sc->sc_arq)-> ixm_size)), (0x02|0x08)) |
3459 | BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& sc->sc_arq)->ixm_map)), (0), (((&sc->sc_arq)-> ixm_size)), (0x02|0x08)); |
3460 | |
3461 | do { |
3462 | iaq = &arq[cons]; |
3463 | |
3464 | aqb = SIMPLEQ_FIRST(&sc->sc_arq_live)((&sc->sc_arq_live)->sqh_first); |
3465 | SIMPLEQ_REMOVE_HEAD(&sc->sc_arq_live, aqb_entry)do { if (((&sc->sc_arq_live)->sqh_first = (&sc-> sc_arq_live)->sqh_first->aqb_entry.sqe_next) == ((void * )0)) (&sc->sc_arq_live)->sqh_last = &(&sc-> sc_arq_live)->sqh_first; } while (0); |
3466 | bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, IXL_AQ_BUFLEN,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (aqb-> aqb_map), (0), (4096), (0x02)) |
3467 | BUS_DMASYNC_POSTREAD)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (aqb-> aqb_map), (0), (4096), (0x02)); |
3468 | |
3469 | switch (iaq->iaq_opcode) { |
3470 | case HTOLE16(IXL_AQ_OP_PHY_LINK_STATUS)(0x0607): |
3471 | ixl_link_state_update_iaq(sc, iaq); |
3472 | break; |
3473 | } |
3474 | |
3475 | memset(iaq, 0, sizeof(*iaq))__builtin_memset((iaq), (0), (sizeof(*iaq))); |
3476 | SIMPLEQ_INSERT_TAIL(&sc->sc_arq_idle, aqb, aqb_entry)do { (aqb)->aqb_entry.sqe_next = ((void *)0); *(&sc-> sc_arq_idle)->sqh_last = (aqb); (&sc->sc_arq_idle)-> sqh_last = &(aqb)->aqb_entry.sqe_next; } while (0); |
3477 | if_rxr_put(&sc->sc_arq_ring, 1)do { (&sc->sc_arq_ring)->rxr_alive -= (1); } while ( 0); |
3478 | |
3479 | cons++; |
3480 | cons &= IXL_AQ_MASK(256 - 1); |
3481 | |
3482 | done = 1; |
3483 | } while (cons != prod); |
3484 | |
3485 | if (done && ixl_arq_fill(sc)) |
3486 | ixl_wr(sc, sc->sc_aq_regs->arq_tail, sc->sc_arq_prod)(((sc)->sc_memt)->write_4(((sc)->sc_memh), ((sc-> sc_aq_regs->arq_tail)), ((sc->sc_arq_prod)))); |
3487 | |
3488 | bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& sc->sc_arq)->ixm_map)), (0), (((&sc->sc_arq)-> ixm_size)), (0x01|0x04)) |
3489 | 0, IXL_DMA_LEN(&sc->sc_arq),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& sc->sc_arq)->ixm_map)), (0), (((&sc->sc_arq)-> ixm_size)), (0x01|0x04)) |
3490 | BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& sc->sc_arq)->ixm_map)), (0), (((&sc->sc_arq)-> ixm_size)), (0x01|0x04)); |
3491 | |
3492 | sc->sc_arq_cons = cons; |
3493 | |
3494 | done: |
3495 | ixl_intr_enable(sc)((((sc))->sc_memt)->write_4((((sc))->sc_memh), ((0x00038480 )), ((((0x1) << (0)) | ((0x1) << (1)) | (0x2 << 3))))); |
3496 | } |
3497 | |
3498 | static void |
3499 | ixl_atq_set(struct ixl_atq *iatq, |
3500 | void (*fn)(struct ixl_softc *, void *), void *arg) |
3501 | { |
3502 | iatq->iatq_fn = fn; |
3503 | iatq->iatq_arg = arg; |
3504 | } |
3505 | |
3506 | static void |
3507 | ixl_atq_post(struct ixl_softc *sc, struct ixl_atq *iatq) |
3508 | { |
3509 | struct ixl_aq_desc *atq, *slot; |
3510 | unsigned int prod; |
3511 | |
3512 | /* assert locked */ |
3513 | |
3514 | atq = IXL_DMA_KVA(&sc->sc_atq)((void *)(&sc->sc_atq)->ixm_kva); |
3515 | prod = sc->sc_atq_prod; |
3516 | slot = atq + prod; |
3517 | |
3518 | bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& sc->sc_atq)->ixm_map)), (0), (((&sc->sc_atq)-> ixm_size)), (0x08)) |
3519 | 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& sc->sc_atq)->ixm_map)), (0), (((&sc->sc_atq)-> ixm_size)), (0x08)); |
3520 | |
3521 | *slot = iatq->iatq_desc; |
3522 | slot->iaq_cookie = (uint64_t)iatq; |
3523 | |
3524 | bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& sc->sc_atq)->ixm_map)), (0), (((&sc->sc_atq)-> ixm_size)), (0x04)) |
3525 | 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& sc->sc_atq)->ixm_map)), (0), (((&sc->sc_atq)-> ixm_size)), (0x04)); |
3526 | |
3527 | prod++; |
3528 | prod &= IXL_AQ_MASK(256 - 1); |
3529 | sc->sc_atq_prod = prod; |
3530 | ixl_wr(sc, sc->sc_aq_regs->atq_tail, prod)(((sc)->sc_memt)->write_4(((sc)->sc_memh), ((sc-> sc_aq_regs->atq_tail)), ((prod)))); |
3531 | } |
3532 | |
3533 | static void |
3534 | ixl_atq_done(struct ixl_softc *sc) |
3535 | { |
3536 | struct ixl_aq_desc *atq, *slot; |
3537 | struct ixl_atq *iatq; |
3538 | unsigned int cons; |
3539 | unsigned int prod; |
3540 | |
3541 | prod = sc->sc_atq_prod; |
3542 | cons = sc->sc_atq_cons; |
3543 | |
3544 | if (prod == cons) |
3545 | return; |
3546 | |
3547 | atq = IXL_DMA_KVA(&sc->sc_atq)((void *)(&sc->sc_atq)->ixm_kva); |
3548 | |
3549 | bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& sc->sc_atq)->ixm_map)), (0), (((&sc->sc_atq)-> ixm_size)), (0x02|0x08)) |
3550 | 0, IXL_DMA_LEN(&sc->sc_atq),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& sc->sc_atq)->ixm_map)), (0), (((&sc->sc_atq)-> ixm_size)), (0x02|0x08)) |
3551 | BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& sc->sc_atq)->ixm_map)), (0), (((&sc->sc_atq)-> ixm_size)), (0x02|0x08)); |
3552 | |
3553 | do { |
3554 | slot = &atq[cons]; |
3555 | if (!ISSET(slot->iaq_flags, htole16(IXL_AQ_DD))((slot->iaq_flags) & (((__uint16_t)((1U << 0)))) )) |
3556 | break; |
3557 | |
3558 | iatq = (struct ixl_atq *)slot->iaq_cookie; |
3559 | iatq->iatq_desc = *slot; |
3560 | |
3561 | memset(slot, 0, sizeof(*slot))__builtin_memset((slot), (0), (sizeof(*slot))); |
3562 | |
3563 | (*iatq->iatq_fn)(sc, iatq->iatq_arg); |
3564 | |
3565 | cons++; |
3566 | cons &= IXL_AQ_MASK(256 - 1); |
3567 | } while (cons != prod); |
3568 | |
3569 | bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& sc->sc_atq)->ixm_map)), (0), (((&sc->sc_atq)-> ixm_size)), (0x01|0x04)) |
3570 | 0, IXL_DMA_LEN(&sc->sc_atq),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& sc->sc_atq)->ixm_map)), (0), (((&sc->sc_atq)-> ixm_size)), (0x01|0x04)) |
3571 | BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& sc->sc_atq)->ixm_map)), (0), (((&sc->sc_atq)-> ixm_size)), (0x01|0x04)); |
3572 | |
3573 | sc->sc_atq_cons = cons; |
3574 | } |
3575 | |
/*
 * ATQ completion callback used by ixl_atq_exec(): signal the
 * condition variable the submitting thread is sleeping on.
 */
static void
ixl_wakeup(struct ixl_softc *sc, void *arg)
{
	struct cond *c = arg;

	cond_signal(c);
}
3583 | |
3584 | static void |
3585 | ixl_atq_exec(struct ixl_softc *sc, struct ixl_atq *iatq, const char *wmesg) |
3586 | { |
3587 | struct cond c = COND_INITIALIZER(){ 1 }; |
3588 | |
3589 | KASSERT(iatq->iatq_desc.iaq_cookie == 0)((iatq->iatq_desc.iaq_cookie == 0) ? (void)0 : __assert("diagnostic " , "/usr/src/sys/dev/pci/if_ixl.c", 3589, "iatq->iatq_desc.iaq_cookie == 0" )); |
3590 | |
3591 | ixl_atq_set(iatq, ixl_wakeup, &c); |
3592 | ixl_atq_post(sc, iatq); |
3593 | |
3594 | cond_wait(&c, wmesg); |
3595 | } |
3596 | |
3597 | static int |
3598 | ixl_atq_poll(struct ixl_softc *sc, struct ixl_aq_desc *iaq, unsigned int tm) |
3599 | { |
3600 | struct ixl_aq_desc *atq, *slot; |
3601 | unsigned int prod; |
3602 | unsigned int t = 0; |
3603 | |
3604 | atq = IXL_DMA_KVA(&sc->sc_atq)((void *)(&sc->sc_atq)->ixm_kva); |
3605 | prod = sc->sc_atq_prod; |
3606 | slot = atq + prod; |
3607 | |
3608 | bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& sc->sc_atq)->ixm_map)), (0), (((&sc->sc_atq)-> ixm_size)), (0x08)) |
3609 | 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& sc->sc_atq)->ixm_map)), (0), (((&sc->sc_atq)-> ixm_size)), (0x08)); |
3610 | |
3611 | *slot = *iaq; |
3612 | slot->iaq_flags |= htole16(IXL_AQ_SI)((__uint16_t)((1U << 13))); |
3613 | |
3614 | bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& sc->sc_atq)->ixm_map)), (0), (((&sc->sc_atq)-> ixm_size)), (0x04)) |
3615 | 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& sc->sc_atq)->ixm_map)), (0), (((&sc->sc_atq)-> ixm_size)), (0x04)); |
3616 | |
3617 | prod++; |
3618 | prod &= IXL_AQ_MASK(256 - 1); |
3619 | sc->sc_atq_prod = prod; |
3620 | ixl_wr(sc, sc->sc_aq_regs->atq_tail, prod)(((sc)->sc_memt)->write_4(((sc)->sc_memh), ((sc-> sc_aq_regs->atq_tail)), ((prod)))); |
3621 | |
3622 | while (ixl_rd(sc, sc->sc_aq_regs->atq_head)(((sc)->sc_memt)->read_4(((sc)->sc_memh), ((sc->sc_aq_regs ->atq_head)))) != prod) { |
3623 | delaymsec(1)(*delay_func)(1000 * (1)); |
3624 | |
3625 | if (t++ > tm) |
3626 | return (ETIMEDOUT60); |
3627 | } |
3628 | |
3629 | bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& sc->sc_atq)->ixm_map)), (0), (((&sc->sc_atq)-> ixm_size)), (0x02)) |
3630 | 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTREAD)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& sc->sc_atq)->ixm_map)), (0), (((&sc->sc_atq)-> ixm_size)), (0x02)); |
3631 | *iaq = *slot; |
3632 | bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& sc->sc_atq)->ixm_map)), (0), (((&sc->sc_atq)-> ixm_size)), (0x01)) |
3633 | 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREREAD)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& sc->sc_atq)->ixm_map)), (0), (((&sc->sc_atq)-> ixm_size)), (0x01)); |
3634 | |
3635 | sc->sc_atq_cons = prod; |
3636 | |
3637 | return (0); |
3638 | } |
3639 | |
3640 | static int |
3641 | ixl_get_version(struct ixl_softc *sc) |
3642 | { |
3643 | struct ixl_aq_desc iaq; |
3644 | uint32_t fwbuild, fwver, apiver; |
3645 | |
3646 | memset(&iaq, 0, sizeof(iaq))__builtin_memset((&iaq), (0), (sizeof(iaq))); |
3647 | iaq.iaq_opcode = htole16(IXL_AQ_OP_GET_VERSION)((__uint16_t)(0x0001)); |
3648 | |
3649 | if (ixl_atq_poll(sc, &iaq, 2000) != 0) |
3650 | return (ETIMEDOUT60); |
3651 | if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)((__uint16_t)(0))) |
3652 | return (EIO5); |
3653 | |
3654 | fwbuild = lemtoh32(&iaq.iaq_param[1])((__uint32_t)(*(__uint32_t *)(&iaq.iaq_param[1]))); |
3655 | fwver = lemtoh32(&iaq.iaq_param[2])((__uint32_t)(*(__uint32_t *)(&iaq.iaq_param[2]))); |
3656 | apiver = lemtoh32(&iaq.iaq_param[3])((__uint32_t)(*(__uint32_t *)(&iaq.iaq_param[3]))); |
3657 | |
3658 | sc->sc_api_major = apiver & 0xffff; |
3659 | sc->sc_api_minor = (apiver >> 16) & 0xffff; |
3660 | |
3661 | printf(", FW %hu.%hu.%05u API %hu.%hu", (uint16_t)fwver, |
3662 | (uint16_t)(fwver >> 16), fwbuild, |
3663 | sc->sc_api_major, sc->sc_api_minor); |
3664 | |
3665 | return (0); |
3666 | } |
3667 | |
3668 | static int |
3669 | ixl_pxe_clear(struct ixl_softc *sc) |
3670 | { |
3671 | struct ixl_aq_desc iaq; |
3672 | |
3673 | memset(&iaq, 0, sizeof(iaq))__builtin_memset((&iaq), (0), (sizeof(iaq))); |
3674 | iaq.iaq_opcode = htole16(IXL_AQ_OP_CLEAR_PXE_MODE)((__uint16_t)(0x0110)); |
3675 | iaq.iaq_param[0] = htole32(0x2)((__uint32_t)(0x2)); |
3676 | |
3677 | if (ixl_atq_poll(sc, &iaq, 250) != 0) { |
3678 | printf(", CLEAR PXE MODE timeout\n"); |
3679 | return (-1); |
3680 | } |
3681 | |
3682 | switch (iaq.iaq_retval) { |
3683 | case HTOLE16(IXL_AQ_RC_OK)(0): |
3684 | case HTOLE16(IXL_AQ_RC_EEXIST)(13): |
3685 | break; |
3686 | default: |
3687 | printf(", CLEAR PXE MODE error\n"); |
3688 | return (-1); |
3689 | } |
3690 | |
3691 | return (0); |
3692 | } |
3693 | |
3694 | static int |
3695 | ixl_lldp_shut(struct ixl_softc *sc) |
3696 | { |
3697 | struct ixl_aq_desc iaq; |
3698 | |
3699 | memset(&iaq, 0, sizeof(iaq))__builtin_memset((&iaq), (0), (sizeof(iaq))); |
3700 | iaq.iaq_opcode = htole16(IXL_AQ_OP_LLDP_STOP_AGENT)((__uint16_t)(0x0a05)); |
3701 | iaq.iaq_param[0] = htole32(IXL_LLDP_SHUTDOWN)((__uint32_t)(0x1)); |
3702 | |
3703 | if (ixl_atq_poll(sc, &iaq, 250) != 0) { |
3704 | printf(", STOP LLDP AGENT timeout\n"); |
3705 | return (-1); |
3706 | } |
3707 | |
3708 | switch (iaq.iaq_retval) { |
3709 | case HTOLE16(IXL_AQ_RC_EMODE)(21): |
3710 | case HTOLE16(IXL_AQ_RC_EPERM)(1): |
3711 | /* ignore silently */ |
3712 | default: |
3713 | break; |
3714 | } |
3715 | |
3716 | return (0); |
3717 | } |
3718 | |
3719 | static int |
3720 | ixl_get_mac(struct ixl_softc *sc) |
3721 | { |
3722 | struct ixl_dmamem idm; |
3723 | struct ixl_aq_desc iaq; |
3724 | struct ixl_aq_mac_addresses *addrs; |
3725 | int rv; |
3726 | |
3727 | #ifdef __sparc64__ |
3728 | if (OF_getprop(PCITAG_NODE(sc->sc_tag), "local-mac-address", |
3729 | sc->sc_ac.ac_enaddr, ETHER_ADDR_LEN6) == ETHER_ADDR_LEN6) |
3730 | return (0); |
3731 | #endif |
3732 | |
3733 | if (ixl_dmamem_alloc(sc, &idm, sizeof(*addrs), 0) != 0) { |
3734 | printf(", unable to allocate mac addresses\n"); |
3735 | return (-1); |
3736 | } |
3737 | |
3738 | memset(&iaq, 0, sizeof(iaq))__builtin_memset((&iaq), (0), (sizeof(iaq))); |
3739 | iaq.iaq_flags = htole16(IXL_AQ_BUF)((__uint16_t)((1U << 12))); |
3740 | iaq.iaq_opcode = htole16(IXL_AQ_OP_MAC_ADDRESS_READ)((__uint16_t)(0x0107)); |
3741 | iaq.iaq_datalen = htole16(sizeof(*addrs))((__uint16_t)(sizeof(*addrs))); |
3742 | ixl_aq_dva(&iaq, IXL_DMA_DVA(&idm)((&idm)->ixm_map->dm_segs[0].ds_addr)); |
3743 | |
3744 | bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& idm)->ixm_map)), (0), (((&idm)->ixm_size)), (0x01)) |
3745 | BUS_DMASYNC_PREREAD)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& idm)->ixm_map)), (0), (((&idm)->ixm_size)), (0x01)); |
3746 | |
3747 | rv = ixl_atq_poll(sc, &iaq, 250); |
3748 | |
3749 | bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& idm)->ixm_map)), (0), (((&idm)->ixm_size)), (0x02)) |
3750 | BUS_DMASYNC_POSTREAD)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& idm)->ixm_map)), (0), (((&idm)->ixm_size)), (0x02)); |
3751 | |
3752 | if (rv != 0) { |
3753 | printf(", MAC ADDRESS READ timeout\n"); |
3754 | rv = -1; |
3755 | goto done; |
3756 | } |
3757 | if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)((__uint16_t)(0))) { |
3758 | printf(", MAC ADDRESS READ error\n"); |
3759 | rv = -1; |
3760 | goto done; |
3761 | } |
3762 | |
3763 | addrs = IXL_DMA_KVA(&idm)((void *)(&idm)->ixm_kva); |
3764 | if (!ISSET(iaq.iaq_param[0], htole32(IXL_AQ_MAC_PORT_VALID))((iaq.iaq_param[0]) & (((__uint32_t)((1U << 6)))))) { |
3765 | printf(", port address is not valid\n"); |
3766 | goto done; |
3767 | } |
3768 | |
3769 | memcpy(sc->sc_ac.ac_enaddr, addrs->port, ETHER_ADDR_LEN)__builtin_memcpy((sc->sc_ac.ac_enaddr), (addrs->port), ( 6)); |
3770 | rv = 0; |
3771 | |
3772 | done: |
3773 | ixl_dmamem_free(sc, &idm); |
3774 | return (rv); |
3775 | } |
3776 | |
3777 | static int |
3778 | ixl_get_switch_config(struct ixl_softc *sc) |
3779 | { |
3780 | struct ixl_dmamem idm; |
3781 | struct ixl_aq_desc iaq; |
3782 | struct ixl_aq_switch_config *hdr; |
3783 | struct ixl_aq_switch_config_element *elms, *elm; |
3784 | unsigned int nelm; |
3785 | int rv; |
3786 | |
3787 | if (ixl_dmamem_alloc(sc, &idm, IXL_AQ_BUFLEN4096, 0) != 0) { |
3788 | printf("%s: unable to allocate switch config buffer\n", |
3789 | DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
3790 | return (-1); |
3791 | } |
3792 | |
3793 | memset(&iaq, 0, sizeof(iaq))__builtin_memset((&iaq), (0), (sizeof(iaq))); |
3794 | iaq.iaq_flags = htole16(IXL_AQ_BUF |((__uint16_t)((1U << 12) | (4096 > 512 ? (1U << 9) : 0))) |
3795 | (IXL_AQ_BUFLEN > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0))((__uint16_t)((1U << 12) | (4096 > 512 ? (1U << 9) : 0))); |
3796 | iaq.iaq_opcode = htole16(IXL_AQ_OP_SWITCH_GET_CONFIG)((__uint16_t)(0x0200)); |
3797 | iaq.iaq_datalen = htole16(IXL_AQ_BUFLEN)((__uint16_t)(4096)); |
3798 | ixl_aq_dva(&iaq, IXL_DMA_DVA(&idm)((&idm)->ixm_map->dm_segs[0].ds_addr)); |
3799 | |
3800 | bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& idm)->ixm_map)), (0), (((&idm)->ixm_size)), (0x01)) |
3801 | BUS_DMASYNC_PREREAD)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& idm)->ixm_map)), (0), (((&idm)->ixm_size)), (0x01)); |
3802 | |
3803 | rv = ixl_atq_poll(sc, &iaq, 250); |
3804 | |
3805 | bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& idm)->ixm_map)), (0), (((&idm)->ixm_size)), (0x02)) |
3806 | BUS_DMASYNC_POSTREAD)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& idm)->ixm_map)), (0), (((&idm)->ixm_size)), (0x02)); |
3807 | |
3808 | if (rv != 0) { |
3809 | printf("%s: GET SWITCH CONFIG timeout\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
3810 | rv = -1; |
3811 | goto done; |
3812 | } |
3813 | if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)((__uint16_t)(0))) { |
3814 | printf("%s: GET SWITCH CONFIG error\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
3815 | rv = -1; |
3816 | goto done; |
3817 | } |
3818 | |
3819 | hdr = IXL_DMA_KVA(&idm)((void *)(&idm)->ixm_kva); |
3820 | elms = (struct ixl_aq_switch_config_element *)(hdr + 1); |
3821 | |
3822 | nelm = lemtoh16(&hdr->num_reported)((__uint16_t)(*(__uint16_t *)(&hdr->num_reported))); |
3823 | if (nelm < 1) { |
3824 | printf("%s: no switch config available\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
3825 | rv = -1; |
3826 | goto done; |
3827 | } |
3828 | |
3829 | #if 0 |
3830 | for (i = 0; i < nelm; i++) { |
3831 | elm = &elms[i]; |
3832 | |
3833 | printf("%s: type %x revision %u seid %04x\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), |
3834 | elm->type, elm->revision, lemtoh16(&elm->seid)((__uint16_t)(*(__uint16_t *)(&elm->seid)))); |
3835 | printf("%s: uplink %04x downlink %04x\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), |
3836 | lemtoh16(&elm->uplink_seid)((__uint16_t)(*(__uint16_t *)(&elm->uplink_seid))), |
3837 | lemtoh16(&elm->downlink_seid)((__uint16_t)(*(__uint16_t *)(&elm->downlink_seid)))); |
3838 | printf("%s: conntype %x scheduler %04x extra %04x\n", |
3839 | DEVNAME(sc)((sc)->sc_dev.dv_xname), elm->connection_type, |
3840 | lemtoh16(&elm->scheduler_id)((__uint16_t)(*(__uint16_t *)(&elm->scheduler_id))), |
3841 | lemtoh16(&elm->element_info)((__uint16_t)(*(__uint16_t *)(&elm->element_info)))); |
3842 | } |
3843 | #endif |
3844 | |
3845 | elm = &elms[0]; |
3846 | |
3847 | sc->sc_uplink_seid = elm->uplink_seid; |
3848 | sc->sc_downlink_seid = elm->downlink_seid; |
3849 | sc->sc_seid = elm->seid; |
3850 | |
3851 | if ((sc->sc_uplink_seid == htole16(0)((__uint16_t)(0))) != |
3852 | (sc->sc_downlink_seid == htole16(0)((__uint16_t)(0)))) { |
3853 | printf("%s: SEIDs are misconfigured\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
3854 | rv = -1; |
3855 | goto done; |
3856 | } |
3857 | |
3858 | done: |
3859 | ixl_dmamem_free(sc, &idm); |
3860 | return (rv); |
3861 | } |
3862 | |
3863 | static int |
3864 | ixl_phy_mask_ints(struct ixl_softc *sc) |
3865 | { |
3866 | struct ixl_aq_desc iaq; |
3867 | |
3868 | memset(&iaq, 0, sizeof(iaq))__builtin_memset((&iaq), (0), (sizeof(iaq))); |
3869 | iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_SET_EVENT_MASK)((__uint16_t)(0x0613)); |
3870 | iaq.iaq_param[2] = htole32(IXL_AQ_PHY_EV_MASK &((__uint32_t)(0x3ff & ~((1 << 1) | (1 << 8) | (1 << 2)))) |
3871 | ~(IXL_AQ_PHY_EV_LINK_UPDOWN | IXL_AQ_PHY_EV_MODULE_QUAL_FAIL |((__uint32_t)(0x3ff & ~((1 << 1) | (1 << 8) | (1 << 2)))) |
3872 | IXL_AQ_PHY_EV_MEDIA_NA))((__uint32_t)(0x3ff & ~((1 << 1) | (1 << 8) | (1 << 2)))); |
3873 | |
3874 | if (ixl_atq_poll(sc, &iaq, 250) != 0) { |
3875 | printf("%s: SET PHY EVENT MASK timeout\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
3876 | return (-1); |
3877 | } |
3878 | if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)((__uint16_t)(0))) { |
3879 | printf("%s: SET PHY EVENT MASK error\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
3880 | return (-1); |
3881 | } |
3882 | |
3883 | return (0); |
3884 | } |
3885 | |
3886 | static int |
3887 | ixl_get_phy_abilities(struct ixl_softc *sc,struct ixl_dmamem *idm) |
3888 | { |
3889 | struct ixl_aq_desc iaq; |
3890 | int rv; |
3891 | |
3892 | memset(&iaq, 0, sizeof(iaq))__builtin_memset((&iaq), (0), (sizeof(iaq))); |
3893 | iaq.iaq_flags = htole16(IXL_AQ_BUF |((__uint16_t)((1U << 12) | (((idm)->ixm_size) > 512 ? (1U << 9) : 0))) |
3894 | (IXL_DMA_LEN(idm) > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0))((__uint16_t)((1U << 12) | (((idm)->ixm_size) > 512 ? (1U << 9) : 0))); |
3895 | iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_GET_ABILITIES)((__uint16_t)(0x0600)); |
3896 | htolem16(&iaq.iaq_datalen, IXL_DMA_LEN(idm))(*(__uint16_t *)(&iaq.iaq_datalen) = ((__uint16_t)(((idm) ->ixm_size)))); |
3897 | iaq.iaq_param[0] = htole32(IXL_AQ_PHY_REPORT_INIT)((__uint32_t)((1 << 1))); |
3898 | ixl_aq_dva(&iaq, IXL_DMA_DVA(idm)((idm)->ixm_map->dm_segs[0].ds_addr)); |
3899 | |
3900 | bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, IXL_DMA_LEN(idm),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((idm )->ixm_map)), (0), (((idm)->ixm_size)), (0x01)) |
3901 | BUS_DMASYNC_PREREAD)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((idm )->ixm_map)), (0), (((idm)->ixm_size)), (0x01)); |
3902 | |
3903 | rv = ixl_atq_poll(sc, &iaq, 250); |
3904 | |
3905 | bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, IXL_DMA_LEN(idm),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((idm )->ixm_map)), (0), (((idm)->ixm_size)), (0x02)) |
3906 | BUS_DMASYNC_POSTREAD)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((idm )->ixm_map)), (0), (((idm)->ixm_size)), (0x02)); |
3907 | |
3908 | if (rv != 0) |
3909 | return (-1); |
3910 | |
3911 | return (lemtoh16(&iaq.iaq_retval)((__uint16_t)(*(__uint16_t *)(&iaq.iaq_retval)))); |
3912 | } |
3913 | |
3914 | static int |
3915 | ixl_get_phy_types(struct ixl_softc *sc, uint64_t *phy_types_ptr) |
3916 | { |
3917 | struct ixl_dmamem idm; |
3918 | struct ixl_aq_phy_abilities *phy; |
3919 | uint64_t phy_types; |
3920 | int rv; |
3921 | |
3922 | if (ixl_dmamem_alloc(sc, &idm, IXL_AQ_BUFLEN4096, 0) != 0) { |
3923 | printf("%s: unable to allocate phy abilities buffer\n", |
3924 | DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
3925 | return (-1); |
3926 | } |
3927 | |
3928 | rv = ixl_get_phy_abilities(sc, &idm); |
3929 | switch (rv) { |
3930 | case -1: |
3931 | printf("%s: GET PHY ABILITIES timeout\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
3932 | goto err; |
3933 | case IXL_AQ_RC_OK0: |
3934 | break; |
3935 | case IXL_AQ_RC_EIO5: |
3936 | /* API is too old to handle this command */ |
3937 | phy_types = 0; |
3938 | goto done; |
3939 | default: |
3940 | printf("%s: GET PHY ABILITIES error %u\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), rv); |
3941 | goto err; |
3942 | } |
3943 | |
3944 | phy = IXL_DMA_KVA(&idm)((void *)(&idm)->ixm_kva); |
3945 | |
3946 | phy_types = lemtoh32(&phy->phy_type)((__uint32_t)(*(__uint32_t *)(&phy->phy_type))); |
3947 | phy_types |= (uint64_t)phy->phy_type_ext << 32; |
3948 | |
3949 | done: |
3950 | *phy_types_ptr = phy_types; |
3951 | |
3952 | rv = 0; |
3953 | |
3954 | err: |
3955 | ixl_dmamem_free(sc, &idm); |
3956 | return (rv); |
3957 | } |
3958 | |
3959 | /* |
3960 | * this returns -2 on software/driver failure, -1 for problems |
3961 | * talking to the hardware, or the sff module type. |
3962 | */ |
3963 | |
3964 | static int |
3965 | ixl_get_module_type(struct ixl_softc *sc) |
3966 | { |
3967 | struct ixl_dmamem idm; |
3968 | struct ixl_aq_phy_abilities *phy; |
3969 | int rv; |
3970 | |
3971 | if (ixl_dmamem_alloc(sc, &idm, IXL_AQ_BUFLEN4096, 0) != 0) |
3972 | return (-2); |
3973 | |
3974 | rv = ixl_get_phy_abilities(sc, &idm); |
3975 | if (rv != IXL_AQ_RC_OK0) { |
3976 | rv = -1; |
3977 | goto done; |
3978 | } |
3979 | |
3980 | phy = IXL_DMA_KVA(&idm)((void *)(&idm)->ixm_kva); |
3981 | |
3982 | rv = phy->module_type[0]; |
3983 | |
3984 | done: |
3985 | ixl_dmamem_free(sc, &idm); |
3986 | return (rv); |
3987 | } |
3988 | |
3989 | static int |
3990 | ixl_get_link_status(struct ixl_softc *sc) |
3991 | { |
3992 | struct ixl_aq_desc iaq; |
3993 | struct ixl_aq_link_param *param; |
3994 | |
3995 | memset(&iaq, 0, sizeof(iaq))__builtin_memset((&iaq), (0), (sizeof(iaq))); |
3996 | iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_LINK_STATUS)((__uint16_t)(0x0607)); |
3997 | param = (struct ixl_aq_link_param *)iaq.iaq_param; |
3998 | param->notify = IXL_AQ_LINK_NOTIFY0x03; |
3999 | |
4000 | if (ixl_atq_poll(sc, &iaq, 250) != 0) { |
4001 | printf("%s: GET LINK STATUS timeout\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
4002 | return (-1); |
4003 | } |
4004 | if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)((__uint16_t)(0))) { |
4005 | printf("%s: GET LINK STATUS error\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
4006 | return (0); |
4007 | } |
4008 | |
4009 | sc->sc_ac.ac_if.if_link_stateif_data.ifi_link_state = ixl_set_link_status(sc, &iaq); |
4010 | |
4011 | return (0); |
4012 | } |
4013 | |
4014 | struct ixl_sff_ops { |
4015 | int (*open)(struct ixl_softc *sc, struct if_sffpage *, uint8_t *); |
4016 | int (*get)(struct ixl_softc *sc, struct if_sffpage *, size_t); |
4017 | int (*close)(struct ixl_softc *sc, struct if_sffpage *, uint8_t); |
4018 | }; |
4019 | |
4020 | static int |
4021 | ixl_sfp_open(struct ixl_softc *sc, struct if_sffpage *sff, uint8_t *page) |
4022 | { |
4023 | int error; |
4024 | |
4025 | if (sff->sff_addr != IFSFF_ADDR_EEPROM0xa0) |
4026 | return (0); |
4027 | |
4028 | error = ixl_sff_get_byte(sc, IFSFF_ADDR_EEPROM0xa0, 127, page); |
4029 | if (error != 0) |
4030 | return (error); |
4031 | if (*page == sff->sff_page) |
4032 | return (0); |
4033 | error = ixl_sff_set_byte(sc, IFSFF_ADDR_EEPROM0xa0, 127, sff->sff_page); |
4034 | if (error != 0) |
4035 | return (error); |
4036 | |
4037 | return (0); |
4038 | } |
4039 | |
4040 | static int |
4041 | ixl_sfp_get(struct ixl_softc *sc, struct if_sffpage *sff, size_t i) |
4042 | { |
4043 | return (ixl_sff_get_byte(sc, sff->sff_addr, i, &sff->sff_data[i])); |
4044 | } |
4045 | |
4046 | static int |
4047 | ixl_sfp_close(struct ixl_softc *sc, struct if_sffpage *sff, uint8_t page) |
4048 | { |
4049 | int error; |
4050 | |
4051 | if (sff->sff_addr != IFSFF_ADDR_EEPROM0xa0) |
4052 | return (0); |
4053 | |
4054 | if (page == sff->sff_page) |
4055 | return (0); |
4056 | |
4057 | error = ixl_sff_set_byte(sc, IFSFF_ADDR_EEPROM0xa0, 127, page); |
4058 | if (error != 0) |
4059 | return (error); |
4060 | |
4061 | return (0); |
4062 | } |
4063 | |
4064 | static const struct ixl_sff_ops ixl_sfp_ops = { |
4065 | ixl_sfp_open, |
4066 | ixl_sfp_get, |
4067 | ixl_sfp_close, |
4068 | }; |
4069 | |
4070 | static int |
4071 | ixl_qsfp_open(struct ixl_softc *sc, struct if_sffpage *sff, uint8_t *page) |
4072 | { |
4073 | if (sff->sff_addr != IFSFF_ADDR_EEPROM0xa0) |
4074 | return (EIO5); |
4075 | |
4076 | return (0); |
4077 | } |
4078 | |
4079 | static int |
4080 | ixl_qsfp_get(struct ixl_softc *sc, struct if_sffpage *sff, size_t i) |
4081 | { |
4082 | return (ixl_sff_get_byte(sc, sff->sff_page, i, &sff->sff_data[i])); |
4083 | } |
4084 | |
4085 | static int |
4086 | ixl_qsfp_close(struct ixl_softc *sc, struct if_sffpage *sff, uint8_t page) |
4087 | { |
4088 | return (0); |
4089 | } |
4090 | |
4091 | static const struct ixl_sff_ops ixl_qsfp_ops = { |
4092 | ixl_qsfp_open, |
4093 | ixl_qsfp_get, |
4094 | ixl_qsfp_close, |
4095 | }; |
4096 | |
4097 | static int |
4098 | ixl_get_sffpage(struct ixl_softc *sc, struct if_sffpage *sff) |
4099 | { |
4100 | const struct ixl_sff_ops *ops; |
4101 | uint8_t page; |
4102 | size_t i; |
4103 | int error; |
4104 | |
4105 | switch (ixl_get_module_type(sc)) { |
4106 | case -2: |
4107 | return (ENOMEM12); |
4108 | case -1: |
4109 | return (ENXIO6); |
4110 | case IXL_SFF8024_ID_SFP0x03: |
4111 | ops = &ixl_sfp_ops; |
4112 | break; |
4113 | case IXL_SFF8024_ID_QSFP0x0c: |
4114 | case IXL_SFF8024_ID_QSFP_PLUS0x0d: |
4115 | case IXL_SFF8024_ID_QSFP280x11: |
4116 | ops = &ixl_qsfp_ops; |
4117 | break; |
4118 | default: |
4119 | return (EOPNOTSUPP45); |
4120 | } |
4121 | |
4122 | error = (*ops->open)(sc, sff, &page); |
4123 | if (error != 0) |
4124 | return (error); |
4125 | |
4126 | for (i = 0; i < sizeof(sff->sff_data); i++) { |
4127 | error = (*ops->get)(sc, sff, i); |
4128 | if (error != 0) |
4129 | return (error); |
4130 | } |
4131 | |
4132 | error = (*ops->close)(sc, sff, page); |
Value stored to 'error' is never read | |
4133 | |
4134 | return (0); |
4135 | } |
4136 | |
4137 | static int |
4138 | ixl_sff_get_byte(struct ixl_softc *sc, uint8_t dev, uint32_t reg, uint8_t *p) |
4139 | { |
4140 | struct ixl_atq iatq; |
4141 | struct ixl_aq_desc *iaq; |
4142 | struct ixl_aq_phy_reg_access *param; |
4143 | |
4144 | memset(&iatq, 0, sizeof(iatq))__builtin_memset((&iatq), (0), (sizeof(iatq))); |
4145 | iaq = &iatq.iatq_desc; |
4146 | iaq->iaq_opcode = htole16(IXL_AQ_OP_PHY_GET_REGISTER)((__uint16_t)(0x0629)); |
4147 | param = (struct ixl_aq_phy_reg_access *)iaq->iaq_param; |
4148 | param->phy_iface = IXL_AQ_PHY_IF_MODULE2; |
4149 | param->dev_addr = dev; |
4150 | htolem32(¶m->reg, reg)(*(__uint32_t *)(¶m->reg) = ((__uint32_t)(reg))); |
4151 | |
4152 | ixl_atq_exec(sc, &iatq, "ixlsffget"); |
4153 | |
4154 | if (ISSET(sc->sc_ac.ac_if.if_flags, IFF_DEBUG)((sc->sc_ac.ac_if.if_flags) & (0x4))) { |
4155 | printf("%s: %s(dev 0x%02x, reg 0x%02x) -> %04x\n", |
4156 | DEVNAME(sc)((sc)->sc_dev.dv_xname), __func__, |
4157 | dev, reg, lemtoh16(&iaq->iaq_retval)((__uint16_t)(*(__uint16_t *)(&iaq->iaq_retval)))); |
4158 | } |
4159 | |
4160 | switch (iaq->iaq_retval) { |
4161 | case htole16(IXL_AQ_RC_OK)((__uint16_t)(0)): |
4162 | break; |
4163 | case htole16(IXL_AQ_RC_EBUSY)((__uint16_t)(12)): |
4164 | return (EBUSY16); |
4165 | case htole16(IXL_AQ_RC_ESRCH)((__uint16_t)(3)): |
4166 | return (ENODEV19); |
4167 | case htole16(IXL_AQ_RC_EIO)((__uint16_t)(5)): |
4168 | case htole16(IXL_AQ_RC_EINVAL)((__uint16_t)(14)): |
4169 | default: |
4170 | return (EIO5); |
4171 | } |
4172 | |
4173 | *p = lemtoh32(¶m->val)((__uint32_t)(*(__uint32_t *)(¶m->val))); |
4174 | |
4175 | return (0); |
4176 | } |
4177 | |
/*
 * ixl_sff_set_byte: write one byte to an SFP/QSFP module register via the
 * admin queue "PHY set register" command (opcode 0x0628), addressing the
 * module interface (IXL_AQ_PHY_IF_MODULE) at device address 'dev',
 * register 'reg', value 'v'.  Executes synchronously via ixl_atq_exec().
 * Returns 0 on success, or an errno (EBUSY/ENODEV/EIO) mapped from the
 * little-endian AQ return value.
 */
4178 | static int
4179 | ixl_sff_set_byte(struct ixl_softc *sc, uint8_t dev, uint32_t reg, uint8_t v)
4180 | {
4181 | 	struct ixl_atq iatq;
4182 | 	struct ixl_aq_desc *iaq;
4183 | 	struct ixl_aq_phy_reg_access *param;
4184 | 
	/* Build the descriptor; parameters live in the descriptor itself. */
4185 | 	memset(&iatq, 0, sizeof(iatq))__builtin_memset((&iatq), (0), (sizeof(iatq)));
4186 | 	iaq = &iatq.iatq_desc;
4187 | 	iaq->iaq_opcode = htole16(IXL_AQ_OP_PHY_SET_REGISTER)((__uint16_t)(0x0628));
4188 | 	param = (struct ixl_aq_phy_reg_access *)iaq->iaq_param;
4189 | 	param->phy_iface = IXL_AQ_PHY_IF_MODULE2;
4190 | 	param->dev_addr = dev;
4191 | 	htolem32(&param->reg, reg)(*(__uint32_t *)(&param->reg) = ((__uint32_t)(reg)));
4192 | 	htolem32(&param->val, v)(*(__uint32_t *)(&param->val) = ((__uint32_t)(v)));
4193 | 
4194 | 	ixl_atq_exec(sc, &iatq, "ixlsffset");
4195 | 
	/* With IFF_DEBUG set, trace the request and raw AQ return code. */
4196 | 	if (ISSET(sc->sc_ac.ac_if.if_flags, IFF_DEBUG)((sc->sc_ac.ac_if.if_flags) & (0x4))) {
4197 | 		printf("%s: %s(dev 0x%02x, reg 0x%02x, val 0x%02x) -> %04x\n",
4198 | 		    DEVNAME(sc)((sc)->sc_dev.dv_xname), __func__,
4199 | 		    dev, reg, v, lemtoh16(&iaq->iaq_retval)((__uint16_t)(*(__uint16_t *)(&iaq->iaq_retval))));
4200 | 	}
4201 | 
	/*
	 * Cases compare against pre-swapped constants so no byte swap of
	 * iaq_retval is needed here.  Unknown codes collapse to EIO.
	 */
4202 | 	switch (iaq->iaq_retval) {
4203 | 	case htole16(IXL_AQ_RC_OK)((__uint16_t)(0)):
4204 | 		break;
4205 | 	case htole16(IXL_AQ_RC_EBUSY)((__uint16_t)(12)):
4206 | 		return (EBUSY16);
4207 | 	case htole16(IXL_AQ_RC_ESRCH)((__uint16_t)(3)):
4208 | 		return (ENODEV19);
4209 | 	case htole16(IXL_AQ_RC_EIO)((__uint16_t)(5)):
4210 | 	case htole16(IXL_AQ_RC_EINVAL)((__uint16_t)(14)):
4211 | 	default:
4212 | 		return (EIO5);
4213 | 	}
4214 | 
4215 | 	return (0);
4216 | }
4217 | |
/*
 * ixl_get_vsi: fetch the VSI (Virtual Station Interface) parameters from
 * firmware via AQ GET_VSI_PARAMS (opcode 0x0212) into the sc_scratch DMA
 * buffer, keyed by the uplink SEID (sc_seid).  On success the firmware's
 * VSI number is cached in sc->sc_vsi_number.  Returns 0 on success, -1 on
 * AQ timeout or a non-OK AQ return code.
 */
4218 | static int
4219 | ixl_get_vsi(struct ixl_softc *sc)
4220 | {
4221 | 	struct ixl_dmamem *vsi = &sc->sc_scratch;
4222 | 	struct ixl_aq_desc iaq;
4223 | 	struct ixl_aq_vsi_param *param;
4224 | 	struct ixl_aq_vsi_reply *reply;
4225 | 	int rv;
4226 | 
4227 | 	/* grumble, vsi info isn't "known" at compile time */
4228 | 
	/* Indirect-buffer command: set LB flag when the buffer is "large". */
4229 | 	memset(&iaq, 0, sizeof(iaq))__builtin_memset((&iaq), (0), (sizeof(iaq)));
4230 | 	htolem16(&iaq.iaq_flags, IXL_AQ_BUF |(*(__uint16_t *)(&iaq.iaq_flags) = ((__uint16_t)((1U << 12) | (((vsi)->ixm_size) > 512 ? (1U << 9) : 0)) ))
4231 | 	    (IXL_DMA_LEN(vsi) > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0))(*(__uint16_t *)(&iaq.iaq_flags) = ((__uint16_t)((1U << 12) | (((vsi)->ixm_size) > 512 ? (1U << 9) : 0)) ));
4232 | 	iaq.iaq_opcode = htole16(IXL_AQ_OP_GET_VSI_PARAMS)((__uint16_t)(0x0212));
4233 | 	htolem16(&iaq.iaq_datalen, IXL_DMA_LEN(vsi))(*(__uint16_t *)(&iaq.iaq_datalen) = ((__uint16_t)(((vsi) ->ixm_size))));
4234 | 	ixl_aq_dva(&iaq, IXL_DMA_DVA(vsi)((vsi)->ixm_map->dm_segs[0].ds_addr));
4235 | 
4236 | 	param = (struct ixl_aq_vsi_param *)iaq.iaq_param;
4237 | 	param->uplink_seid = sc->sc_seid;
4238 | 
	/* Device writes the reply into the buffer: sync for device read-back. */
4239 | 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((vsi )->ixm_map)), (0), (((vsi)->ixm_size)), (0x01))
4240 | 	    BUS_DMASYNC_PREREAD)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((vsi )->ixm_map)), (0), (((vsi)->ixm_size)), (0x01));
4241 | 
4242 | 	rv = ixl_atq_poll(sc, &iaq, 250);
4243 | 
4244 | 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((vsi )->ixm_map)), (0), (((vsi)->ixm_size)), (0x02))
4245 | 	    BUS_DMASYNC_POSTREAD)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((vsi )->ixm_map)), (0), (((vsi)->ixm_size)), (0x02));
4246 | 
4247 | 	if (rv != 0) {
4248 | 		printf("%s: GET VSI timeout\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
4249 | 		return (-1);
4250 | 	}
4251 | 
4252 | 	if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)((__uint16_t)(0))) {
4253 | 		printf("%s: GET VSI error %u\n", DEVNAME(sc)((sc)->sc_dev.dv_xname),
4254 | 		    lemtoh16(&iaq.iaq_retval)((__uint16_t)(*(__uint16_t *)(&iaq.iaq_retval))));
4255 | 		return (-1);
4256 | 	}
4257 | 
	/* Reply data is returned in the descriptor's parameter area. */
4258 | 	reply = (struct ixl_aq_vsi_reply *)iaq.iaq_param;
4259 | 	sc->sc_vsi_number = reply->vsi_number;
4260 | 
4261 | 	return (0);
4262 | }
4263 | |
/*
 * ixl_set_vsi: push updated VSI configuration back to firmware via AQ
 * UPD_VSI_PARAMS (opcode 0x0211).  Edits the VSI data previously fetched
 * into sc_scratch by ixl_get_vsi(): a contiguous queue mapping covering
 * sc_nqueues queues on TC0, and port VLAN flags set so all tagged/untagged
 * frames pass with tags left in the packet (MODE_ALL | EMOD_NOTHING).
 * Returns 0 on success, -1 on AQ timeout or non-OK return code.
 */
4264 | static int
4265 | ixl_set_vsi(struct ixl_softc *sc)
4266 | {
4267 | 	struct ixl_dmamem *vsi = &sc->sc_scratch;
4268 | 	struct ixl_aq_desc iaq;
4269 | 	struct ixl_aq_vsi_param *param;
4270 | 	struct ixl_aq_vsi_data *data = IXL_DMA_KVA(vsi)((void *)(vsi)->ixm_kva);
4271 | 	int rv;
4272 | 
	/* Only the queue-map and VLAN sections of the VSI data are updated. */
4273 | 	data->valid_sections = htole16(IXL_AQ_VSI_VALID_QUEUE_MAP |((__uint16_t)((1 << 6) | (1 << 2)))
4274 | 	    IXL_AQ_VSI_VALID_VLAN)((__uint16_t)((1 << 6) | (1 << 2)));
4275 | 
4276 | 	CLR(data->mapping_flags, htole16(IXL_AQ_VSI_QUE_MAP_MASK))((data->mapping_flags) &= ~(((__uint16_t)(0x1))));
4277 | 	SET(data->mapping_flags, htole16(IXL_AQ_VSI_QUE_MAP_CONTIG))((data->mapping_flags) |= (((__uint16_t)(0x0))));
4278 | 	data->queue_mapping[0] = htole16(0)((__uint16_t)(0));
4279 | 	data->tc_mapping[0] = htole16((0 << IXL_AQ_VSI_TC_Q_OFFSET_SHIFT) |((__uint16_t)((0 << 0) | (sc->sc_nqueues << 9) ))
4280 | 	    (sc->sc_nqueues << IXL_AQ_VSI_TC_Q_NUMBER_SHIFT))((__uint16_t)((0 << 0) | (sc->sc_nqueues << 9) ));
4281 | 
4282 | 	CLR(data->port_vlan_flags,((data->port_vlan_flags) &= ~(((__uint16_t)((0x3 << 0) | (0x3 << 0x3)))))
4283 | 	    htole16(IXL_AQ_VSI_PVLAN_MODE_MASK | IXL_AQ_VSI_PVLAN_EMOD_MASK))((data->port_vlan_flags) &= ~(((__uint16_t)((0x3 << 0) | (0x3 << 0x3)))));
4284 | 	SET(data->port_vlan_flags,((data->port_vlan_flags) |= (((__uint16_t)((0x3 << 0 ) | (0x3 << 0x3)))))
4285 | 	    htole16(IXL_AQ_VSI_PVLAN_MODE_ALL | IXL_AQ_VSI_PVLAN_EMOD_NOTHING))((data->port_vlan_flags) |= (((__uint16_t)((0x3 << 0 ) | (0x3 << 0x3)))));
4286 | 
4287 | 	/* grumble, vsi info isn't "known" at compile time */
4288 | 
	/* Indirect write command (BUF|RD); LB flag when the buffer is large. */
4289 | 	memset(&iaq, 0, sizeof(iaq))__builtin_memset((&iaq), (0), (sizeof(iaq)));
4290 | 	htolem16(&iaq.iaq_flags, IXL_AQ_BUF | IXL_AQ_RD |(*(__uint16_t *)(&iaq.iaq_flags) = ((__uint16_t)((1U << 12) | (1U << 10) | (((vsi)->ixm_size) > 512 ? (1U << 9) : 0))))
4291 | 	    (IXL_DMA_LEN(vsi) > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0))(*(__uint16_t *)(&iaq.iaq_flags) = ((__uint16_t)((1U << 12) | (1U << 10) | (((vsi)->ixm_size) > 512 ? (1U << 9) : 0))));
4292 | 	iaq.iaq_opcode = htole16(IXL_AQ_OP_UPD_VSI_PARAMS)((__uint16_t)(0x0211));
4293 | 	htolem16(&iaq.iaq_datalen, IXL_DMA_LEN(vsi))(*(__uint16_t *)(&iaq.iaq_datalen) = ((__uint16_t)(((vsi) ->ixm_size))));
4294 | 	ixl_aq_dva(&iaq, IXL_DMA_DVA(vsi)((vsi)->ixm_map->dm_segs[0].ds_addr));
4295 | 
4296 | 	param = (struct ixl_aq_vsi_param *)iaq.iaq_param;
4297 | 	param->uplink_seid = sc->sc_seid;
4298 | 
	/* Device reads the buffer this time: PREWRITE/POSTWRITE bracketing. */
4299 | 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((vsi )->ixm_map)), (0), (((vsi)->ixm_size)), (0x04))
4300 | 	    BUS_DMASYNC_PREWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((vsi )->ixm_map)), (0), (((vsi)->ixm_size)), (0x04));
4301 | 
4302 | 	rv = ixl_atq_poll(sc, &iaq, 250);
4303 | 
4304 | 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((vsi )->ixm_map)), (0), (((vsi)->ixm_size)), (0x08))
4305 | 	    BUS_DMASYNC_POSTWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((vsi )->ixm_map)), (0), (((vsi)->ixm_size)), (0x08));
4306 | 
4307 | 	if (rv != 0) {
4308 | 		printf("%s: UPDATE VSI timeout\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
4309 | 		return (-1);
4310 | 	}
4311 | 
4312 | 	if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)((__uint16_t)(0))) {
4313 | 		printf("%s: UPDATE VSI error %u\n", DEVNAME(sc)((sc)->sc_dev.dv_xname),
4314 | 		    lemtoh16(&iaq.iaq_retval)((__uint16_t)(*(__uint16_t *)(&iaq.iaq_retval))));
4315 | 		return (-1);
4316 | 	}
4317 | 
4318 | 	return (0);
4319 | }
4320 | |
/*
 * ixl_search_phy_type: translate a firmware PHY type index (a bit position,
 * must be < 64) into the matching entry of ixl_phy_type_map by testing the
 * corresponding bit against each entry's phy_type mask.  Returns NULL when
 * the index is out of range or no entry matches.
 */
4321 | static const struct ixl_phy_type *
4322 | ixl_search_phy_type(uint8_t phy_type)
4323 | {
4324 | 	const struct ixl_phy_type *itype;
4325 | 	uint64_t mask;
4326 | 	unsigned int i;
4327 | 
	/* Guard the shift below: shifting by >= 64 would be undefined. */
4328 | 	if (phy_type >= 64)
4329 | 		return (NULL((void *)0));
4330 | 
4331 | 	mask = 1ULL << phy_type;
4332 | 
4333 | 	for (i = 0; i < nitems(ixl_phy_type_map)(sizeof((ixl_phy_type_map)) / sizeof((ixl_phy_type_map)[0])); i++) {
4334 | 		itype = &ixl_phy_type_map[i];
4335 | 
4336 | 		if (ISSET(itype->phy_type, mask)((itype->phy_type) & (mask)))
4337 | 			return (itype);
4338 | 	}
4339 | 
4340 | 	return (NULL((void *)0));
4341 | }
4342 | |
/*
 * ixl_search_link_speed: map the firmware link_speed bitfield to a baudrate
 * (net_speed) via ixl_speed_type_map; the first entry whose dev_speed bits
 * intersect link_speed wins.  Returns 0 when nothing matches.
 */
4343 | static uint64_t
4344 | ixl_search_link_speed(uint8_t link_speed)
4345 | {
4346 | 	const struct ixl_speed_type *type;
4347 | 	unsigned int i;
4348 | 
4349 | 	for (i = 0; i < nitems(ixl_speed_type_map)(sizeof((ixl_speed_type_map)) / sizeof((ixl_speed_type_map)[0 ])); i++) {
4350 | 		type = &ixl_speed_type_map[i];
4351 | 
4352 | 		if (ISSET(type->dev_speed, link_speed)((type->dev_speed) & (link_speed)))
4353 | 			return (type->net_speed);
4354 | 	}
4355 | 
4356 | 	return (0);
4357 | }
4358 | |
/*
 * ixl_set_link_status: decode a firmware link-status event/reply descriptor
 * into the softc's media state.  Updates sc_media_active, sc_media_status
 * and the interface baudrate, and returns the new link state
 * (LINK_STATE_DOWN or LINK_STATE_FULL_DUPLEX) for the caller to apply.
 */
4359 | static int
4360 | ixl_set_link_status(struct ixl_softc *sc, const struct ixl_aq_desc *iaq)
4361 | {
4362 | 	const struct ixl_aq_link_status *status;
4363 | 	const struct ixl_phy_type *itype;
4364 | 
4365 | 	uint64_t ifm_active = IFM_ETHER0x0000000000000100ULL;
4366 | 	uint64_t ifm_status = IFM_AVALID0x0000000000000001ULL;
4367 | 	int link_state = LINK_STATE_DOWN2;
4368 | 	uint64_t baudrate = 0;
4369 | 
	/* Link down: fall through with the defaults initialized above. */
4370 | 	status = (const struct ixl_aq_link_status *)iaq->iaq_param;
4371 | 	if (!ISSET(status->link_info, IXL_AQ_LINK_UP_FUNCTION)((status->link_info) & (0x01)))
4372 | 		goto done;
4373 | 
	/* This hardware only reports full duplex links. */
4374 | 	ifm_active |= IFM_FDX0x0000010000000000ULL;
4375 | 	ifm_status |= IFM_ACTIVE0x0000000000000002ULL;
4376 | 	link_state = LINK_STATE_FULL_DUPLEX6;
4377 | 
4378 | 	itype = ixl_search_phy_type(status->phy_type);
4379 | 	if (itype != NULL((void *)0))
4380 | 		ifm_active |= itype->ifm_type;
4381 | 
4382 | 	if (ISSET(status->an_info, IXL_AQ_LINK_PAUSE_TX)((status->an_info) & (0x20)))
4383 | 		ifm_active |= IFM_ETH_TXPAUSE0x0000000000040000ULL;
4384 | 	if (ISSET(status->an_info, IXL_AQ_LINK_PAUSE_RX)((status->an_info) & (0x40)))
4385 | 		ifm_active |= IFM_ETH_RXPAUSE0x0000000000020000ULL;
4386 | 
4387 | 	baudrate = ixl_search_link_speed(status->link_speed);
4388 | 
4389 | done:
4390 | 	/* NET_ASSERT_LOCKED() except during attach */
4391 | 	sc->sc_media_active = ifm_active;
4392 | 	sc->sc_media_status = ifm_status;
4393 | 	sc->sc_ac.ac_if.if_baudrateif_data.ifi_baudrate = baudrate;
4394 | 
4395 | 	return (link_state);
4396 | }
4397 | |
/*
 * ixl_restart_an: ask the firmware to restart PHY auto-negotiation with
 * link enabled (AQ opcode 0x0605).  Returns 0 on success, -1 on AQ poll
 * timeout or a non-OK AQ return code.
 */
4398 | static int
4399 | ixl_restart_an(struct ixl_softc *sc)
4400 | {
4401 | 	struct ixl_aq_desc iaq;
4402 | 
4403 | 	memset(&iaq, 0, sizeof(iaq))__builtin_memset((&iaq), (0), (sizeof(iaq)));
4404 | 	iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_RESTART_AN)((__uint16_t)(0x0605));
4405 | 	iaq.iaq_param[0] =
4406 | 	    htole32(IXL_AQ_PHY_RESTART_AN | IXL_AQ_PHY_LINK_ENABLE)((__uint32_t)((1 << 1) | (1 << 2)));
4407 | 
4408 | 	if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4409 | 		printf("%s: RESTART AN timeout\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
4410 | 		return (-1);
4411 | 	}
4412 | 	if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)((__uint16_t)(0))) {
4413 | 		printf("%s: RESTART AN error\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
4414 | 		return (-1);
4415 | 	}
4416 | 
4417 | 	return (0);
4418 | }
4419 | |
/*
 * ixl_add_macvlan: install a perfect-match MAC/VLAN filter on the VSI via
 * AQ ADD_MACVLAN (opcode 0x0250).  The single filter element is staged in
 * the sc_scratch DMA buffer; 'flags' is OR'ed into the element flags.
 * Returns the host-endian AQ return code (IXL_AQ_RC_*), or
 * IXL_AQ_RC_EINVAL on poll timeout — note callers get an AQ code here,
 * not an errno.
 */
4420 | static int
4421 | ixl_add_macvlan(struct ixl_softc *sc, uint8_t *macaddr, uint16_t vlan, uint16_t flags)
4422 | {
4423 | 	struct ixl_aq_desc iaq;
4424 | 	struct ixl_aq_add_macvlan *param;
4425 | 	struct ixl_aq_add_macvlan_elem *elem;
4426 | 
4427 | 	memset(&iaq, 0, sizeof(iaq))__builtin_memset((&iaq), (0), (sizeof(iaq)));
4428 | 	iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD)((__uint16_t)((1U << 12) | (1U << 10)));
4429 | 	iaq.iaq_opcode = htole16(IXL_AQ_OP_ADD_MACVLAN)((__uint16_t)(0x0250));
4430 | 	iaq.iaq_datalen = htole16(sizeof(*elem))((__uint16_t)(sizeof(*elem)));
4431 | 	ixl_aq_dva(&iaq, IXL_DMA_DVA(&sc->sc_scratch)((&sc->sc_scratch)->ixm_map->dm_segs[0].ds_addr));
4432 | 
	/* One address; 0x8000 marks the SEID field as valid. */
4433 | 	param = (struct ixl_aq_add_macvlan *)&iaq.iaq_param;
4434 | 	param->num_addrs = htole16(1)((__uint16_t)(1));
4435 | 	param->seid0 = htole16(0x8000)((__uint16_t)(0x8000)) | sc->sc_seid;
4436 | 	param->seid1 = 0;
4437 | 	param->seid2 = 0;
4438 | 
4439 | 	elem = IXL_DMA_KVA(&sc->sc_scratch)((void *)(&sc->sc_scratch)->ixm_kva);
4440 | 	memset(elem, 0, sizeof(*elem))__builtin_memset((elem), (0), (sizeof(*elem)));
4441 | 	memcpy(elem->macaddr, macaddr, ETHER_ADDR_LEN)__builtin_memcpy((elem->macaddr), (macaddr), (6));
4442 | 	elem->flags = htole16(IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH | flags)((__uint16_t)(0x0001 | flags));
4443 | 	elem->vlan = htole16(vlan)((__uint16_t)(vlan));
4444 | 
4445 | 	if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4446 | 		printf("%s: ADD_MACVLAN timeout\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
4447 | 		return (IXL_AQ_RC_EINVAL14);
4448 | 	}
4449 | 
4450 | 	return letoh16(iaq.iaq_retval)((__uint16_t)(iaq.iaq_retval));
4451 | }
4452 | |
/*
 * ixl_remove_macvlan: remove a perfect-match MAC/VLAN filter from the VSI
 * via AQ REMOVE_MACVLAN (opcode 0x0251).  Mirrors ixl_add_macvlan(): the
 * element is staged in sc_scratch, and the host-endian AQ return code is
 * returned (IXL_AQ_RC_EINVAL on poll timeout).
 */
4453 | static int
4454 | ixl_remove_macvlan(struct ixl_softc *sc, uint8_t *macaddr, uint16_t vlan, uint16_t flags)
4455 | {
4456 | 	struct ixl_aq_desc iaq;
4457 | 	struct ixl_aq_remove_macvlan *param;
4458 | 	struct ixl_aq_remove_macvlan_elem *elem;
4459 | 
4460 | 	memset(&iaq, 0, sizeof(iaq))__builtin_memset((&iaq), (0), (sizeof(iaq)));
4461 | 	iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD)((__uint16_t)((1U << 12) | (1U << 10)));
4462 | 	iaq.iaq_opcode = htole16(IXL_AQ_OP_REMOVE_MACVLAN)((__uint16_t)(0x0251));
4463 | 	iaq.iaq_datalen = htole16(sizeof(*elem))((__uint16_t)(sizeof(*elem)));
4464 | 	ixl_aq_dva(&iaq, IXL_DMA_DVA(&sc->sc_scratch)((&sc->sc_scratch)->ixm_map->dm_segs[0].ds_addr));
4465 | 
	/* One address; 0x8000 marks the SEID field as valid. */
4466 | 	param = (struct ixl_aq_remove_macvlan *)&iaq.iaq_param;
4467 | 	param->num_addrs = htole16(1)((__uint16_t)(1));
4468 | 	param->seid0 = htole16(0x8000)((__uint16_t)(0x8000)) | sc->sc_seid;
4469 | 	param->seid1 = 0;
4470 | 	param->seid2 = 0;
4471 | 
4472 | 	elem = IXL_DMA_KVA(&sc->sc_scratch)((void *)(&sc->sc_scratch)->ixm_kva);
4473 | 	memset(elem, 0, sizeof(*elem))__builtin_memset((elem), (0), (sizeof(*elem)));
4474 | 	memcpy(elem->macaddr, macaddr, ETHER_ADDR_LEN)__builtin_memcpy((elem->macaddr), (macaddr), (6));
4475 | 	elem->flags = htole16(IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH | flags)((__uint16_t)(0x0001 | flags));
4476 | 	elem->vlan = htole16(vlan)((__uint16_t)(vlan));
4477 | 
4478 | 	if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4479 | 		printf("%s: REMOVE_MACVLAN timeout\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
4480 | 		return (IXL_AQ_RC_EINVAL14);
4481 | 	}
4482 | 
4483 | 	return letoh16(iaq.iaq_retval)((__uint16_t)(iaq.iaq_retval));
4484 | }
4485 | |
/*
 * ixl_hmc: set up the Host Memory Cache (HMC) — host RAM the device uses
 * to store its LAN TX/RX queue contexts (FCoE object counts are left at 0).
 * The layout is: backing pages (sc_hmc_pd) described by page descriptors
 * collected into segment-descriptor tables (sc_hmc_sd), which are then
 * programmed into the PFHMC_SD* registers, one 4 KB SD page per iteration.
 * Finally each object type's base offset (in 512-byte units) and count are
 * written to its GLHMC_*BASE/*CNT registers.  Returns 0 on success, -1 on
 * sizing or allocation failure.
 */
4486 | static int
4487 | ixl_hmc(struct ixl_softc *sc)
4488 | {
	/* Per-object-type register set: obj count, minimum size, and the
	 * registers for max object size, base offset and count. */
4489 | 	struct {
4490 | 		uint32_t		count;
4491 | 		uint32_t		minsize;
4492 | 		bus_size_t		maxcnt;
4493 | 		bus_size_t		setoff;
4494 | 		bus_size_t		setcnt;
4495 | 	} regs[] = {
4496 | 		{
4497 | 			0,
4498 | 			IXL_HMC_TXQ_MINSIZE(94 + (7*128) + 1),
4499 | 			I40E_GLHMC_LANTXOBJSZ0x000C2004,
4500 | 			I40E_GLHMC_LANTXBASE(sc->sc_pf_id)(0x000C6200 + ((sc->sc_pf_id) * 4)),
4501 | 			I40E_GLHMC_LANTXCNT(sc->sc_pf_id)(0x000C6300 + ((sc->sc_pf_id) * 4)),
4502 | 		},
4503 | 		{
4504 | 			0,
4505 | 			IXL_HMC_RXQ_MINSIZE(201 + 1),
4506 | 			I40E_GLHMC_LANRXOBJSZ0x000C200c,
4507 | 			I40E_GLHMC_LANRXBASE(sc->sc_pf_id)(0x000C6400 + ((sc->sc_pf_id) * 4)),
4508 | 			I40E_GLHMC_LANRXCNT(sc->sc_pf_id)(0x000C6500 + ((sc->sc_pf_id) * 4)),
4509 | 		},
4510 | 		{
4511 | 			0,
4512 | 			0,
4513 | 			I40E_GLHMC_FCOEMAX0x000C2014,
4514 | 			I40E_GLHMC_FCOEDDPBASE(sc->sc_pf_id)(0x000C6600 + ((sc->sc_pf_id) * 4)),
4515 | 			I40E_GLHMC_FCOEDDPCNT(sc->sc_pf_id)(0x000C6700 + ((sc->sc_pf_id) * 4)),
4516 | 		},
4517 | 		{
4518 | 			0,
4519 | 			0,
4520 | 			I40E_GLHMC_FCOEFMAX0x000C20D0,
4521 | 			I40E_GLHMC_FCOEFBASE(sc->sc_pf_id)(0x000C6800 + ((sc->sc_pf_id) * 4)),
4522 | 			I40E_GLHMC_FCOEFCNT(sc->sc_pf_id)(0x000C6900 + ((sc->sc_pf_id) * 4)),
4523 | 		},
4524 | 	};
4525 | 	struct ixl_hmc_entry *e;
4526 | 	uint64_t size, dva;
4527 | 	uint8_t *kva;
4528 | 	uint64_t *sdpage;
4529 | 	unsigned int i;
4530 | 	int npages, tables;
4531 | 
4532 | 	CTASSERT(nitems(regs) <= nitems(sc->sc_hmc_entries))extern char  _ctassert[((sizeof((regs)) / sizeof((regs)[0])) <= (sizeof((sc->sc_hmc_entries)) / sizeof((sc->sc_hmc_entries )[0]))) ? 1 : -1 ] __attribute__((__unused__));
4533 | 
	/* Only the LAN TX/RX entries get a non-zero object count. */
4534 | 	regs[IXL_HMC_LAN_TX0].count = regs[IXL_HMC_LAN_RX1].count =
4535 | 	    ixl_rd(sc, I40E_GLHMC_LANQMAX)(((sc)->sc_memt)->read_4(((sc)->sc_memh), ((0x000C2008 ))));
4536 | 
	/*
	 * Compute each entry's object size (register holds log2 of the size
	 * in 8-byte units, hence the *8 check against minsize) and running
	 * base offset; accumulate the total backing-memory size.
	 */
4537 | 	size = 0;
4538 | 	for (i = 0; i < nitems(regs)(sizeof((regs)) / sizeof((regs)[0])); i++) {
4539 | 		e = &sc->sc_hmc_entries[i];
4540 | 
4541 | 		e->hmc_count = regs[i].count;
4542 | 		e->hmc_size = 1U << ixl_rd(sc, regs[i].maxcnt)(((sc)->sc_memt)->read_4(((sc)->sc_memh), ((regs[i]. maxcnt))));
4543 | 		e->hmc_base = size;
4544 | 
4545 | 		if ((e->hmc_size * 8) < regs[i].minsize) {
4546 | 			printf("%s: kernel hmc entry is too big\n",
4547 | 			    DEVNAME(sc)((sc)->sc_dev.dv_xname));
4548 | 			return (-1);
4549 | 		}
4550 | 
4551 | 		size += roundup(e->hmc_size * e->hmc_count, IXL_HMC_ROUNDUP)((((e->hmc_size * e->hmc_count)+((512)-1))/(512))*(512) );
4552 | 	}
4553 | 	size = roundup(size, IXL_HMC_PGSIZE)((((size)+((4096)-1))/(4096))*(4096));
4554 | 	npages = size / IXL_HMC_PGSIZE4096;
4555 | 
	/* One SD table covers IXL_HMC_PGS page descriptors (4 KB of PDs). */
4556 | 	tables = roundup(size, IXL_HMC_L2SZ)((((size)+(((4096 * (4096 / sizeof(uint64_t))))-1))/((4096 * ( 4096 / sizeof(uint64_t)))))*((4096 * (4096 / sizeof(uint64_t) )))) / IXL_HMC_L2SZ(4096 * (4096 / sizeof(uint64_t)));
4557 | 
4558 | 	if (ixl_dmamem_alloc(sc, &sc->sc_hmc_pd, size, IXL_HMC_PGSIZE4096) != 0) {
4559 | 		printf("%s: unable to allocate hmc pd memory\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
4560 | 		return (-1);
4561 | 	}
4562 | 
4563 | 	if (ixl_dmamem_alloc(sc, &sc->sc_hmc_sd, tables * IXL_HMC_PGSIZE4096,
4564 | 	    IXL_HMC_PGSIZE4096) != 0) {
4565 | 		printf("%s: unable to allocate hmc sd memory\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
4566 | 		ixl_dmamem_free(sc, &sc->sc_hmc_pd);
4567 | 		return (-1);
4568 | 	}
4569 | 
4570 | 	kva = IXL_DMA_KVA(&sc->sc_hmc_pd)((void *)(&sc->sc_hmc_pd)->ixm_kva);
4571 | 	memset(kva, 0, IXL_DMA_LEN(&sc->sc_hmc_pd))__builtin_memset((kva), (0), (((&sc->sc_hmc_pd)->ixm_size )));
4572 | 
4573 | 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& sc->sc_hmc_pd)->ixm_map)), (0), (((&sc->sc_hmc_pd )->ixm_size)), (0x01|0x04))
4574 | 	    0, IXL_DMA_LEN(&sc->sc_hmc_pd),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& sc->sc_hmc_pd)->ixm_map)), (0), (((&sc->sc_hmc_pd )->ixm_size)), (0x01|0x04))
4575 | 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& sc->sc_hmc_pd)->ixm_map)), (0), (((&sc->sc_hmc_pd )->ixm_size)), (0x01|0x04));
4576 | 
	/* Fill the SD tables with one valid PD entry per backing page. */
4577 | 	dva = IXL_DMA_DVA(&sc->sc_hmc_pd)((&sc->sc_hmc_pd)->ixm_map->dm_segs[0].ds_addr);
4578 | 	sdpage = IXL_DMA_KVA(&sc->sc_hmc_sd)((void *)(&sc->sc_hmc_sd)->ixm_kva);
4579 | 	for (i = 0; i < npages; i++) {
4580 | 		htolem64(sdpage++, dva | IXL_HMC_PDVALID)(*(__uint64_t *)(sdpage++) = ((__uint64_t)(dva | 1ULL)));
4581 | 
4582 | 		dva += IXL_HMC_PGSIZE4096;
4583 | 	}
4584 | 
4585 | 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_sd),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& sc->sc_hmc_sd)->ixm_map)), (0), (((&sc->sc_hmc_sd )->ixm_size)), (0x01|0x04))
4586 | 	    0, IXL_DMA_LEN(&sc->sc_hmc_sd),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& sc->sc_hmc_sd)->ixm_map)), (0), (((&sc->sc_hmc_sd )->ixm_size)), (0x01|0x04))
4587 | 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& sc->sc_hmc_sd)->ixm_map)), (0), (((&sc->sc_hmc_sd )->ixm_size)), (0x01|0x04));
4588 | 
	/* Program each SD table's address, PD count and valid bit, then
	 * issue a write command for SD index i (barrier orders the pair). */
4589 | 	dva = IXL_DMA_DVA(&sc->sc_hmc_sd)((&sc->sc_hmc_sd)->ixm_map->dm_segs[0].ds_addr);
4590 | 	for (i = 0; i < tables; i++) {
4591 | 		uint32_t count;
4592 | 
4593 | 		KASSERT(npages >= 0)((npages >= 0) ? (void)0 : __assert("diagnostic ", "/usr/src/sys/dev/pci/if_ixl.c" , 4593, "npages >= 0"));
4594 | 
4595 | 		count = (npages > IXL_HMC_PGS(4096 / sizeof(uint64_t))) ? IXL_HMC_PGS(4096 / sizeof(uint64_t)) : npages;
4596 | 
4597 | 		ixl_wr(sc, I40E_PFHMC_SDDATAHIGH, dva >> 32)(((sc)->sc_memt)->write_4(((sc)->sc_memh), ((0x000C0200 )), ((dva >> 32))));
4598 | 		ixl_wr(sc, I40E_PFHMC_SDDATALOW, dva |(((sc)->sc_memt)->write_4(((sc)->sc_memh), ((0x000C0100 )), ((dva | (count << 2) | (1U << 0)))))
4599 | 		    (count << I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) |(((sc)->sc_memt)->write_4(((sc)->sc_memh), ((0x000C0100 )), ((dva | (count << 2) | (1U << 0)))))
4600 | 		    (1U << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT))(((sc)->sc_memt)->write_4(((sc)->sc_memh), ((0x000C0100 )), ((dva | (count << 2) | (1U << 0)))));
4601 | 		ixl_barrier(sc, 0, sc->sc_mems, BUS_SPACE_BARRIER_WRITE)bus_space_barrier((sc)->sc_memt, (sc)->sc_memh, (0), (sc ->sc_mems), (0x02));
4602 | 		ixl_wr(sc, I40E_PFHMC_SDCMD,(((sc)->sc_memt)->write_4(((sc)->sc_memh), ((0x000C0000 )), (((1U << 31) | i))))
4603 | 		    (1U << I40E_PFHMC_SDCMD_PMSDWR_SHIFT) | i)(((sc)->sc_memt)->write_4(((sc)->sc_memh), ((0x000C0000 )), (((1U << 31) | i))));
4604 | 
4605 | 		npages -= IXL_HMC_PGS(4096 / sizeof(uint64_t));
4606 | 		dva += IXL_HMC_PGSIZE4096;
4607 | 	}
4608 | 
	/* Tell the device where each object type lives and how many exist. */
4609 | 	for (i = 0; i < nitems(regs)(sizeof((regs)) / sizeof((regs)[0])); i++) {
4610 | 		e = &sc->sc_hmc_entries[i];
4611 | 
4612 | 		ixl_wr(sc, regs[i].setoff, e->hmc_base / IXL_HMC_ROUNDUP)(((sc)->sc_memt)->write_4(((sc)->sc_memh), ((regs[i] .setoff)), ((e->hmc_base / 512))));
4613 | 		ixl_wr(sc, regs[i].setcnt, e->hmc_count)(((sc)->sc_memt)->write_4(((sc)->sc_memh), ((regs[i] .setcnt)), ((e->hmc_count))));
4614 | 	}
4615 | 
4616 | 	return (0);
4617 | }
4618 | |
/*
 * ixl_hmc_free: release the HMC backing memory allocated by ixl_hmc()
 * (SD tables first, then the PD pages).
 */
4619 | static void
4620 | ixl_hmc_free(struct ixl_softc *sc)
4621 | {
4622 | 	ixl_dmamem_free(sc, &sc->sc_hmc_sd);
4623 | 	ixl_dmamem_free(sc, &sc->sc_hmc_pd);
4624 | }
4625 | |
/*
 * ixl_hmc_pack: bit-pack a host-format structure 's' into the device's HMC
 * object layout at 'd'.  Each entry of 'packing' copies 'width' bits from
 * byte offset pack->offset in the source to absolute bit position pack->lsb
 * in the destination.  Destination bits are OR'ed in, so the caller is
 * expected to provide a zeroed destination.  Fields are assumed to be
 * little-endian byte sequences in the source (bytes consumed low to high).
 */
4626 | static void
4627 | ixl_hmc_pack(void *d, const void *s, const struct ixl_hmc_pack *packing,
4628 |     unsigned int npacking)
4629 | {
4630 | 	uint8_t *dst = d;
4631 | 	const uint8_t *src = s;
4632 | 	unsigned int i;
4633 | 
4634 | 	for (i = 0; i < npacking; i++) {
4635 | 		const struct ixl_hmc_pack *pack = &packing[i];
	/* Split the destination bit position into byte offset + bit shift. */
4636 | 		unsigned int offset = pack->lsb / 8;
4637 | 		unsigned int align = pack->lsb % 8;
4638 | 		const uint8_t *in = src + pack->offset;
4639 | 		uint8_t *out = dst + offset;
4640 | 		int width = pack->width;
4641 | 		unsigned int inbits = 0;
4642 | 
	/* Unaligned start: merge the first partial byte, keep the carry. */
4643 | 		if (align) {
4644 | 			inbits = (*in++) << align;
4645 | 			*out++ |= (inbits & 0xff);
4646 | 			inbits >>= 8;
4647 | 
4648 | 			width -= 8 - align;
4649 | 		}
4650 | 
	/* Whole bytes: shift each source byte in above the carry bits. */
4651 | 		while (width >= 8) {
4652 | 			inbits |= (*in++) << align;
4653 | 			*out++ = (inbits & 0xff);
4654 | 			inbits >>= 8;
4655 | 
4656 | 			width -= 8;
4657 | 		}
4658 | 
	/* Trailing partial byte: mask to the remaining width. */
4659 | 		if (width > 0) {
4660 | 			inbits |= (*in) << align;
4661 | 			*out |= (inbits & ((1 << width) - 1));
4662 | 		}
4663 | 	}
4664 | }
4665 | |
/*
 * ixl_aqb_alloc: allocate one admin-queue receive buffer: the tracking
 * struct, a 4 KB (IXL_AQ_BUFLEN) DMA-able data area, and a loaded single-
 * segment DMA map for it.  Returns NULL on any failure, unwinding partial
 * allocations via the goto chain.  May sleep (M_WAITOK/PR_WAITOK).
 */
4666 | static struct ixl_aq_buf *
4667 | ixl_aqb_alloc(struct ixl_softc *sc)
4668 | {
4669 | 	struct ixl_aq_buf *aqb;
4670 | 
4671 | 	aqb = malloc(sizeof(*aqb), M_DEVBUF2, M_WAITOK0x0001);
4672 | 	if (aqb == NULL((void *)0))
4673 | 		return (NULL((void *)0));
4674 | 
4675 | 	aqb->aqb_data = dma_alloc(IXL_AQ_BUFLEN4096, PR_WAITOK0x0001);
4676 | 	if (aqb->aqb_data == NULL((void *)0))
4677 | 		goto free;
4678 | 
4679 | 	if (bus_dmamap_create(sc->sc_dmat, IXL_AQ_BUFLEN, 1,(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), (4096 ), (1), (4096), (0), (0x0000 | 0x0002 | 0x2000), (&aqb-> aqb_map))
4680 | 	    IXL_AQ_BUFLEN, 0,(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), (4096 ), (1), (4096), (0), (0x0000 | 0x0002 | 0x2000), (&aqb-> aqb_map))
4681 | 	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), (4096 ), (1), (4096), (0), (0x0000 | 0x0002 | 0x2000), (&aqb-> aqb_map))
4682 | 	    &aqb->aqb_map)(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), (4096 ), (1), (4096), (0), (0x0000 | 0x0002 | 0x2000), (&aqb-> aqb_map)) != 0)
4683 | 		goto dma_free;
4684 | 
4685 | 	if (bus_dmamap_load(sc->sc_dmat, aqb->aqb_map, aqb->aqb_data,(*(sc->sc_dmat)->_dmamap_load)((sc->sc_dmat), (aqb-> aqb_map), (aqb->aqb_data), (4096), (((void *)0)), (0x0000) )
4686 | 	    IXL_AQ_BUFLEN, NULL, BUS_DMA_WAITOK)(*(sc->sc_dmat)->_dmamap_load)((sc->sc_dmat), (aqb-> aqb_map), (aqb->aqb_data), (4096), (((void *)0)), (0x0000) ) != 0)
4687 | 		goto destroy;
4688 | 
4689 | 	return (aqb);
4690 | 
	/* Error unwind: release resources in reverse order of acquisition. */
4691 | destroy:
4692 | 	bus_dmamap_destroy(sc->sc_dmat, aqb->aqb_map)(*(sc->sc_dmat)->_dmamap_destroy)((sc->sc_dmat), (aqb ->aqb_map));
4693 | dma_free:
4694 | 	dma_free(aqb->aqb_data, IXL_AQ_BUFLEN4096);
4695 | free:
4696 | 	free(aqb, M_DEVBUF2, sizeof(*aqb));
4697 | 
4698 | 	return (NULL((void *)0));
4699 | }
4700 | |
/*
 * ixl_aqb_free: tear down an admin-queue buffer created by ixl_aqb_alloc():
 * unload and destroy the DMA map, free the data area, free the struct.
 */
4701 | static void
4702 | ixl_aqb_free(struct ixl_softc *sc, struct ixl_aq_buf *aqb)
4703 | {
4704 | 	bus_dmamap_unload(sc->sc_dmat, aqb->aqb_map)(*(sc->sc_dmat)->_dmamap_unload)((sc->sc_dmat), (aqb ->aqb_map));
4705 | 	bus_dmamap_destroy(sc->sc_dmat, aqb->aqb_map)(*(sc->sc_dmat)->_dmamap_destroy)((sc->sc_dmat), (aqb ->aqb_map));
4706 | 	dma_free(aqb->aqb_data, IXL_AQ_BUFLEN4096);
4707 | 	free(aqb, M_DEVBUF2, sizeof(*aqb));
4708 | }
4709 | |
/*
 * ixl_arq_fill: replenish the admin receive queue (ARQ) with buffers.
 * Takes up to IXL_AQ_NUM slots from the rxr accounting ring, attaching a
 * buffer (reused from sc_arq_idle or freshly allocated) to each descriptor
 * at the producer index.  Returns nonzero if at least one descriptor was
 * posted, so the caller knows to update the hardware tail.
 */
4710 | static int
4711 | ixl_arq_fill(struct ixl_softc *sc)
4712 | {
4713 | 	struct ixl_aq_buf *aqb;
4714 | 	struct ixl_aq_desc *arq, *iaq;
4715 | 	unsigned int prod = sc->sc_arq_prod;
4716 | 	unsigned int n;
4717 | 	int post = 0;
4718 | 
4719 | 	n = if_rxr_get(&sc->sc_arq_ring, IXL_AQ_NUM256);
4720 | 	arq = IXL_DMA_KVA(&sc->sc_arq)((void *)(&sc->sc_arq)->ixm_kva);
4721 | 
4722 | 	while (n > 0) {
	/* Prefer recycling an idle buffer; allocate only when none left. */
4723 | 		aqb = SIMPLEQ_FIRST(&sc->sc_arq_idle)((&sc->sc_arq_idle)->sqh_first);
4724 | 		if (aqb != NULL((void *)0))
4725 | 			SIMPLEQ_REMOVE_HEAD(&sc->sc_arq_idle, aqb_entry)do { if (((&sc->sc_arq_idle)->sqh_first = (&sc-> sc_arq_idle)->sqh_first->aqb_entry.sqe_next) == ((void * )0)) (&sc->sc_arq_idle)->sqh_last = &(&sc-> sc_arq_idle)->sqh_first; } while (0);
4726 | 		else if ((aqb = ixl_aqb_alloc(sc)) == NULL((void *)0))
4727 | 			break;
4728 | 
4729 | 		memset(aqb->aqb_data, 0, IXL_AQ_BUFLEN)__builtin_memset((aqb->aqb_data), (0), (4096));
4730 | 
4731 | 		bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, IXL_AQ_BUFLEN,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (aqb-> aqb_map), (0), (4096), (0x01))
4732 | 		    BUS_DMASYNC_PREREAD)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (aqb-> aqb_map), (0), (4096), (0x01));
4733 | 
	/* Reset the descriptor and point it at this buffer. */
4734 | 		iaq = &arq[prod];
4735 | 		iaq->iaq_flags = htole16(IXL_AQ_BUF |((__uint16_t)((1U << 12) | (4096 > 512 ? (1U << 9) : 0)))
4736 | 		    (IXL_AQ_BUFLEN > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0))((__uint16_t)((1U << 12) | (4096 > 512 ? (1U << 9) : 0)));
4737 | 		iaq->iaq_opcode = 0;
4738 | 		iaq->iaq_datalen = htole16(IXL_AQ_BUFLEN)((__uint16_t)(4096));
4739 | 		iaq->iaq_retval = 0;
4740 | 		iaq->iaq_cookie = 0;
4741 | 		iaq->iaq_param[0] = 0;
4742 | 		iaq->iaq_param[1] = 0;
4743 | 		ixl_aq_dva(iaq, aqb->aqb_map->dm_segs[0].ds_addr);
4744 | 
4745 | 		SIMPLEQ_INSERT_TAIL(&sc->sc_arq_live, aqb, aqb_entry)do { (aqb)->aqb_entry.sqe_next = ((void *)0); *(&sc-> sc_arq_live)->sqh_last = (aqb); (&sc->sc_arq_live)-> sqh_last = &(aqb)->aqb_entry.sqe_next; } while (0);
4746 | 
4747 | 		prod++;
4748 | 		prod &= IXL_AQ_MASK(256 - 1);
4749 | 
4750 | 		post = 1;
4751 | 
4752 | 		n--;
4753 | 	}
4754 | 
	/* Return any unused slots to the accounting ring. */
4755 | 	if_rxr_put(&sc->sc_arq_ring, n)do { (&sc->sc_arq_ring)->rxr_alive -= (n); } while ( 0);
4756 | 	sc->sc_arq_prod = prod;
4757 | 
4758 | 	return (post);
4759 | }
4760 | |
/*
 * ixl_arq_unfill: drain and free every buffer currently posted on the
 * admin receive queue's live list (used on teardown).
 */
4761 | static void
4762 | ixl_arq_unfill(struct ixl_softc *sc)
4763 | {
4764 | 	struct ixl_aq_buf *aqb;
4765 | 
4766 | 	while ((aqb = SIMPLEQ_FIRST(&sc->sc_arq_live)((&sc->sc_arq_live)->sqh_first)) != NULL((void *)0)) {
4767 | 		SIMPLEQ_REMOVE_HEAD(&sc->sc_arq_live, aqb_entry)do { if (((&sc->sc_arq_live)->sqh_first = (&sc-> sc_arq_live)->sqh_first->aqb_entry.sqe_next) == ((void * )0)) (&sc->sc_arq_live)->sqh_last = &(&sc-> sc_arq_live)->sqh_first; } while (0);
4768 | 
4769 | 		bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, IXL_AQ_BUFLEN,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (aqb-> aqb_map), (0), (4096), (0x02))
4770 | 		    BUS_DMASYNC_POSTREAD)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (aqb-> aqb_map), (0), (4096), (0x02));
4771 | 		ixl_aqb_free(sc, aqb);
4772 | 	}
4773 | }
4774 | |
/*
 * ixl_clear_hw: quiesce the device before (re)initialization.  Reads the
 * PF's interrupt/queue/VF allocations from the hardware, then masks all
 * interrupts, terminates all interrupt linked lists (EOL), pre-announces
 * TX queue disables, and finally disables every TX/RX queue and its
 * interrupt cause, with settle delays between the phases.
 */
4775 | static void
4776 | ixl_clear_hw(struct ixl_softc *sc)
4777 | {
4778 | 	uint32_t num_queues, base_queue;
4779 | 	uint32_t num_pf_int;
4780 | 	uint32_t num_vf_int;
4781 | 	uint32_t num_vfs;
4782 | 	uint32_t i, j;
4783 | 	uint32_t val;
4784 | 
4785 | 	/* get number of interrupts, queues, and vfs */
4786 | 	val = ixl_rd(sc, I40E_GLPCI_CNF2)(((sc)->sc_memt)->read_4(((sc)->sc_memh), ((0x000BE494 ))));
4787 | 	num_pf_int = (val & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK((0x7FF) << (2))) >>
4788 | 	    I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT2;
4789 | 	num_vf_int = (val & I40E_GLPCI_CNF2_MSI_X_VF_N_MASK((0x7FF) << (13))) >>
4790 | 	    I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT13;
4791 | 
	/* Queue range assigned to this PF; only valid when the bit says so. */
4792 | 	val = ixl_rd(sc, I40E_PFLAN_QALLOC)(((sc)->sc_memt)->read_4(((sc)->sc_memh), ((0x001C0400 ))));
4793 | 	base_queue = (val & I40E_PFLAN_QALLOC_FIRSTQ_MASK((0x7FF) << (0))) >>
4794 | 	    I40E_PFLAN_QALLOC_FIRSTQ_SHIFT0;
4795 | 	j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK((0x7FF) << (16))) >>
4796 | 	    I40E_PFLAN_QALLOC_LASTQ_SHIFT16;
4797 | 	if (val & I40E_PFLAN_QALLOC_VALID_MASK((0x1) << (31)))
4798 | 		num_queues = (j - base_queue) + 1;
4799 | 	else
4800 | 		num_queues = 0;
4801 | 
	/* VF range assigned to this PF, same validity scheme. */
4802 | 	val = ixl_rd(sc, I40E_PF_VT_PFALLOC)(((sc)->sc_memt)->read_4(((sc)->sc_memh), ((0x001C0500 ))));
4803 | 	i = (val & I40E_PF_VT_PFALLOC_FIRSTVF_MASK((0xFF) << (0))) >>
4804 | 	    I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT0;
4805 | 	j = (val & I40E_PF_VT_PFALLOC_LASTVF_MASK((0xFF) << (8))) >>
4806 | 	    I40E_PF_VT_PFALLOC_LASTVF_SHIFT8;
4807 | 	if (val & I40E_PF_VT_PFALLOC_VALID_MASK((0x1) << (31)))
4808 | 		num_vfs = (j - i) + 1;
4809 | 	else
4810 | 		num_vfs = 0;
4811 | 
4812 | 	/* stop all the interrupts */
4813 | 	ixl_wr(sc, I40E_PFINT_ICR0_ENA, 0)(((sc)->sc_memt)->write_4(((sc)->sc_memh), ((0x00038800 )), ((0))));
4814 | 	val = 0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT3;
4815 | 	for (i = 0; i < num_pf_int - 2; i++)
4816 | 		ixl_wr(sc, I40E_PFINT_DYN_CTLN(i), val)(((sc)->sc_memt)->write_4(((sc)->sc_memh), (((0x00034800 + ((i) * 4)))), ((val))));
4817 | 
4818 | 	/* Set the FIRSTQ_INDX field to 0x7FF in PFINT_LNKLSTx */
4819 | 	val = I40E_QUEUE_TYPE_EOL0x7ff << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT0;
4820 | 	ixl_wr(sc, I40E_PFINT_LNKLST0, val)(((sc)->sc_memt)->write_4(((sc)->sc_memh), ((0x00038500 )), ((val))));
4821 | 	for (i = 0; i < num_pf_int - 2; i++)
4822 | 		ixl_wr(sc, I40E_PFINT_LNKLSTN(i), val)(((sc)->sc_memt)->write_4(((sc)->sc_memh), (((0x00035000 + ((i) * 4)))), ((val))));
4823 | 	val = I40E_QUEUE_TYPE_EOL0x7ff << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT0;
4824 | 	for (i = 0; i < num_vfs; i++)
4825 | 		ixl_wr(sc, I40E_VPINT_LNKLST0(i), val)(((sc)->sc_memt)->write_4(((sc)->sc_memh), (((0x0002A800 + ((i) * 4)))), ((val))));
4826 | 	for (i = 0; i < num_vf_int - 2; i++)
4827 | 		ixl_wr(sc, I40E_VPINT_LNKLSTN(i), val)(((sc)->sc_memt)->write_4(((sc)->sc_memh), (((0x00025000 + ((i) * 4)))), ((val))));
4828 | 
4829 | 	/* warn the HW of the coming Tx disables */
4830 | 	for (i = 0; i < num_queues; i++) {
	/* TXPRE_QDIS registers are banked: 128 queues per register block. */
4831 | 		uint32_t abs_queue_idx = base_queue + i;
4832 | 		uint32_t reg_block = 0;
4833 | 
4834 | 		if (abs_queue_idx >= 128) {
4835 | 			reg_block = abs_queue_idx / 128;
4836 | 			abs_queue_idx %= 128;
4837 | 		}
4838 | 
4839 | 		val = ixl_rd(sc, I40E_GLLAN_TXPRE_QDIS(reg_block))(((sc)->sc_memt)->read_4(((sc)->sc_memh), (((0x000e6500 + ((reg_block) * 4))))));
4840 | 		val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK((0x7FF) << (0));
4841 | 		val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT0);
4842 | 		val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK((0x1) << (30));
4843 | 
4844 | 		ixl_wr(sc, I40E_GLLAN_TXPRE_QDIS(reg_block), val)(((sc)->sc_memt)->write_4(((sc)->sc_memh), (((0x000e6500 + ((reg_block) * 4)))), ((val))));
4845 | 	}
4846 | 	delaymsec(400)(*delay_func)(1000 * (400));
4847 | 
4848 | 	/* stop all the queues */
4849 | 	for (i = 0; i < num_queues; i++) {
4850 | 		ixl_wr(sc, I40E_QINT_TQCTL(i), 0)(((sc)->sc_memt)->write_4(((sc)->sc_memh), (((0x0003C000 + ((i) * 4)))), ((0))));
4851 | 		ixl_wr(sc, I40E_QTX_ENA(i), 0)(((sc)->sc_memt)->write_4(((sc)->sc_memh), (((0x00100000 + ((i) * 4)))), ((0))));
4852 | 		ixl_wr(sc, I40E_QINT_RQCTL(i), 0)(((sc)->sc_memt)->write_4(((sc)->sc_memh), (((0x0003A000 + ((i) * 4)))), ((0))));
4853 | 		ixl_wr(sc, I40E_QRX_ENA(i), 0)(((sc)->sc_memt)->write_4(((sc)->sc_memh), (((0x00120000 + ((i) * 4)))), ((0))));
4854 | 	}
4855 | 
4856 | 	/* short wait for all queue disables to settle */
4857 | 	delaymsec(50)(*delay_func)(1000 * (50));
4858 | }
4859 | |
/*
 * Reset the PF (physical function).  First wait out any global reset
 * already in progress and wait for firmware to signal configuration
 * done, then (only if no global reset was observed) issue a PF
 * software reset and poll for its completion.
 *
 * Returns 0 on success, -1 on any polling timeout.  Called during
 * attach; the printf() strings start with ", " to continue the
 * autoconf attach line.
 */
static int
ixl_pf_reset(struct ixl_softc *sc)
{
	uint32_t cnt = 0;
	uint32_t cnt1 = 0;
	uint32_t reg = 0;
	uint32_t grst_del;

	/*
	 * Poll for Global Reset steady state in case of recent GRST.
	 * The grst delay value is in 100ms units, and we'll wait a
	 * couple counts longer to be sure we don't just miss the end.
	 */
	grst_del = ixl_rd(sc, I40E_GLGEN_RSTCTL)(((sc)->sc_memt)->read_4(((sc)->sc_memh), ((0x000B8180 ))));
	grst_del &= I40E_GLGEN_RSTCTL_GRSTDEL_MASK((0x3F) << (0));
	grst_del >>= I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT0;
	grst_del += 10;

	for (cnt = 0; cnt < grst_del; cnt++) {
		reg = ixl_rd(sc, I40E_GLGEN_RSTAT)(((sc)->sc_memt)->read_4(((sc)->sc_memh), ((0x000B8188 ))));
		if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK((0x3) << (0))))
			break;
		delaymsec(100)(*delay_func)(1000 * (100));
	}
	if (reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK((0x3) << (0))) {
		printf(", Global reset polling failed to complete\n");
		return (-1);
	}

	/* Now Wait for the FW to be ready */
	for (cnt1 = 0; cnt1 < I40E_PF_RESET_WAIT_COUNT200; cnt1++) {
		reg = ixl_rd(sc, I40E_GLNVM_ULD)(((sc)->sc_memt)->read_4(((sc)->sc_memh), ((0x000B6008 ))));
		reg &= (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK((0x1) << (3)) |
		    I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK((0x1) << (4)));
		/* both CORE and GLOBAL config-done bits must be set */
		if (reg == (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK((0x1) << (3)) |
		    I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK((0x1) << (4))))
			break;

		delaymsec(10)(*delay_func)(1000 * (10));
	}
	if (!(reg & (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK((0x1) << (3)) |
	    I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK((0x1) << (4))))) {
		printf(", wait for FW Reset complete timed out "
		    "(I40E_GLNVM_ULD = 0x%x)\n", reg);
		return (-1);
	}

	/*
	 * If there was a Global Reset in progress when we got here,
	 * we don't need to do the PF Reset
	 */
	if (cnt == 0) {
		/* request a PF software reset and poll until the HW
		 * clears the PFSWR bit (1ms per iteration) */
		reg = ixl_rd(sc, I40E_PFGEN_CTRL)(((sc)->sc_memt)->read_4(((sc)->sc_memh), ((0x00092400 ))));
		ixl_wr(sc, I40E_PFGEN_CTRL, reg | I40E_PFGEN_CTRL_PFSWR_MASK)(((sc)->sc_memt)->write_4(((sc)->sc_memh), ((0x00092400 )), ((reg | ((0x1) << (0))))));
		for (cnt = 0; cnt < I40E_PF_RESET_WAIT_COUNT200; cnt++) {
			reg = ixl_rd(sc, I40E_PFGEN_CTRL)(((sc)->sc_memt)->read_4(((sc)->sc_memh), ((0x00092400 ))));
			if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK((0x1) << (0))))
				break;
			delaymsec(1)(*delay_func)(1000 * (1));
		}
		if (reg & I40E_PFGEN_CTRL_PFSWR_MASK((0x1) << (0))) {
			printf(", PF reset polling failed to complete"
			    "(I40E_PFGEN_CTRL= 0x%x)\n", reg);
			return (-1);
		}
	}

	return (0);
}
4929 | |
/*
 * X710 flavour of the rd_ctl operation: read the control register at
 * address 'r' indirectly via an admin queue RX_CTL_READ command
 * (executed synchronously by ixl_atq_exec()).
 *
 * Returns the register value, or ~0U if the firmware reported an
 * error (the AQ return code is printed in that case).
 */
static uint32_t
ixl_710_rd_ctl(struct ixl_softc *sc, uint32_t r)
{
	struct ixl_atq iatq;
	struct ixl_aq_desc *iaq;
	uint16_t retval;

	memset(&iatq, 0, sizeof(iatq))__builtin_memset((&iatq), (0), (sizeof(iatq)));
	iaq = &iatq.iatq_desc;
	iaq->iaq_opcode = htole16(IXL_AQ_OP_RX_CTL_READ)((__uint16_t)(0x0206));
	/* param[1] carries the register address */
	htolem32(&iaq->iaq_param[1], r)(*(__uint32_t *)(&iaq->iaq_param[1]) = ((__uint32_t)(r )));

	ixl_atq_exec(sc, &iatq, "ixl710rd");

	retval = lemtoh16(&iaq->iaq_retval)((__uint16_t)(*(__uint16_t *)(&iaq->iaq_retval)));
	if (retval != IXL_AQ_RC_OK0) {
		printf("%s: %s failed (%u)\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), __func__, retval);
		return (~0U);
	}

	/* firmware places the register value in param[3] */
	return (lemtoh32(&iaq->iaq_param[3])((__uint32_t)(*(__uint32_t *)(&iaq->iaq_param[3]))));
}
4952 | |
/*
 * X710 flavour of the wr_ctl operation: write value 'v' to the
 * control register at address 'r' indirectly via an admin queue
 * RX_CTL_WRITE command.  Errors are only reported via printf();
 * there is no way for the caller to detect failure.
 */
static void
ixl_710_wr_ctl(struct ixl_softc *sc, uint32_t r, uint32_t v)
{
	struct ixl_atq iatq;
	struct ixl_aq_desc *iaq;
	uint16_t retval;

	memset(&iatq, 0, sizeof(iatq))__builtin_memset((&iatq), (0), (sizeof(iatq)));
	iaq = &iatq.iatq_desc;
	iaq->iaq_opcode = htole16(IXL_AQ_OP_RX_CTL_WRITE)((__uint16_t)(0x0207));
	/* param[1] = register address, param[3] = value to write */
	htolem32(&iaq->iaq_param[1], r)(*(__uint32_t *)(&iaq->iaq_param[1]) = ((__uint32_t)(r )));
	htolem32(&iaq->iaq_param[3], v)(*(__uint32_t *)(&iaq->iaq_param[3]) = ((__uint32_t)(v )));

	ixl_atq_exec(sc, &iatq, "ixl710wr");

	retval = lemtoh16(&iaq->iaq_retval)((__uint16_t)(*(__uint16_t *)(&iaq->iaq_retval)));
	if (retval != IXL_AQ_RC_OK0) {
		printf("%s: %s %08x=%08x failed (%u)\n",
		    DEVNAME(sc)((sc)->sc_dev.dv_xname), __func__, r, v, retval);
	}
}
4974 | |
/*
 * Program the RSS hash key on X710 hardware by writing each 32-bit
 * word of the key into the PFQF_HKEY register array via the
 * indirect control-register path.  Always returns 0.
 */
static int
ixl_710_set_rss_key(struct ixl_softc *sc, const struct ixl_rss_key *rsskey)
{
	unsigned int i;

	for (i = 0; i < nitems(rsskey->key)(sizeof((rsskey->key)) / sizeof((rsskey->key)[0])); i++)
		ixl_wr_ctl(sc, I40E_PFQF_HKEY(i)(0x00244800 + ((i) * 128)), rsskey->key[i]);

	return (0);
}
4985 | |
/*
 * Program the 128-entry RSS lookup table on X710 hardware.  Unlike
 * the key, the LUT registers are written with direct MMIO stores
 * (ixl_wr), not the indirect control path.  Always returns 0.
 */
static int
ixl_710_set_rss_lut(struct ixl_softc *sc, const struct ixl_rss_lut_128 *lut)
{
	unsigned int i;

	for (i = 0; i < nitems(lut->entries)(sizeof((lut->entries)) / sizeof((lut->entries)[0])); i++)
		ixl_wr(sc, I40E_PFQF_HLUT(i), lut->entries[i])(((sc)->sc_memt)->write_4(((sc)->sc_memh), (((0x00240000 + ((i) * 128)))), ((lut->entries[i]))));

	return (0);
}
4996 | |
/*
 * X722 flavour of rd_ctl: control registers are directly accessible,
 * so this is a plain MMIO read (no admin queue round trip).
 */
static uint32_t
ixl_722_rd_ctl(struct ixl_softc *sc, uint32_t r)
{
	return (ixl_rd(sc, r)(((sc)->sc_memt)->read_4(((sc)->sc_memh), ((r)))));
}
5002 | |
/*
 * X722 flavour of wr_ctl: a plain MMIO write, mirroring
 * ixl_722_rd_ctl().
 */
static void
ixl_722_wr_ctl(struct ixl_softc *sc, uint32_t r, uint32_t v)
{
	ixl_wr(sc, r, v)(((sc)->sc_memt)->write_4(((sc)->sc_memh), ((r)), (( v))));
}
5008 | |
/*
 * Set the RSS hash key on X722 hardware.  Not implemented yet (the
 * X722 programs RSS differently from the X710); returning 0 keeps
 * callers happy while doing nothing.
 */
static int
ixl_722_set_rss_key(struct ixl_softc *sc, const struct ixl_rss_key *rsskey)
{
	/* XXX */

	return (0);
}
5016 | |
/*
 * Set the RSS lookup table on X722 hardware.  Not implemented yet;
 * see ixl_722_set_rss_key() for the same situation.
 */
static int
ixl_722_set_rss_lut(struct ixl_softc *sc, const struct ixl_rss_lut_128 *lut)
{
	/* XXX */

	return (0);
}
5024 | |
/*
 * Allocate a single physically-contiguous (one segment) DMA memory
 * region of 'size' bytes with the requested alignment, map it into
 * kernel virtual memory and load it into a DMA map, zero-filled.
 *
 * Returns 0 on success and 1 on failure.  On failure all partially
 * acquired resources are released via the goto cleanup chain below.
 * On success the caller owns ixm and must release it with
 * ixl_dmamem_free().
 */
static int
ixl_dmamem_alloc(struct ixl_softc *sc, struct ixl_dmamem *ixm,
    bus_size_t size, u_int align)
{
	ixm->ixm_size = size;

	if (bus_dmamap_create(sc->sc_dmat, ixm->ixm_size, 1,(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), (ixm ->ixm_size), (1), (ixm->ixm_size), (0), (0x0000 | 0x0002 | 0x2000), (&ixm->ixm_map))
	    ixm->ixm_size, 0,(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), (ixm ->ixm_size), (1), (ixm->ixm_size), (0), (0x0000 | 0x0002 | 0x2000), (&ixm->ixm_map))
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), (ixm ->ixm_size), (1), (ixm->ixm_size), (0), (0x0000 | 0x0002 | 0x2000), (&ixm->ixm_map))
	    &ixm->ixm_map)(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), (ixm ->ixm_size), (1), (ixm->ixm_size), (0), (0x0000 | 0x0002 | 0x2000), (&ixm->ixm_map)) != 0)
		return (1);
	if (bus_dmamem_alloc(sc->sc_dmat, ixm->ixm_size,(*(sc->sc_dmat)->_dmamem_alloc)((sc->sc_dmat), (ixm-> ixm_size), (align), (0), (&ixm->ixm_seg), (1), (&ixm ->ixm_nsegs), (0x0000 | 0x1000))
	    align, 0, &ixm->ixm_seg, 1, &ixm->ixm_nsegs,(*(sc->sc_dmat)->_dmamem_alloc)((sc->sc_dmat), (ixm-> ixm_size), (align), (0), (&ixm->ixm_seg), (1), (&ixm ->ixm_nsegs), (0x0000 | 0x1000))
	    BUS_DMA_WAITOK | BUS_DMA_ZERO)(*(sc->sc_dmat)->_dmamem_alloc)((sc->sc_dmat), (ixm-> ixm_size), (align), (0), (&ixm->ixm_seg), (1), (&ixm ->ixm_nsegs), (0x0000 | 0x1000)) != 0)
		goto destroy;
	if (bus_dmamem_map(sc->sc_dmat, &ixm->ixm_seg, ixm->ixm_nsegs,(*(sc->sc_dmat)->_dmamem_map)((sc->sc_dmat), (&ixm ->ixm_seg), (ixm->ixm_nsegs), (ixm->ixm_size), (& ixm->ixm_kva), (0x0000))
	    ixm->ixm_size, &ixm->ixm_kva, BUS_DMA_WAITOK)(*(sc->sc_dmat)->_dmamem_map)((sc->sc_dmat), (&ixm ->ixm_seg), (ixm->ixm_nsegs), (ixm->ixm_size), (& ixm->ixm_kva), (0x0000)) != 0)
		goto free;
	if (bus_dmamap_load(sc->sc_dmat, ixm->ixm_map, ixm->ixm_kva,(*(sc->sc_dmat)->_dmamap_load)((sc->sc_dmat), (ixm-> ixm_map), (ixm->ixm_kva), (ixm->ixm_size), (((void *)0) ), (0x0000))
	    ixm->ixm_size, NULL, BUS_DMA_WAITOK)(*(sc->sc_dmat)->_dmamap_load)((sc->sc_dmat), (ixm-> ixm_map), (ixm->ixm_kva), (ixm->ixm_size), (((void *)0) ), (0x0000)) != 0)
		goto unmap;

	return (0);
	/* unwind in reverse order of acquisition */
unmap:
	bus_dmamem_unmap(sc->sc_dmat, ixm->ixm_kva, ixm->ixm_size)(*(sc->sc_dmat)->_dmamem_unmap)((sc->sc_dmat), (ixm-> ixm_kva), (ixm->ixm_size));
free:
	bus_dmamem_free(sc->sc_dmat, &ixm->ixm_seg, 1)(*(sc->sc_dmat)->_dmamem_free)((sc->sc_dmat), (& ixm->ixm_seg), (1));
destroy:
	bus_dmamap_destroy(sc->sc_dmat, ixm->ixm_map)(*(sc->sc_dmat)->_dmamap_destroy)((sc->sc_dmat), (ixm ->ixm_map));
	return (1);
}
5056 | |
/*
 * Release everything acquired by a successful ixl_dmamem_alloc(),
 * in reverse order: unload the map, unmap the KVA, free the memory
 * segment, and destroy the DMA map.
 */
static void
ixl_dmamem_free(struct ixl_softc *sc, struct ixl_dmamem *ixm)
{
	bus_dmamap_unload(sc->sc_dmat, ixm->ixm_map)(*(sc->sc_dmat)->_dmamap_unload)((sc->sc_dmat), (ixm ->ixm_map));
	bus_dmamem_unmap(sc->sc_dmat, ixm->ixm_kva, ixm->ixm_size)(*(sc->sc_dmat)->_dmamem_unmap)((sc->sc_dmat), (ixm-> ixm_kva), (ixm->ixm_size));
	bus_dmamem_free(sc->sc_dmat, &ixm->ixm_seg, 1)(*(sc->sc_dmat)->_dmamem_free)((sc->sc_dmat), (& ixm->ixm_seg), (1));
	bus_dmamap_destroy(sc->sc_dmat, ixm->ixm_map)(*(sc->sc_dmat)->_dmamap_destroy)((sc->sc_dmat), (ixm ->ixm_map));
}
5065 | |
5066 | #if NKSTAT0 > 0 |
5067 | |
/*
 * The kstat unit values are stored in struct ixl_counter's uint8_t
 * c_type field below; assert at compile time that they fit.
 */
CTASSERT(KSTAT_KV_U_NONE <= 0xffU)extern char _ctassert[(KSTAT_KV_U_NONE <= 0xffU) ? 1 : -1 ] __attribute__((__unused__));
CTASSERT(KSTAT_KV_U_PACKETS <= 0xffU)extern char _ctassert[(KSTAT_KV_U_PACKETS <= 0xffU) ? 1 : - 1 ] __attribute__((__unused__));
CTASSERT(KSTAT_KV_U_BYTES <= 0xffU)extern char _ctassert[(KSTAT_KV_U_BYTES <= 0xffU) ? 1 : -1 ] __attribute__((__unused__));
5071 | |
/*
 * Description of one hardware statistics counter exported via kstat.
 */
struct ixl_counter {
	const char		*c_name;	/* kstat key name */
	uint32_t		 c_base;	/* counter register base address */
	uint8_t			 c_width;	/* counter width in bits (32 or 48) */
	uint8_t			 c_type;	/* KSTAT_KV_U_* unit */
};
5078 | |
/*
 * Port (MAC) level statistics counters, indexed by the port number
 * in ixl_rd_counters().  Counters narrower than 64 bits wrap in
 * hardware; ixl_kstat_read() accounts for that when accumulating.
 */
const struct ixl_counter ixl_port_counters[] = {
	/* GORC */
	{ "rx bytes",		0x00300000, 48, KSTAT_KV_U_BYTES },
	/* MLFC */
	{ "mac local errs",	0x00300020, 32, KSTAT_KV_U_NONE },
	/* MRFC */
	{ "mac remote errs",	0x00300040, 32, KSTAT_KV_U_NONE },
	/* MSPDC */
	{ "mac short",		0x00300060, 32, KSTAT_KV_U_PACKETS },
	/* CRCERRS */
	{ "crc errs",		0x00300080, 32, KSTAT_KV_U_PACKETS },
	/* RLEC */
	{ "rx len errs",	0x003000a0, 32, KSTAT_KV_U_PACKETS },
	/* ERRBC */
	{ "byte errs",		0x003000c0, 32, KSTAT_KV_U_PACKETS },
	/* ILLERRC */
	{ "illegal byte",	0x003000d0, 32, KSTAT_KV_U_PACKETS },
	/* RUC */
	{ "rx undersize",	0x00300100, 32, KSTAT_KV_U_PACKETS },
	/* ROC */
	{ "rx oversize",	0x00300120, 32, KSTAT_KV_U_PACKETS },
	/* LXONRXCNT */
	{ "rx link xon",	0x00300140, 32, KSTAT_KV_U_PACKETS },
	/* LXOFFRXCNT */
	{ "rx link xoff",	0x00300160, 32, KSTAT_KV_U_PACKETS },

	/* Priority XON Received Count */
	/* Priority XOFF Received Count */
	/* Priority XON to XOFF Count */

	/* PRC64 */
	{ "rx 64B",		0x00300480, 48, KSTAT_KV_U_PACKETS },
	/* PRC127 */
	{ "rx 65-127B",		0x003004A0, 48, KSTAT_KV_U_PACKETS },
	/* PRC255 */
	{ "rx 128-255B",	0x003004C0, 48, KSTAT_KV_U_PACKETS },
	/* PRC511 */
	{ "rx 256-511B",	0x003004E0, 48, KSTAT_KV_U_PACKETS },
	/* PRC1023 */
	{ "rx 512-1023B",	0x00300500, 48, KSTAT_KV_U_PACKETS },
	/* PRC1522 */
	{ "rx 1024-1522B",	0x00300520, 48, KSTAT_KV_U_PACKETS },
	/* PRC9522 */
	{ "rx 1523-9522B",	0x00300540, 48, KSTAT_KV_U_PACKETS },
	/* ROC */
	{ "rx fragment",	0x00300560, 32, KSTAT_KV_U_PACKETS },
	/* RJC */
	{ "rx jabber",		0x00300580, 32, KSTAT_KV_U_PACKETS },
	/* UPRC */
	{ "rx ucasts",		0x003005a0, 48, KSTAT_KV_U_PACKETS },
	/* MPRC */
	{ "rx mcasts",		0x003005c0, 48, KSTAT_KV_U_PACKETS },
	/* BPRC */
	{ "rx bcasts",		0x003005e0, 48, KSTAT_KV_U_PACKETS },
	/* RDPC */
	{ "rx discards",	0x00300600, 32, KSTAT_KV_U_PACKETS },
	/* LDPC */
	{ "rx lo discards",	0x00300620, 32, KSTAT_KV_U_PACKETS },
	/* RUPP */
	{ "rx no dest",		0x00300660, 32, KSTAT_KV_U_PACKETS },

	/* GOTC */
	{ "tx bytes",		0x00300680, 48, KSTAT_KV_U_BYTES },
	/* PTC64 */
	{ "tx 64B",		0x003006A0, 48, KSTAT_KV_U_PACKETS },
	/* PTC127 */
	{ "tx 65-127B",		0x003006C0, 48, KSTAT_KV_U_PACKETS },
	/* PTC255 */
	{ "tx 128-255B",	0x003006E0, 48, KSTAT_KV_U_PACKETS },
	/* PTC511 */
	{ "tx 256-511B",	0x00300700, 48, KSTAT_KV_U_PACKETS },
	/* PTC1023 */
	{ "tx 512-1023B",	0x00300720, 48, KSTAT_KV_U_PACKETS },
	/* PTC1522 */
	{ "tx 1024-1522B",	0x00300740, 48, KSTAT_KV_U_PACKETS },
	/* PTC9522 */
	{ "tx 1523-9522B",	0x00300760, 48, KSTAT_KV_U_PACKETS },

	/* Priority XON Transmitted Count */
	/* Priority XOFF Transmitted Count */

	/* LXONTXC */
	{ "tx link xon",	0x00300980, 48, KSTAT_KV_U_PACKETS },
	/* LXOFFTXC */
	{ "tx link xoff",	0x003009a0, 48, KSTAT_KV_U_PACKETS },
	/* UPTC */
	{ "tx ucasts",		0x003009c0, 48, KSTAT_KV_U_PACKETS },
	/* MPTC */
	{ "tx mcasts",		0x003009e0, 48, KSTAT_KV_U_PACKETS },
	/* BPTC */
	{ "tx bcasts",		0x00300a00, 48, KSTAT_KV_U_PACKETS },
	/* TDOLD */
	{ "tx link down",	0x00300a20, 48, KSTAT_KV_U_PACKETS },
};
5173 | |
/*
 * VSI (virtual station interface) level statistics counters, indexed
 * by the VSI number in ixl_rd_counters().
 */
const struct ixl_counter ixl_vsi_counters[] = {
	/* VSI RDPC */
	{ "rx discards",	0x00310000, 32, KSTAT_KV_U_PACKETS },
	/* VSI GOTC */
	{ "tx bytes",		0x00328000, 48, KSTAT_KV_U_BYTES },
	/* VSI UPTC */
	{ "tx ucasts",		0x0033c000, 48, KSTAT_KV_U_PACKETS },
	/* VSI MPTC */
	{ "tx mcasts",		0x0033cc00, 48, KSTAT_KV_U_PACKETS },
	/* VSI BPTC */
	{ "tx bcasts",		0x0033d800, 48, KSTAT_KV_U_PACKETS },
	/* VSI TEPC */
	{ "tx errs",		0x00344000, 48, KSTAT_KV_U_PACKETS },
	/* VSI TDPC */
	{ "tx discards",	0x00348000, 48, KSTAT_KV_U_PACKETS },
	/* VSI GORC */
	{ "rx bytes",		0x00358000, 48, KSTAT_KV_U_BYTES },
	/* VSI UPRC */
	{ "rx ucasts",		0x0036c000, 48, KSTAT_KV_U_PACKETS },
	/* VSI MPRC */
	{ "rx mcasts",		0x0036cc00, 48, KSTAT_KV_U_PACKETS },
	/* VSI BPRC */
	{ "rx bcasts",		0x0036d800, 48, KSTAT_KV_U_PACKETS },
	/* VSI RUPP */
	{ "rx noproto",		0x0036e400, 32, KSTAT_KV_U_PACKETS },
};
5200 | |
/*
 * Per-kstat sampling state attached to ks_ptr: the counter table to
 * read, a double buffer of raw values (2 * n entries, the active
 * half selected by gen & 1 in ixl_kstat_read()), and the hardware
 * index (port or VSI number) used to address the registers.
 */
struct ixl_counter_state {
	const struct ixl_counter
				*counters;	/* table of counters to sample */
	uint64_t		*values;	/* 2 * n raw samples (old/new) */
	size_t			 n;		/* number of counters */
	uint32_t		 index;		/* port or VSI register index */
	unsigned int		 gen;		/* flips buffer halves per read */
};
5209 | |
/*
 * Sample every counter in 'state' into the vs[] array.  Each
 * counter's register address is its base plus index * 8; 32-bit
 * counters use a 4-byte read, wider ones an 8-byte read.  Raw
 * values only -- wrap handling is done by the caller.
 */
static void
ixl_rd_counters(struct ixl_softc *sc, const struct ixl_counter_state *state,
    uint64_t *vs)
{
	const struct ixl_counter *c;
	bus_addr_t r;
	uint64_t v;
	size_t i;

	for (i = 0; i < state->n; i++) {
		c = &state->counters[i];

		r = c->c_base + (state->index * 8);

		if (c->c_width == 32)
			v = bus_space_read_4(sc->sc_memt, sc->sc_memh, r)((sc->sc_memt)->read_4((sc->sc_memh), (r)));
		else
			v = bus_space_read_8(sc->sc_memt, sc->sc_memh, r)((sc->sc_memt)->read_8((sc->sc_memh), (r)));

		vs[i] = v;
	}
}
5232 | |
/*
 * kstat read callback: sample the hardware counters into the
 * currently-inactive half of the double buffer, then add the delta
 * against the previous sample to the exported 64-bit kstat values.
 * Counters narrower than 64 bits are assumed to have wrapped at
 * most once between samples (NOTE: relies on ixl_kstat_tick()'s
 * 4 second polling keeping up with counter rates).
 * Always returns 0.
 */
static int
ixl_kstat_read(struct kstat *ks)
{
	struct ixl_softc *sc = ks->ks_softc;
	struct kstat_kv *kvs = ks->ks_data;
	struct ixl_counter_state *state = ks->ks_ptr;
	unsigned int gen = (state->gen++) & 1;
	uint64_t *ovs = state->values + (gen * state->n);
	uint64_t *nvs = state->values + (!gen * state->n);
	size_t i;

	ixl_rd_counters(sc, state, nvs);
	getnanouptime(&ks->ks_updated);

	for (i = 0; i < state->n; i++) {
		const struct ixl_counter *c = &state->counters[i];
		uint64_t n = nvs[i], o = ovs[i];

		/* undo a single hardware wrap of a narrow counter */
		if (c->c_width < 64) {
			if (n < o)
				n += (1ULL << c->c_width);
		}

		kstat_kv_u64(&kvs[i]) += (n - o);
	}

	return (0);
}
5261 | |
/*
 * Periodic timeout: re-arm itself for 4 seconds from now, then
 * refresh both the port and VSI kstats under the kstat mutex so
 * narrow counters are sampled before they can wrap more than once.
 */
static void
ixl_kstat_tick(void *arg)
{
	struct ixl_softc *sc = arg;

	timeout_add_sec(&sc->sc_kstat_tmo, 4);

	mtx_enter(&sc->sc_kstat_mtx);

	ixl_kstat_read(sc->sc_port_kstat);
	ixl_kstat_read(sc->sc_vsi_kstat);

	mtx_leave(&sc->sc_kstat_mtx);
}
5276 | |
/*
 * Create and install a kstat exporting the given counter table.
 * 'index' is the hardware port or VSI number used to address the
 * counter registers.  Allocates the kstat_kv array and the
 * ixl_counter_state (including the double buffer of raw values),
 * takes an initial sample as the baseline, and returns the
 * installed kstat, or NULL if kstat_create() failed.  The
 * allocations are never freed; the kstats live for the lifetime
 * of the driver instance.
 */
static struct kstat *
ixl_kstat_create(struct ixl_softc *sc, const char *name,
    const struct ixl_counter *counters, size_t n, uint32_t index)
{
	struct kstat *ks;
	struct kstat_kv *kvs;
	struct ixl_counter_state *state;
	const struct ixl_counter *c;
	unsigned int i;

	ks = kstat_create(DEVNAME(sc)((sc)->sc_dev.dv_xname), 0, name, 0, KSTAT_T_KV, 0);
	if (ks == NULL((void *)0)) {
		/* unable to create kstats */
		return (NULL((void *)0));
	}

	/* one exported key/value pair per counter */
	kvs = mallocarray(n, sizeof(*kvs), M_DEVBUF2, M_WAITOK0x0001|M_ZERO0x0008);
	for (i = 0; i < n; i++) {
		c = &counters[i];

		kstat_kv_unit_init(&kvs[i], c->c_name,
		    KSTAT_KV_T_COUNTER64, c->c_type);
	}

	ks->ks_data = kvs;
	ks->ks_datalen = n * sizeof(*kvs);
	ks->ks_read = ixl_kstat_read;

	/* 2 * n raw values: previous and current sample buffers */
	state = malloc(sizeof(*state), M_DEVBUF2, M_WAITOK0x0001|M_ZERO0x0008);
	state->counters = counters;
	state->n = n;
	state->values = mallocarray(n * 2, sizeof(*state->values),
	    M_DEVBUF2, M_WAITOK0x0001|M_ZERO0x0008);
	state->index = index;
	ks->ks_ptr = state;

	kstat_set_mutex(ks, &sc->sc_kstat_mtx);
	ks->ks_softc = sc;
	kstat_install(ks);

	/* fetch a baseline */
	ixl_rd_counters(sc, state, state->values);

	return (ks);
}
5322 | |
/*
 * Attach-time kstat setup: initialize the shared mutex and the
 * polling timeout, create the port-level and VSI-level kstats, and
 * start the 4 second sampling tick.
 */
static void
ixl_kstat_attach(struct ixl_softc *sc)
{
	mtx_init(&sc->sc_kstat_mtx, IPL_SOFTCLOCK)do { (void)(((void *)0)); (void)(0); __mtx_init((&sc-> sc_kstat_mtx), ((((0x4)) > 0x0 && ((0x4)) < 0x9 ) ? 0x9 : ((0x4)))); } while (0);
	timeout_set(&sc->sc_kstat_tmo, ixl_kstat_tick, sc);

	sc->sc_port_kstat = ixl_kstat_create(sc, "ixl-port",
	    ixl_port_counters, nitems(ixl_port_counters)(sizeof((ixl_port_counters)) / sizeof((ixl_port_counters)[0]) ), sc->sc_port);
	sc->sc_vsi_kstat = ixl_kstat_create(sc, "ixl-vsi",
	    ixl_vsi_counters, nitems(ixl_vsi_counters)(sizeof((ixl_vsi_counters)) / sizeof((ixl_vsi_counters)[0])),
	    lemtoh16(&sc->sc_vsi_number)((__uint16_t)(*(__uint16_t *)(&sc->sc_vsi_number))));

	/* ixl counters go up even when the interface is down */
	timeout_add_sec(&sc->sc_kstat_tmo, 4);
}
5338 | |
5339 | #endif /* NKSTAT > 0 */ |