File: dev/pci/if_mcx.c
Warning: line 7005, column 4: Value stored to 'cqp' is never read
1 | /* $OpenBSD: if_mcx.c,v 1.111 2023/11/10 15:51:20 bluhm Exp $ */ |
2 | |
3 | /* |
4 | * Copyright (c) 2017 David Gwynne <dlg@openbsd.org> |
5 | * Copyright (c) 2019 Jonathan Matthew <jmatthew@openbsd.org> |
6 | * |
7 | * Permission to use, copy, modify, and distribute this software for any |
8 | * purpose with or without fee is hereby granted, provided that the above |
9 | * copyright notice and this permission notice appear in all copies. |
10 | * |
11 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES |
12 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF |
13 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR |
14 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES |
15 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN |
16 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF |
17 | * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. |
18 | */ |
19 | |
20 | #include "bpfilter.h" |
21 | #include "vlan.h" |
22 | #include "kstat.h" |
23 | |
24 | #include <sys/param.h> |
25 | #include <sys/systm.h> |
26 | #include <sys/sockio.h> |
27 | #include <sys/mbuf.h> |
28 | #include <sys/kernel.h> |
29 | #include <sys/socket.h> |
30 | #include <sys/device.h> |
31 | #include <sys/pool.h> |
32 | #include <sys/queue.h> |
33 | #include <sys/timeout.h> |
34 | #include <sys/task.h> |
35 | #include <sys/atomic.h> |
36 | #include <sys/timetc.h> |
37 | #include <sys/intrmap.h> |
38 | |
39 | #include <machine/bus.h> |
40 | #include <machine/intr.h> |
41 | |
42 | #include <net/if.h> |
43 | #include <net/if_dl.h> |
44 | #include <net/if_media.h> |
45 | #include <net/toeplitz.h> |
46 | |
47 | #if NBPFILTER > 0 |
48 | #include <net/bpf.h> |
49 | #endif |
50 | |
51 | #if NKSTAT > 0 |
52 | #include <sys/kstat.h> |
53 | #endif |
54 | |
55 | #include <netinet/in.h> |
56 | #include <netinet/if_ether.h> |
57 | |
58 | #include <dev/pci/pcireg.h> |
59 | #include <dev/pci/pcivar.h> |
60 | #include <dev/pci/pcidevs.h> |
61 | |
62 | #define BUS_DMASYNC_PRERW (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE) |
63 | #define BUS_DMASYNC_POSTRW (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE) |
64 | |
65 | #define MCX_HCA_BAR PCI_MAPREG_START /* BAR 0 */ |
66 | |
67 | #define MCX_FW_VER 0x0000 |
68 | #define MCX_FW_VER_MAJOR(_v) ((_v) & 0xffff) |
69 | #define MCX_FW_VER_MINOR(_v) ((_v) >> 16) |
70 | #define MCX_CMDIF_FW_SUBVER 0x0004 |
71 | #define MCX_FW_VER_SUBMINOR(_v) ((_v) & 0xffff) |
72 | #define MCX_CMDIF(_v) ((_v) >> 16) |
73 | |
74 | #define MCX_ISSI 1 /* as per the PRM */ |
75 | #define MCX_CMD_IF_SUPPORTED 5 |
76 | |
77 | #define MCX_HARDMTU 9500 |
78 | |
79 | enum mcx_cmdq_slot { |
80 | MCX_CMDQ_SLOT_POLL = 0, |
81 | MCX_CMDQ_SLOT_IOCTL, |
82 | MCX_CMDQ_SLOT_KSTAT, |
83 | MCX_CMDQ_SLOT_LINK, |
84 | |
85 | MCX_CMDQ_NUM_SLOTS |
86 | }; |
87 | |
88 | #define MCX_PAGE_SHIFT 12 |
89 | #define MCX_PAGE_SIZE (1 << MCX_PAGE_SHIFT) |
90 | |
91 | /* queue sizes */ |
92 | #define MCX_LOG_EQ_SIZE 7 |
93 | #define MCX_LOG_CQ_SIZE 12 |
94 | #define MCX_LOG_RQ_SIZE 10 |
95 | #define MCX_LOG_SQ_SIZE 11 |
96 | |
97 | #define MCX_MAX_QUEUES 16 |
98 | |
99 | /* completion event moderation - about 10khz, or 90% of the cq */ |
100 | #define MCX_CQ_MOD_PERIOD 50 |
101 | #define MCX_CQ_MOD_COUNTER \ |
102 | (((1 << (MCX_LOG_CQ_SIZE - 1)) * 9) / 10) |
103 | |
104 | #define MCX_LOG_SQ_ENTRY_SIZE 6 |
105 | #define MCX_SQ_ENTRY_MAX_SLOTS 4 |
106 | #define MCX_SQ_SEGS_PER_SLOT \ |
107 | (sizeof(struct mcx_sq_entry) / sizeof(struct mcx_sq_entry_seg)) |
108 | #define MCX_SQ_MAX_SEGMENTS \ |
109 | 1 + ((MCX_SQ_ENTRY_MAX_SLOTS-1) * MCX_SQ_SEGS_PER_SLOT) |
110 | |
111 | #define MCX_LOG_FLOW_TABLE_SIZE 5 |
112 | #define MCX_NUM_STATIC_FLOWS 4 /* promisc, allmulti, ucast, bcast */ |
113 | #define MCX_NUM_MCAST_FLOWS \ |
114 | ((1 << MCX_LOG_FLOW_TABLE_SIZE) - MCX_NUM_STATIC_FLOWS) |
115 | |
116 | #define MCX_SQ_INLINE_SIZE 18 |
117 | CTASSERT(ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN == MCX_SQ_INLINE_SIZE); |
118 | |
119 | /* doorbell offsets */ |
120 | #define MCX_DOORBELL_AREA_SIZE MCX_PAGE_SIZE |
121 | |
122 | #define MCX_CQ_DOORBELL_BASE 0 |
123 | #define MCX_CQ_DOORBELL_STRIDE 64 |
124 | |
125 | #define MCX_WQ_DOORBELL_BASE MCX_PAGE_SIZE/2 |
126 | #define MCX_WQ_DOORBELL_STRIDE 64 |
127 | /* make sure the doorbells fit */ |
128 | CTASSERT(MCX_MAX_QUEUES * MCX_CQ_DOORBELL_STRIDE < MCX_WQ_DOORBELL_BASE); |
129 | CTASSERT(MCX_MAX_QUEUES * MCX_WQ_DOORBELL_STRIDE < |
130 | MCX_DOORBELL_AREA_SIZE - MCX_WQ_DOORBELL_BASE); |
131 | |
132 | #define MCX_WQ_DOORBELL_MASK 0xffff |
133 | |
134 | /* uar registers */ |
135 | #define MCX_UAR_CQ_DOORBELL 0x20 |
136 | #define MCX_UAR_EQ_DOORBELL_ARM 0x40 |
137 | #define MCX_UAR_EQ_DOORBELL 0x48 |
138 | #define MCX_UAR_BF 0x800 |
139 | |
140 | #define MCX_CMDQ_ADDR_HI 0x0010 |
141 | #define MCX_CMDQ_ADDR_LO 0x0014 |
142 | #define MCX_CMDQ_ADDR_NMASK 0xfff |
143 | #define MCX_CMDQ_LOG_SIZE(_v) ((_v) >> 4 & 0xf) |
144 | #define MCX_CMDQ_LOG_STRIDE(_v) ((_v) >> 0 & 0xf) |
145 | #define MCX_CMDQ_INTERFACE_MASK (0x3 << 8) |
146 | #define MCX_CMDQ_INTERFACE_FULL_DRIVER (0x0 << 8) |
147 | #define MCX_CMDQ_INTERFACE_DISABLED (0x1 << 8) |
148 | |
149 | #define MCX_CMDQ_DOORBELL 0x0018 |
150 | |
151 | #define MCX_STATE 0x01fc |
152 | #define MCX_STATE_MASK (1U << 31) |
153 | #define MCX_STATE_INITIALIZING (1U << 31) |
154 | #define MCX_STATE_READY (0 << 31) |
155 | #define MCX_STATE_INTERFACE_MASK (0x3 << 24) |
156 | #define MCX_STATE_INTERFACE_FULL_DRIVER (0x0 << 24) |
157 | #define MCX_STATE_INTERFACE_DISABLED (0x1 << 24) |
158 | |
159 | #define MCX_INTERNAL_TIMER 0x1000 |
160 | #define MCX_INTERNAL_TIMER_H 0x1000 |
161 | #define MCX_INTERNAL_TIMER_L 0x1004 |
162 | |
163 | #define MCX_CLEAR_INT 0x100c |
164 | |
165 | #define MCX_REG_OP_WRITE 0 |
166 | #define MCX_REG_OP_READ 1 |
167 | |
168 | #define MCX_REG_PMLP 0x5002 |
169 | #define MCX_REG_PMTU 0x5003 |
170 | #define MCX_REG_PTYS 0x5004 |
171 | #define MCX_REG_PAOS 0x5006 |
172 | #define MCX_REG_PFCC 0x5007 |
173 | #define MCX_REG_PPCNT 0x5008 |
174 | #define MCX_REG_MTCAP 0x9009 /* mgmt temp capabilities */ |
175 | #define MCX_REG_MTMP 0x900a /* mgmt temp */ |
176 | #define MCX_REG_MCIA 0x9014 |
177 | #define MCX_REG_MCAM 0x907f |
178 | |
179 | #define MCX_ETHER_CAP_SGMII 0 |
180 | #define MCX_ETHER_CAP_1000_KX 1 |
181 | #define MCX_ETHER_CAP_10G_CX4 2 |
182 | #define MCX_ETHER_CAP_10G_KX4 3 |
183 | #define MCX_ETHER_CAP_10G_KR 4 |
184 | #define MCX_ETHER_CAP_40G_CR4 6 |
185 | #define MCX_ETHER_CAP_40G_KR4 7 |
186 | #define MCX_ETHER_CAP_10G_CR 12 |
187 | #define MCX_ETHER_CAP_10G_SR 13 |
188 | #define MCX_ETHER_CAP_10G_LR 14 |
189 | #define MCX_ETHER_CAP_40G_SR4 15 |
190 | #define MCX_ETHER_CAP_40G_LR4 16 |
191 | #define MCX_ETHER_CAP_50G_SR2 18 |
192 | #define MCX_ETHER_CAP_100G_CR4 20 |
193 | #define MCX_ETHER_CAP_100G_SR4 21 |
194 | #define MCX_ETHER_CAP_100G_KR4 22 |
195 | #define MCX_ETHER_CAP_100G_LR4 23 |
196 | #define MCX_ETHER_CAP_25G_CR 27 |
197 | #define MCX_ETHER_CAP_25G_KR 28 |
198 | #define MCX_ETHER_CAP_25G_SR 29 |
199 | #define MCX_ETHER_CAP_50G_CR2 30 |
200 | #define MCX_ETHER_CAP_50G_KR2 31 |
201 | |
202 | #define MCX_MAX_CQE 32 |
203 | |
204 | #define MCX_CMD_QUERY_HCA_CAP 0x100 |
205 | #define MCX_CMD_QUERY_ADAPTER 0x101 |
206 | #define MCX_CMD_INIT_HCA 0x102 |
207 | #define MCX_CMD_TEARDOWN_HCA 0x103 |
208 | #define MCX_CMD_ENABLE_HCA 0x104 |
209 | #define MCX_CMD_DISABLE_HCA 0x105 |
210 | #define MCX_CMD_QUERY_PAGES 0x107 |
211 | #define MCX_CMD_MANAGE_PAGES 0x108 |
212 | #define MCX_CMD_SET_HCA_CAP 0x109 |
213 | #define MCX_CMD_QUERY_ISSI 0x10a |
214 | #define MCX_CMD_SET_ISSI 0x10b |
215 | #define MCX_CMD_SET_DRIVER_VERSION 0x10d |
216 | #define MCX_CMD_QUERY_SPECIAL_CONTEXTS 0x203 |
217 | #define MCX_CMD_CREATE_EQ 0x301 |
218 | #define MCX_CMD_DESTROY_EQ 0x302 |
219 | #define MCX_CMD_QUERY_EQ 0x303 |
220 | #define MCX_CMD_CREATE_CQ 0x400 |
221 | #define MCX_CMD_DESTROY_CQ 0x401 |
222 | #define MCX_CMD_QUERY_CQ 0x402 |
223 | #define MCX_CMD_QUERY_NIC_VPORT_CONTEXT 0x754 |
224 | #define MCX_CMD_MODIFY_NIC_VPORT_CONTEXT \ |
225 | 0x755 |
226 | #define MCX_CMD_QUERY_VPORT_COUNTERS 0x770 |
227 | #define MCX_CMD_ALLOC_PD 0x800 |
228 | #define MCX_CMD_ALLOC_UAR 0x802 |
229 | #define MCX_CMD_ACCESS_REG 0x805 |
230 | #define MCX_CMD_ALLOC_TRANSPORT_DOMAIN 0x816 |
231 | #define MCX_CMD_CREATE_TIR 0x900 |
232 | #define MCX_CMD_DESTROY_TIR 0x902 |
233 | #define MCX_CMD_CREATE_SQ 0x904 |
234 | #define MCX_CMD_MODIFY_SQ 0x905 |
235 | #define MCX_CMD_DESTROY_SQ 0x906 |
236 | #define MCX_CMD_QUERY_SQ 0x907 |
237 | #define MCX_CMD_CREATE_RQ 0x908 |
238 | #define MCX_CMD_MODIFY_RQ 0x909 |
239 | #define MCX_CMD_DESTROY_RQ 0x90a |
240 | #define MCX_CMD_QUERY_RQ 0x90b |
241 | #define MCX_CMD_CREATE_TIS 0x912 |
242 | #define MCX_CMD_DESTROY_TIS 0x914 |
243 | #define MCX_CMD_CREATE_RQT 0x916 |
244 | #define MCX_CMD_DESTROY_RQT 0x918 |
245 | #define MCX_CMD_SET_FLOW_TABLE_ROOT 0x92f |
246 | #define MCX_CMD_CREATE_FLOW_TABLE 0x930 |
247 | #define MCX_CMD_DESTROY_FLOW_TABLE 0x931 |
248 | #define MCX_CMD_QUERY_FLOW_TABLE 0x932 |
249 | #define MCX_CMD_CREATE_FLOW_GROUP 0x933 |
250 | #define MCX_CMD_DESTROY_FLOW_GROUP 0x934 |
251 | #define MCX_CMD_QUERY_FLOW_GROUP 0x935 |
252 | #define MCX_CMD_SET_FLOW_TABLE_ENTRY 0x936 |
253 | #define MCX_CMD_QUERY_FLOW_TABLE_ENTRY 0x937 |
254 | #define MCX_CMD_DELETE_FLOW_TABLE_ENTRY 0x938 |
255 | #define MCX_CMD_ALLOC_FLOW_COUNTER 0x939 |
256 | #define MCX_CMD_QUERY_FLOW_COUNTER 0x93b |
257 | |
258 | #define MCX_QUEUE_STATE_RST 0 |
259 | #define MCX_QUEUE_STATE_RDY 1 |
260 | #define MCX_QUEUE_STATE_ERR 3 |
261 | |
262 | #define MCX_FLOW_TABLE_TYPE_RX 0 |
263 | #define MCX_FLOW_TABLE_TYPE_TX 1 |
264 | |
265 | #define MCX_CMDQ_INLINE_DATASIZE 16 |
266 | |
267 | struct mcx_cmdq_entry { |
268 | uint8_t cq_type; |
269 | #define MCX_CMDQ_TYPE_PCIE 0x7 |
270 | uint8_t cq_reserved0[3]; |
271 | |
272 | uint32_t cq_input_length; |
273 | uint64_t cq_input_ptr; |
274 | uint8_t cq_input_data[MCX_CMDQ_INLINE_DATASIZE]; |
275 | |
276 | uint8_t cq_output_data[MCX_CMDQ_INLINE_DATASIZE]; |
277 | uint64_t cq_output_ptr; |
278 | uint32_t cq_output_length; |
279 | |
280 | uint8_t cq_token; |
281 | uint8_t cq_signature; |
282 | uint8_t cq_reserved1[1]; |
283 | uint8_t cq_status; |
284 | #define MCX_CQ_STATUS_SHIFT 1 |
285 | #define MCX_CQ_STATUS_MASK (0x7f << MCX_CQ_STATUS_SHIFT) |
286 | #define MCX_CQ_STATUS_OK (0x00 << MCX_CQ_STATUS_SHIFT) |
287 | #define MCX_CQ_STATUS_INT_ERR (0x01 << MCX_CQ_STATUS_SHIFT) |
288 | #define MCX_CQ_STATUS_BAD_OPCODE (0x02 << MCX_CQ_STATUS_SHIFT) |
289 | #define MCX_CQ_STATUS_BAD_PARAM (0x03 << MCX_CQ_STATUS_SHIFT) |
290 | #define MCX_CQ_STATUS_BAD_SYS_STATE (0x04 << MCX_CQ_STATUS_SHIFT) |
291 | #define MCX_CQ_STATUS_BAD_RESOURCE (0x05 << MCX_CQ_STATUS_SHIFT) |
292 | #define MCX_CQ_STATUS_RESOURCE_BUSY (0x06 << MCX_CQ_STATUS_SHIFT) |
293 | #define MCX_CQ_STATUS_EXCEED_LIM (0x08 << MCX_CQ_STATUS_SHIFT) |
294 | #define MCX_CQ_STATUS_BAD_RES_STATE (0x09 << MCX_CQ_STATUS_SHIFT) |
295 | #define MCX_CQ_STATUS_BAD_INDEX (0x0a << MCX_CQ_STATUS_SHIFT) |
296 | #define MCX_CQ_STATUS_NO_RESOURCES (0x0f << MCX_CQ_STATUS_SHIFT) |
297 | #define MCX_CQ_STATUS_BAD_INPUT_LEN (0x50 << MCX_CQ_STATUS_SHIFT) |
298 | #define MCX_CQ_STATUS_BAD_OUTPUT_LEN (0x51 << MCX_CQ_STATUS_SHIFT) |
299 | #define MCX_CQ_STATUS_BAD_RESOURCE_STATE \ |
300 | (0x10 << MCX_CQ_STATUS_SHIFT) |
301 | #define MCX_CQ_STATUS_BAD_SIZE (0x40 << MCX_CQ_STATUS_SHIFT) |
302 | #define MCX_CQ_STATUS_OWN_MASK 0x1 |
303 | #define MCX_CQ_STATUS_OWN_SW 0x0 |
304 | #define MCX_CQ_STATUS_OWN_HW 0x1 |
305 | } __packed __aligned(8); |
306 | |
307 | #define MCX_CMDQ_MAILBOX_DATASIZE 512 |
308 | |
309 | struct mcx_cmdq_mailbox { |
310 | uint8_t mb_data[MCX_CMDQ_MAILBOX_DATASIZE]; |
311 | uint8_t mb_reserved0[48]; |
312 | uint64_t mb_next_ptr; |
313 | uint32_t mb_block_number; |
314 | uint8_t mb_reserved1[1]; |
315 | uint8_t mb_token; |
316 | uint8_t mb_ctrl_signature; |
317 | uint8_t mb_signature; |
318 | } __packed __aligned(8); |
319 | |
320 | #define MCX_CMDQ_MAILBOX_ALIGN (1 << 10) |
321 | #define MCX_CMDQ_MAILBOX_SIZE roundup(sizeof(struct mcx_cmdq_mailbox), \ |
322 | MCX_CMDQ_MAILBOX_ALIGN) |
323 | /* |
324 | * command mailbox structures |
325 | */ |
326 | |
327 | struct mcx_cmd_enable_hca_in { |
328 | uint16_t cmd_opcode; |
329 | uint8_t cmd_reserved0[4]; |
330 | uint16_t cmd_op_mod; |
331 | uint8_t cmd_reserved1[2]; |
332 | uint16_t cmd_function_id; |
333 | uint8_t cmd_reserved2[4]; |
334 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
335 | |
336 | struct mcx_cmd_enable_hca_out { |
337 | uint8_t cmd_status; |
338 | uint8_t cmd_reserved0[3]; |
339 | uint32_t cmd_syndrome; |
340 | uint8_t cmd_reserved1[4]; |
341 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
342 | |
343 | struct mcx_cmd_init_hca_in { |
344 | uint16_t cmd_opcode; |
345 | uint8_t cmd_reserved0[4]; |
346 | uint16_t cmd_op_mod; |
347 | uint8_t cmd_reserved1[8]; |
348 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
349 | |
350 | struct mcx_cmd_init_hca_out { |
351 | uint8_t cmd_status; |
352 | uint8_t cmd_reserved0[3]; |
353 | uint32_t cmd_syndrome; |
354 | uint8_t cmd_reserved1[8]; |
355 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
356 | |
357 | struct mcx_cmd_teardown_hca_in { |
358 | uint16_t cmd_opcode; |
359 | uint8_t cmd_reserved0[4]; |
360 | uint16_t cmd_op_mod; |
361 | uint8_t cmd_reserved1[2]; |
362 | #define MCX_CMD_TEARDOWN_HCA_GRACEFUL 0x0 |
363 | #define MCX_CMD_TEARDOWN_HCA_PANIC 0x1 |
364 | uint16_t cmd_profile; |
365 | uint8_t cmd_reserved2[4]; |
366 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
367 | |
368 | struct mcx_cmd_teardown_hca_out { |
369 | uint8_t cmd_status; |
370 | uint8_t cmd_reserved0[3]; |
371 | uint32_t cmd_syndrome; |
372 | uint8_t cmd_reserved1[8]; |
373 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
374 | |
375 | struct mcx_cmd_access_reg_in { |
376 | uint16_t cmd_opcode; |
377 | uint8_t cmd_reserved0[4]; |
378 | uint16_t cmd_op_mod; |
379 | uint8_t cmd_reserved1[2]; |
380 | uint16_t cmd_register_id; |
381 | uint32_t cmd_argument; |
382 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
383 | |
384 | struct mcx_cmd_access_reg_out { |
385 | uint8_t cmd_status; |
386 | uint8_t cmd_reserved0[3]; |
387 | uint32_t cmd_syndrome; |
388 | uint8_t cmd_reserved1[8]; |
389 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
390 | |
391 | struct mcx_reg_pmtu { |
392 | uint8_t rp_reserved1; |
393 | uint8_t rp_local_port; |
394 | uint8_t rp_reserved2[2]; |
395 | uint16_t rp_max_mtu; |
396 | uint8_t rp_reserved3[2]; |
397 | uint16_t rp_admin_mtu; |
398 | uint8_t rp_reserved4[2]; |
399 | uint16_t rp_oper_mtu; |
400 | uint8_t rp_reserved5[2]; |
401 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
402 | |
403 | struct mcx_reg_ptys { |
404 | uint8_t rp_reserved1; |
405 | uint8_t rp_local_port; |
406 | uint8_t rp_reserved2; |
407 | uint8_t rp_proto_mask; |
408 | #define MCX_REG_PTYS_PROTO_MASK_ETH (1 << 2) |
409 | uint8_t rp_reserved3[8]; |
410 | uint32_t rp_eth_proto_cap; |
411 | uint8_t rp_reserved4[8]; |
412 | uint32_t rp_eth_proto_admin; |
413 | uint8_t rp_reserved5[8]; |
414 | uint32_t rp_eth_proto_oper; |
415 | uint8_t rp_reserved6[24]; |
416 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
417 | |
418 | struct mcx_reg_paos { |
419 | uint8_t rp_reserved1; |
420 | uint8_t rp_local_port; |
421 | uint8_t rp_admin_status; |
422 | #define MCX_REG_PAOS_ADMIN_STATUS_UP 1 |
423 | #define MCX_REG_PAOS_ADMIN_STATUS_DOWN 2 |
424 | #define MCX_REG_PAOS_ADMIN_STATUS_UP_ONCE 3 |
425 | #define MCX_REG_PAOS_ADMIN_STATUS_DISABLED 4 |
426 | uint8_t rp_oper_status; |
427 | #define MCX_REG_PAOS_OPER_STATUS_UP 1 |
428 | #define MCX_REG_PAOS_OPER_STATUS_DOWN 2 |
429 | #define MCX_REG_PAOS_OPER_STATUS_FAILED 4 |
430 | uint8_t rp_admin_state_update; |
431 | #define MCX_REG_PAOS_ADMIN_STATE_UPDATE_EN (1 << 7) |
432 | uint8_t rp_reserved2[11]; |
433 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
434 | |
435 | struct mcx_reg_pfcc { |
436 | uint8_t rp_reserved1; |
437 | uint8_t rp_local_port; |
438 | uint8_t rp_reserved2[3]; |
439 | uint8_t rp_prio_mask_tx; |
440 | uint8_t rp_reserved3; |
441 | uint8_t rp_prio_mask_rx; |
442 | uint8_t rp_pptx_aptx; |
443 | uint8_t rp_pfctx; |
444 | uint8_t rp_fctx_dis; |
445 | uint8_t rp_reserved4; |
446 | uint8_t rp_pprx_aprx; |
447 | uint8_t rp_pfcrx; |
448 | uint8_t rp_reserved5[2]; |
449 | uint16_t rp_dev_stall_min; |
450 | uint16_t rp_dev_stall_crit; |
451 | uint8_t rp_reserved6[12]; |
452 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
453 | |
454 | #define MCX_PMLP_MODULE_NUM_MASK 0xff |
455 | struct mcx_reg_pmlp { |
456 | uint8_t rp_rxtx; |
457 | uint8_t rp_local_port; |
458 | uint8_t rp_reserved0; |
459 | uint8_t rp_width; |
460 | uint32_t rp_lane0_mapping; |
461 | uint32_t rp_lane1_mapping; |
462 | uint32_t rp_lane2_mapping; |
463 | uint32_t rp_lane3_mapping; |
464 | uint8_t rp_reserved1[44]; |
465 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
466 | |
467 | struct mcx_reg_ppcnt { |
468 | uint8_t ppcnt_swid; |
469 | uint8_t ppcnt_local_port; |
470 | uint8_t ppcnt_pnat; |
471 | uint8_t ppcnt_grp; |
472 | #define MCX_REG_PPCNT_GRP_IEEE8023 0x00 |
473 | #define MCX_REG_PPCNT_GRP_RFC2863 0x01 |
474 | #define MCX_REG_PPCNT_GRP_RFC2819 0x02 |
475 | #define MCX_REG_PPCNT_GRP_RFC3635 0x03 |
476 | #define MCX_REG_PPCNT_GRP_PER_PRIO 0x10 |
477 | #define MCX_REG_PPCNT_GRP_PER_TC 0x11 |
478 | #define MCX_REG_PPCNT_GRP_PER_RX_BUFFER 0x11 |
479 | |
480 | uint8_t ppcnt_clr; |
481 | uint8_t ppcnt_reserved1[2]; |
482 | uint8_t ppcnt_prio_tc; |
483 | #define MCX_REG_PPCNT_CLR (1 << 7) |
484 | |
485 | uint8_t ppcnt_counter_set[248]; |
486 | } __packed __aligned(8); |
487 | CTASSERT(sizeof(struct mcx_reg_ppcnt) == 256); |
488 | CTASSERT((offsetof(struct mcx_reg_ppcnt, ppcnt_counter_set) % |
489 | sizeof(uint64_t)) == 0); |
490 | |
491 | enum mcx_ppcnt_ieee8023 { |
492 | frames_transmitted_ok, |
493 | frames_received_ok, |
494 | frame_check_sequence_errors, |
495 | alignment_errors, |
496 | octets_transmitted_ok, |
497 | octets_received_ok, |
498 | multicast_frames_xmitted_ok, |
499 | broadcast_frames_xmitted_ok, |
500 | multicast_frames_received_ok, |
501 | broadcast_frames_received_ok, |
502 | in_range_length_errors, |
503 | out_of_range_length_field, |
504 | frame_too_long_errors, |
505 | symbol_error_during_carrier, |
506 | mac_control_frames_transmitted, |
507 | mac_control_frames_received, |
508 | unsupported_opcodes_received, |
509 | pause_mac_ctrl_frames_received, |
510 | pause_mac_ctrl_frames_transmitted, |
511 | |
512 | mcx_ppcnt_ieee8023_count |
513 | }; |
514 | CTASSERT(mcx_ppcnt_ieee8023_count * sizeof(uint64_t) == 0x98); |
515 | |
516 | enum mcx_ppcnt_rfc2863 { |
517 | in_octets, |
518 | in_ucast_pkts, |
519 | in_discards, |
520 | in_errors, |
521 | in_unknown_protos, |
522 | out_octets, |
523 | out_ucast_pkts, |
524 | out_discards, |
525 | out_errors, |
526 | in_multicast_pkts, |
527 | in_broadcast_pkts, |
528 | out_multicast_pkts, |
529 | out_broadcast_pkts, |
530 | |
531 | mcx_ppcnt_rfc2863_count |
532 | }; |
533 | CTASSERT(mcx_ppcnt_rfc2863_count * sizeof(uint64_t) == 0x68); |
534 | |
535 | enum mcx_ppcnt_rfc2819 { |
536 | drop_events, |
537 | octets, |
538 | pkts, |
539 | broadcast_pkts, |
540 | multicast_pkts, |
541 | crc_align_errors, |
542 | undersize_pkts, |
543 | oversize_pkts, |
544 | fragments, |
545 | jabbers, |
546 | collisions, |
547 | pkts64octets, |
548 | pkts65to127octets, |
549 | pkts128to255octets, |
550 | pkts256to511octets, |
551 | pkts512to1023octets, |
552 | pkts1024to1518octets, |
553 | pkts1519to2047octets, |
554 | pkts2048to4095octets, |
555 | pkts4096to8191octets, |
556 | pkts8192to10239octets, |
557 | |
558 | mcx_ppcnt_rfc2819_count |
559 | }; |
560 | CTASSERT((mcx_ppcnt_rfc2819_count * sizeof(uint64_t)) == 0xa8); |
561 | |
562 | enum mcx_ppcnt_rfc3635 { |
563 | dot3stats_alignment_errors, |
564 | dot3stats_fcs_errors, |
565 | dot3stats_single_collision_frames, |
566 | dot3stats_multiple_collision_frames, |
567 | dot3stats_sqe_test_errors, |
568 | dot3stats_deferred_transmissions, |
569 | dot3stats_late_collisions, |
570 | dot3stats_excessive_collisions, |
571 | dot3stats_internal_mac_transmit_errors, |
572 | dot3stats_carrier_sense_errors, |
573 | dot3stats_frame_too_longs, |
574 | dot3stats_internal_mac_receive_errors, |
575 | dot3stats_symbol_errors, |
576 | dot3control_in_unknown_opcodes, |
577 | dot3in_pause_frames, |
578 | dot3out_pause_frames, |
579 | |
580 | mcx_ppcnt_rfc3635_count |
581 | }; |
582 | CTASSERT((mcx_ppcnt_rfc3635_count * sizeof(uint64_t)) == 0x80); |
583 | |
584 | struct mcx_reg_mcam { |
585 | uint8_t _reserved1[1]; |
586 | uint8_t mcam_feature_group; |
587 | uint8_t _reserved2[1]; |
588 | uint8_t mcam_access_reg_group; |
589 | uint8_t _reserved3[4]; |
590 | uint8_t mcam_access_reg_cap_mask[16]; |
591 | uint8_t _reserved4[16]; |
592 | uint8_t mcam_feature_cap_mask[16]; |
593 | uint8_t _reserved5[16]; |
594 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
595 | |
596 | #define MCX_BITFIELD_BIT(bf, b) (bf[(sizeof bf - 1) - (b / 8)] & (b % 8)) |
597 | |
598 | #define MCX_MCAM_FEATURE_CAP_SENSOR_MAP 6 |
599 | |
600 | struct mcx_reg_mtcap { |
601 | uint8_t _reserved1[3]; |
602 | uint8_t mtcap_sensor_count; |
603 | uint8_t _reserved2[4]; |
604 | |
605 | uint64_t mtcap_sensor_map; |
606 | }; |
607 | |
608 | struct mcx_reg_mtmp { |
609 | uint8_t _reserved1[2]; |
610 | uint16_t mtmp_sensor_index; |
611 | |
612 | uint8_t _reserved2[2]; |
613 | uint16_t mtmp_temperature; |
614 | |
615 | uint16_t mtmp_mte_mtr; |
616 | #define MCX_REG_MTMP_MTE(1 << 15) (1 << 15) |
617 | #define MCX_REG_MTMP_MTR(1 << 14) (1 << 14) |
618 | uint16_t mtmp_max_temperature; |
619 | |
620 | uint16_t mtmp_tee; |
621 | #define MCX_REG_MTMP_TEE_NOPE(0 << 14) (0 << 14) |
622 | #define MCX_REG_MTMP_TEE_GENERATE(1 << 14) (1 << 14) |
623 | #define MCX_REG_MTMP_TEE_GENERATE_ONE(2 << 14) (2 << 14) |
624 | uint16_t mtmp_temperature_threshold_hi; |
625 | |
626 | uint8_t _reserved3[2]; |
627 | uint16_t mtmp_temperature_threshold_lo; |
628 | |
629 | uint8_t _reserved4[4]; |
630 | |
631 | uint8_t mtmp_sensor_name[8]; |
632 | }; |
633 | CTASSERT(sizeof(struct mcx_reg_mtmp) == 0x20); |
634 | CTASSERT(offsetof(struct mcx_reg_mtmp, mtmp_sensor_name) == 0x18); |
635 | |
636 | #define MCX_MCIA_EEPROM_BYTES 32 |
637 | struct mcx_reg_mcia { |
638 | uint8_t rm_l; |
639 | uint8_t rm_module; |
640 | uint8_t rm_reserved0; |
641 | uint8_t rm_status; |
642 | uint8_t rm_i2c_addr; |
643 | uint8_t rm_page_num; |
644 | uint16_t rm_dev_addr; |
645 | uint16_t rm_reserved1; |
646 | uint16_t rm_size; |
647 | uint32_t rm_reserved2; |
648 | uint8_t rm_data[48]; |
649 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
650 | |
651 | struct mcx_cmd_query_issi_in { |
652 | uint16_t cmd_opcode; |
653 | uint8_t cmd_reserved0[4]; |
654 | uint16_t cmd_op_mod; |
655 | uint8_t cmd_reserved1[8]; |
656 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
657 | |
658 | struct mcx_cmd_query_issi_il_out { |
659 | uint8_t cmd_status; |
660 | uint8_t cmd_reserved0[3]; |
661 | uint32_t cmd_syndrome; |
662 | uint8_t cmd_reserved1[2]; |
663 | uint16_t cmd_current_issi; |
664 | uint8_t cmd_reserved2[4]; |
665 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
666 | |
667 | CTASSERT(sizeof(struct mcx_cmd_query_issi_il_out) == MCX_CMDQ_INLINE_DATASIZE); |
668 | |
669 | struct mcx_cmd_query_issi_mb_out { |
670 | uint8_t cmd_reserved2[16]; |
671 | uint8_t cmd_supported_issi[80]; /* very big endian */ |
672 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
673 | |
674 | CTASSERT(sizeof(struct mcx_cmd_query_issi_mb_out) <= MCX_CMDQ_MAILBOX_DATASIZE); |
675 | |
676 | struct mcx_cmd_set_issi_in { |
677 | uint16_t cmd_opcode; |
678 | uint8_t cmd_reserved0[4]; |
679 | uint16_t cmd_op_mod; |
680 | uint8_t cmd_reserved1[2]; |
681 | uint16_t cmd_current_issi; |
682 | uint8_t cmd_reserved2[4]; |
683 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
684 | |
685 | CTASSERT(sizeof(struct mcx_cmd_set_issi_in) <= MCX_CMDQ_INLINE_DATASIZE); |
686 | |
687 | struct mcx_cmd_set_issi_out { |
688 | uint8_t cmd_status; |
689 | uint8_t cmd_reserved0[3]; |
690 | uint32_t cmd_syndrome; |
691 | uint8_t cmd_reserved1[8]; |
692 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
693 | |
694 | CTASSERT(sizeof(struct mcx_cmd_set_issi_out) <= MCX_CMDQ_INLINE_DATASIZE); |
695 | |
696 | struct mcx_cmd_query_pages_in { |
697 | uint16_t cmd_opcode; |
698 | uint8_t cmd_reserved0[4]; |
699 | uint16_t cmd_op_mod; |
700 | #define MCX_CMD_QUERY_PAGES_BOOT 0x01 |
701 | #define MCX_CMD_QUERY_PAGES_INIT 0x02 |
702 | #define MCX_CMD_QUERY_PAGES_REGULAR 0x03 |
703 | uint8_t cmd_reserved1[8]; |
704 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
705 | |
706 | struct mcx_cmd_query_pages_out { |
707 | uint8_t cmd_status; |
708 | uint8_t cmd_reserved0[3]; |
709 | uint32_t cmd_syndrome; |
710 | uint8_t cmd_reserved1[2]; |
711 | uint16_t cmd_func_id; |
712 | int32_t cmd_num_pages; |
713 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
714 | |
715 | struct mcx_cmd_manage_pages_in { |
716 | uint16_t cmd_opcode; |
717 | uint8_t cmd_reserved0[4]; |
718 | uint16_t cmd_op_mod; |
719 | #define MCX_CMD_MANAGE_PAGES_ALLOC_FAIL \ |
720 | 0x00 |
721 | #define MCX_CMD_MANAGE_PAGES_ALLOC_SUCCESS \ |
722 | 0x01 |
723 | #define MCX_CMD_MANAGE_PAGES_HCA_RETURN_PAGES \ |
724 | 0x02 |
725 | uint8_t cmd_reserved1[2]; |
726 | uint16_t cmd_func_id; |
727 | uint32_t cmd_input_num_entries; |
728 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
729 | |
730 | CTASSERT(sizeof(struct mcx_cmd_manage_pages_in) == MCX_CMDQ_INLINE_DATASIZE); |
731 | |
732 | struct mcx_cmd_manage_pages_out { |
733 | uint8_t cmd_status; |
734 | uint8_t cmd_reserved0[3]; |
735 | uint32_t cmd_syndrome; |
736 | uint32_t cmd_output_num_entries; |
737 | uint8_t cmd_reserved1[4]; |
738 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
739 | |
740 | CTASSERT(sizeof(struct mcx_cmd_manage_pages_out) == MCX_CMDQ_INLINE_DATASIZE); |
741 | |
742 | struct mcx_cmd_query_hca_cap_in { |
743 | uint16_t cmd_opcode; |
744 | uint8_t cmd_reserved0[4]; |
745 | uint16_t cmd_op_mod; |
746 | #define MCX_CMD_QUERY_HCA_CAP_MAX (0x0 << 0) |
747 | #define MCX_CMD_QUERY_HCA_CAP_CURRENT (0x1 << 0) |
748 | #define MCX_CMD_QUERY_HCA_CAP_DEVICE (0x0 << 1) |
749 | #define MCX_CMD_QUERY_HCA_CAP_OFFLOAD (0x1 << 1) |
750 | #define MCX_CMD_QUERY_HCA_CAP_FLOW (0x7 << 1) |
751 | uint8_t cmd_reserved1[8]; |
752 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
753 | |
754 | struct mcx_cmd_query_hca_cap_out { |
755 | uint8_t cmd_status; |
756 | uint8_t cmd_reserved0[3]; |
757 | uint32_t cmd_syndrome; |
758 | uint8_t cmd_reserved1[8]; |
759 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
760 | |
761 | #define MCX_HCA_CAP_LEN 0x1000 |
762 | #define MCX_HCA_CAP_NMAILBOXES \ |
763 | (MCX_HCA_CAP_LEN / MCX_CMDQ_MAILBOX_DATASIZE) |
764 | |
765 | #if __GNUC_PREREQ__(4, 3) |
766 | #define __counter__ __COUNTER__ |
767 | #else |
768 | #define __counter__ __LINE__ |
769 | #endif |
770 | |
771 | #define __token(_tok, _num) _tok##_num |
772 | #define _token(_tok, _num) __token(_tok, _num) |
773 | #define __reserved__ _token(__reserved, __counter__) |
774 | |
775 | struct mcx_cap_device { |
776 | uint8_t reserved0[16]; |
777 | |
778 | uint8_t log_max_srq_sz; |
779 | uint8_t log_max_qp_sz; |
780 | uint8_t __reserved____reserved780[1]; |
781 | uint8_t log_max_qp; /* 5 bits */ |
782 | #define MCX_CAP_DEVICE_LOG_MAX_QP0x1f 0x1f |
783 | |
784 | uint8_t __reserved____reserved784[1]; |
785 | uint8_t log_max_srq; /* 5 bits */ |
786 | #define MCX_CAP_DEVICE_LOG_MAX_SRQ0x1f 0x1f |
787 | uint8_t __reserved____reserved787[2]; |
788 | |
789 | uint8_t __reserved____reserved789[1]; |
790 | uint8_t log_max_cq_sz; |
791 | uint8_t __reserved____reserved791[1]; |
792 | uint8_t log_max_cq; /* 5 bits */ |
793 | #define MCX_CAP_DEVICE_LOG_MAX_CQ0x1f 0x1f |
794 | |
795 | uint8_t log_max_eq_sz; |
796 | uint8_t log_max_mkey; /* 6 bits */ |
797 | #define MCX_CAP_DEVICE_LOG_MAX_MKEY0x3f 0x3f |
798 | uint8_t __reserved____reserved798[1]; |
799 | uint8_t log_max_eq; /* 4 bits */ |
800 | #define MCX_CAP_DEVICE_LOG_MAX_EQ0x0f 0x0f |
801 | |
802 | uint8_t max_indirection; |
803 | uint8_t log_max_mrw_sz; /* 7 bits */ |
804 | #define MCX_CAP_DEVICE_LOG_MAX_MRW_SZ0x7f 0x7f |
805 | uint8_t teardown_log_max_msf_list_size; |
806 | #define MCX_CAP_DEVICE_FORCE_TEARDOWN0x80 0x80 |
807 | #define MCX_CAP_DEVICE_LOG_MAX_MSF_LIST_SIZE0x3f \ |
808 | 0x3f |
809 | uint8_t log_max_klm_list_size; /* 6 bits */ |
810 | #define MCX_CAP_DEVICE_LOG_MAX_KLM_LIST_SIZE0x3f \ |
811 | 0x3f |
812 | |
813 | uint8_t __reserved____reserved813[1]; |
814 | uint8_t log_max_ra_req_dc; /* 6 bits */ |
815 | #define MCX_CAP_DEVICE_LOG_MAX_REQ_DC0x3f 0x3f |
816 | uint8_t __reserved____reserved816[1]; |
817 | uint8_t log_max_ra_res_dc; /* 6 bits */ |
818 | #define MCX_CAP_DEVICE_LOG_MAX_RA_RES_DC0x3f \ |
819 | 0x3f |
820 | |
821 | uint8_t __reserved____reserved821[1]; |
822 | uint8_t log_max_ra_req_qp; /* 6 bits */ |
823 | #define MCX_CAP_DEVICE_LOG_MAX_RA_REQ_QP0x3f \ |
824 | 0x3f |
825 | uint8_t __reserved____reserved825[1]; |
826 | uint8_t log_max_ra_res_qp; /* 6 bits */ |
827 | #define MCX_CAP_DEVICE_LOG_MAX_RA_RES_QP0x3f \ |
828 | 0x3f |
829 | |
830 | uint8_t flags1; |
831 | #define MCX_CAP_DEVICE_END_PAD0x80 0x80 |
832 | #define MCX_CAP_DEVICE_CC_QUERY_ALLOWED0x40 0x40 |
833 | #define MCX_CAP_DEVICE_CC_MODIFY_ALLOWED0x20 \ |
834 | 0x20 |
835 | #define MCX_CAP_DEVICE_START_PAD0x10 0x10 |
836 | #define MCX_CAP_DEVICE_128BYTE_CACHELINE0x08 \ |
837 | 0x08 |
838 | uint8_t __reserved____reserved838[1]; |
839 | uint16_t gid_table_size; |
840 | |
841 | uint16_t flags2; |
842 | #define MCX_CAP_DEVICE_OUT_OF_SEQ_CNT0x8000 0x8000 |
843 | #define MCX_CAP_DEVICE_VPORT_COUNTERS0x4000 0x4000 |
844 | #define MCX_CAP_DEVICE_RETRANSMISSION_Q_COUNTERS0x2000 \ |
845 | 0x2000 |
846 | #define MCX_CAP_DEVICE_DEBUG0x1000 0x1000 |
847 | #define MCX_CAP_DEVICE_MODIFY_RQ_COUNTERS_SET_ID0x8000 \ |
848 | 0x8000 |
849 | #define MCX_CAP_DEVICE_RQ_DELAY_DROP0x4000 0x4000 |
850 | #define MCX_CAP_DEVICe_MAX_QP_CNT_MASK0x03ff 0x03ff |
851 | uint16_t pkey_table_size; |
852 | |
853 | uint8_t flags3; |
854 | #define MCX_CAP_DEVICE_VPORT_GROUP_MANAGER0x80 \ |
855 | 0x80 |
856 | #define MCX_CAP_DEVICE_VHCA_GROUP_MANAGER0x40 \ |
857 | 0x40 |
858 | #define MCX_CAP_DEVICE_IB_VIRTUAL0x20 0x20 |
859 | #define MCX_CAP_DEVICE_ETH_VIRTUAL0x10 0x10 |
860 | #define MCX_CAP_DEVICE_ETS0x04 0x04 |
861 | #define MCX_CAP_DEVICE_NIC_FLOW_TABLE0x02 0x02 |
862 | #define MCX_CAP_DEVICE_ESWITCH_FLOW_TABLE0x01 \ |
863 | 0x01 |
864 | uint8_t local_ca_ack_delay; /* 5 bits */ |
865 | #define MCX_CAP_DEVICE_LOCAL_CA_ACK_DELAY0x1f \ |
866 | 0x1f |
867 | #define MCX_CAP_DEVICE_MCAM_REG0x40 0x40 |
868 | uint8_t port_type; |
869 | #define MCX_CAP_DEVICE_PORT_MODULE_EVENT0x80 \ |
870 | 0x80 |
871 | #define MCX_CAP_DEVICE_PORT_TYPE0x03 0x03 |
872 | #define MCX_CAP_DEVICE_PORT_TYPE_ETH0x01 0x01 |
873 | uint8_t num_ports; |
874 | |
875 | uint8_t snapshot_log_max_msg; |
876 | #define MCX_CAP_DEVICE_SNAPSHOT0x80 0x80 |
877 | #define MCX_CAP_DEVICE_LOG_MAX_MSG0x1f 0x1f |
878 | uint8_t max_tc; /* 4 bits */ |
879 | #define MCX_CAP_DEVICE_MAX_TC0x0f 0x0f |
880 | uint8_t flags4; |
881 | #define MCX_CAP_DEVICE_TEMP_WARN_EVENT0x80 0x80 |
882 | #define MCX_CAP_DEVICE_DCBX0x40 0x40 |
883 | #define MCX_CAP_DEVICE_ROL_S0x02 0x02 |
884 | #define MCX_CAP_DEVICE_ROL_G0x01 0x01 |
885 | uint8_t wol; |
886 | #define MCX_CAP_DEVICE_WOL_S0x40 0x40 |
887 | #define MCX_CAP_DEVICE_WOL_G0x20 0x20 |
888 | #define MCX_CAP_DEVICE_WOL_A0x10 0x10 |
889 | #define MCX_CAP_DEVICE_WOL_B0x08 0x08 |
890 | #define MCX_CAP_DEVICE_WOL_M0x04 0x04 |
891 | #define MCX_CAP_DEVICE_WOL_U0x02 0x02 |
892 | #define MCX_CAP_DEVICE_WOL_P0x01 0x01 |
893 | |
894 | uint16_t stat_rate_support; |
895 | uint8_t __reserved____reserved895[1]; |
896 | uint8_t cqe_version; /* 4 bits */ |
897 | #define MCX_CAP_DEVICE_CQE_VERSION0x0f 0x0f |
898 | |
899 | uint32_t flags5; |
900 | #define MCX_CAP_DEVICE_COMPACT_ADDRESS_VECTOR0x80000000 \ |
901 | 0x80000000 |
902 | #define MCX_CAP_DEVICE_STRIDING_RQ0x40000000 0x40000000 |
903 | #define MCX_CAP_DEVICE_IPOIP_ENHANCED_OFFLOADS0x10000000 \ |
904 | 0x10000000 |
905 | #define MCX_CAP_DEVICE_IPOIP_IPOIP_OFFLOADS0x08000000 \ |
906 | 0x08000000 |
907 | #define MCX_CAP_DEVICE_DC_CONNECT_CP0x00040000 0x00040000 |
908 | #define MCX_CAP_DEVICE_DC_CNAK_DRACE0x00020000 0x00020000 |
909 | #define MCX_CAP_DEVICE_DRAIN_SIGERR0x00010000 0x00010000 |
910 | #define MCX_CAP_DEVICE_DRAIN_SIGERR0x00010000 0x00010000 |
911 | #define MCX_CAP_DEVICE_CMDIF_CHECKSUM0x0000c000 0x0000c000 |
912 | #define MCX_CAP_DEVICE_SIGERR_QCE0x00002000 0x00002000 |
913 | #define MCX_CAP_DEVICE_WQ_SIGNATURE0x00000800 0x00000800 |
914 | #define MCX_CAP_DEVICE_SCTR_DATA_CQE0x00000400 0x00000400 |
915 | #define MCX_CAP_DEVICE_SHO0x00000100 0x00000100 |
916 | #define MCX_CAP_DEVICE_TPH0x00000080 0x00000080 |
917 | #define MCX_CAP_DEVICE_RF0x00000040 0x00000040 |
918 | #define MCX_CAP_DEVICE_DCT0x00000020 0x00000020 |
919 | #define MCX_CAP_DEVICE_QOS0x00000010 0x00000010 |
920 | #define MCX_CAP_DEVICe_ETH_NET_OFFLOADS0x00000008 0x00000008 |
921 | #define MCX_CAP_DEVICE_ROCE0x00000004 0x00000004 |
922 | #define MCX_CAP_DEVICE_ATOMIC0x00000002 0x00000002 |
923 | |
924 | uint32_t flags6; |
925 | #define MCX_CAP_DEVICE_CQ_OI0x80000000 0x80000000 |
926 | #define MCX_CAP_DEVICE_CQ_RESIZE0x40000000 0x40000000 |
927 | #define MCX_CAP_DEVICE_CQ_MODERATION0x20000000 0x20000000 |
928 | #define MCX_CAP_DEVICE_CQ_PERIOD_MODE_MODIFY0x10000000 \ |
929 | 0x10000000 |
930 | #define MCX_CAP_DEVICE_CQ_INVALIDATE0x08000000 0x08000000 |
931 | #define MCX_CAP_DEVICE_RESERVED_AT_2550x04000000 0x04000000 |
932 | #define MCX_CAP_DEVICE_CQ_EQ_REMAP0x02000000 0x02000000 |
933 | #define MCX_CAP_DEVICE_PG0x01000000 0x01000000 |
934 | #define MCX_CAP_DEVICE_BLOCK_LB_MC0x00800000 0x00800000 |
935 | #define MCX_CAP_DEVICE_EXPONENTIAL_BACKOFF0x00400000 \ |
936 | 0x00400000 |
937 | #define MCX_CAP_DEVICE_SCQE_BREAK_MODERATION0x00200000 \ |
938 | 0x00200000 |
939 | #define MCX_CAP_DEVICE_CQ_PERIOD_START_FROM_CQE0x00100000 \ |
940 | 0x00100000 |
941 | #define MCX_CAP_DEVICE_CD0x00080000 0x00080000 |
942 | #define MCX_CAP_DEVICE_ATM0x00040000 0x00040000 |
943 | #define MCX_CAP_DEVICE_APM0x00020000 0x00020000 |
944 | #define MCX_CAP_DEVICE_IMAICL0x00010000 0x00010000 |
945 | #define MCX_CAP_DEVICE_QKV0x00000200 0x00000200 |
946 | #define MCX_CAP_DEVICE_PKV0x00000100 0x00000100 |
947 | #define MCX_CAP_DEVICE_SET_DETH_SQPN0x00000080 0x00000080 |
948 | #define MCX_CAP_DEVICE_XRC0x00000008 0x00000008 |
949 | #define MCX_CAP_DEVICE_UD0x00000004 0x00000004 |
950 | #define MCX_CAP_DEVICE_UC0x00000002 0x00000002 |
951 | #define MCX_CAP_DEVICE_RC0x00000001 0x00000001 |
952 | |
953 | uint8_t uar_flags; |
954 | #define MCX_CAP_DEVICE_UAR_4K0x80 0x80 |
955 | uint8_t uar_sz; /* 6 bits */ |
956 | #define MCX_CAP_DEVICE_UAR_SZ0x3f 0x3f |
957 | uint8_t __reserved____reserved957[1]; |
958 | uint8_t log_pg_sz; |
959 | |
960 | uint8_t flags7; |
961 | #define MCX_CAP_DEVICE_BF0x80 0x80 |
962 | #define MCX_CAP_DEVICE_DRIVER_VERSION0x40 0x40 |
963 | #define MCX_CAP_DEVICE_PAD_TX_ETH_PACKET0x20 \ |
964 | 0x20 |
965 | uint8_t log_bf_reg_size; /* 5 bits */ |
966 | #define MCX_CAP_DEVICE_LOG_BF_REG_SIZE0x1f 0x1f |
967 | uint8_t __reserved____reserved967[2]; |
968 | |
969 | uint16_t num_of_diagnostic_counters; |
970 | uint16_t max_wqe_sz_sq; |
971 | |
972 | uint8_t __reserved____reserved972[2]; |
973 | uint16_t max_wqe_sz_rq; |
974 | |
975 | uint8_t __reserved____reserved975[2]; |
976 | uint16_t max_wqe_sz_sq_dc; |
977 | |
978 | uint32_t max_qp_mcg; /* 25 bits */ |
979 | #define MCX_CAP_DEVICE_MAX_QP_MCG0x1ffffff 0x1ffffff |
980 | |
981 | uint8_t __reserved____reserved981[3]; |
982 | uint8_t log_max_mcq; |
983 | |
984 | uint8_t log_max_transport_domain; /* 5 bits */ |
985 | #define MCX_CAP_DEVICE_LOG_MAX_TRANSORT_DOMAIN0x1f \ |
986 | 0x1f |
987 | uint8_t log_max_pd; /* 5 bits */ |
988 | #define MCX_CAP_DEVICE_LOG_MAX_PD0x1f 0x1f |
989 | uint8_t __reserved____reserved989[1]; |
990 | uint8_t log_max_xrcd; /* 5 bits */ |
991 | #define MCX_CAP_DEVICE_LOG_MAX_XRCD0x1f 0x1f |
992 | |
993 | uint8_t __reserved____reserved993[2]; |
994 | uint16_t max_flow_counter; |
995 | |
996 | uint8_t log_max_rq; /* 5 bits */ |
997 | #define MCX_CAP_DEVICE_LOG_MAX_RQ0x1f 0x1f |
998 | uint8_t log_max_sq; /* 5 bits */ |
999 | #define MCX_CAP_DEVICE_LOG_MAX_SQ0x1f 0x1f |
1000 | uint8_t log_max_tir; /* 5 bits */ |
1001 | #define MCX_CAP_DEVICE_LOG_MAX_TIR0x1f 0x1f |
1002 | uint8_t log_max_tis; /* 5 bits */ |
1003 | #define MCX_CAP_DEVICE_LOG_MAX_TIS0x1f 0x1f |
1004 | |
1005 | uint8_t flags8; |
1006 | #define MCX_CAP_DEVICE_BASIC_CYCLIC_RCV_WQE0x80 \ |
1007 | 0x80 |
1008 | #define MCX_CAP_DEVICE_LOG_MAX_RMP0x1f 0x1f |
1009 | uint8_t log_max_rqt; /* 5 bits */ |
1010 | #define MCX_CAP_DEVICE_LOG_MAX_RQT0x1f 0x1f |
1011 | uint8_t log_max_rqt_size; /* 5 bits */ |
1012 | #define MCX_CAP_DEVICE_LOG_MAX_RQT_SIZE0x1f 0x1f |
1013 | uint8_t log_max_tis_per_sq; /* 5 bits */ |
1014 | #define MCX_CAP_DEVICE_LOG_MAX_TIS_PER_SQ0x1f \ |
1015 | 0x1f |
1016 | |
1017 | uint8_t flags9; |
1018 | #define MXC_CAP_DEVICE_EXT_STRIDE_NUM_RANGES0x80 \ |
1019 | 0x80 |
1020 | #define MXC_CAP_DEVICE_LOG_MAX_STRIDE_SZ_RQ0x1f \ |
1021 | 0x1f |
1022 | uint8_t log_min_stride_sz_rq; /* 5 bits */ |
1023 | #define MXC_CAP_DEVICE_LOG_MIN_STRIDE_SZ_RQ0x1f \ |
1024 | 0x1f |
1025 | uint8_t log_max_stride_sz_sq; /* 5 bits */ |
1026 | #define MXC_CAP_DEVICE_LOG_MAX_STRIDE_SZ_SQ0x1f \ |
1027 | 0x1f |
1028 | uint8_t log_min_stride_sz_sq; /* 5 bits */ |
1029 | #define MXC_CAP_DEVICE_LOG_MIN_STRIDE_SZ_SQ0x1f \ |
1030 | 0x1f |
1031 | |
1032 | uint8_t log_max_hairpin_queues; |
1033 | #define MXC_CAP_DEVICE_HAIRPIN0x80 0x80 |
1034 | #define MXC_CAP_DEVICE_LOG_MAX_HAIRPIN_QUEUES0x1f \ |
1035 | 0x1f |
1036 | uint8_t log_min_hairpin_queues; |
1037 | #define MXC_CAP_DEVICE_LOG_MIN_HAIRPIN_QUEUES0x1f \ |
1038 | 0x1f |
1039 | uint8_t log_max_hairpin_num_packets; |
1040 | #define MXC_CAP_DEVICE_LOG_MAX_HAIRPIN_NUM_PACKETS0x1f \ |
1041 | 0x1f |
1042 | uint8_t log_max_mq_sz; |
1043 | #define MXC_CAP_DEVICE_LOG_MAX_WQ_SZ0x1f \ |
1044 | 0x1f |
1045 | |
1046 | uint8_t log_min_hairpin_wq_data_sz; |
1047 | #define MXC_CAP_DEVICE_NIC_VPORT_CHANGE_EVENT0x80 \ |
1048 | 0x80 |
1049 | #define MXC_CAP_DEVICE_DISABLE_LOCAL_LB_UC0x40 \ |
1050 | 0x40 |
1051 | #define MXC_CAP_DEVICE_DISABLE_LOCAL_LB_MC0x20 \ |
1052 | 0x20 |
1053 | #define MCX_CAP_DEVICE_LOG_MIN_HAIRPIN_WQ_DATA_SZ0x1f \ |
1054 | 0x1f |
1055 | uint8_t log_max_vlan_list; |
1056 | #define MXC_CAP_DEVICE_SYSTEM_IMAGE_GUID_MODIFIABLE0x80 \ |
1057 | 0x80 |
1058 | #define MXC_CAP_DEVICE_LOG_MAX_VLAN_LIST0x1f \ |
1059 | 0x1f |
1060 | uint8_t log_max_current_mc_list; |
1061 | #define MXC_CAP_DEVICE_LOG_MAX_CURRENT_MC_LIST0x1f \ |
1062 | 0x1f |
1063 | uint8_t log_max_current_uc_list; |
1064 | #define MXC_CAP_DEVICE_LOG_MAX_CURRENT_UC_LIST0x1f \ |
1065 | 0x1f |
1066 | |
1067 | uint8_t __reserved____reserved1067[4]; |
1068 | |
1069 | uint32_t create_qp_start_hint; /* 24 bits */ |
1070 | |
1071 | uint8_t log_max_uctx; /* 5 bits */ |
1072 | #define MXC_CAP_DEVICE_LOG_MAX_UCTX0x1f 0x1f |
1073 | uint8_t log_max_umem; /* 5 bits */ |
1074 | #define MXC_CAP_DEVICE_LOG_MAX_UMEM0x1f 0x1f |
1075 | uint16_t max_num_eqs; |
1076 | |
1077 | uint8_t log_max_l2_table; /* 5 bits */ |
1078 | #define MXC_CAP_DEVICE_LOG_MAX_L2_TABLE0x1f 0x1f |
1079 | uint8_t __reserved____reserved1079[1]; |
1080 | uint16_t log_uar_page_sz; |
1081 | |
1082 | uint8_t __reserved____reserved1082[8]; |
1083 | |
1084 | uint32_t device_frequency_mhz; |
1085 | uint32_t device_frequency_khz; |
1086 | } __packed__attribute__((__packed__)) __aligned(8)__attribute__((__aligned__(8))); |
1087 | |
1088 | CTASSERT(offsetof(struct mcx_cap_device, max_indirection) == 0x20); |
1089 | CTASSERT(offsetof(struct mcx_cap_device, flags1) == 0x2c); |
1090 | CTASSERT(offsetof(struct mcx_cap_device, flags2) == 0x30); |
1091 | CTASSERT(offsetof(struct mcx_cap_device, snapshot_log_max_msg) == 0x38); |
1092 | CTASSERT(offsetof(struct mcx_cap_device, flags5) == 0x40); |
1093 | CTASSERT(offsetof(struct mcx_cap_device, flags7) == 0x4c); |
1094 | CTASSERT(offsetof(struct mcx_cap_device, device_frequency_mhz) == 0x98); |
1095 | CTASSERT(offsetof(struct mcx_cap_device, device_frequency_khz) == 0x9c); |
1096 | CTASSERT(sizeof(struct mcx_cap_device) <= MCX_CMDQ_MAILBOX_DATASIZE); |
1097 | |
1098 | struct mcx_cmd_set_driver_version_in { |
1099 | uint16_t cmd_opcode; |
1100 | uint8_t cmd_reserved0[4]; |
1101 | uint16_t cmd_op_mod; |
1102 | uint8_t cmd_reserved1[8]; |
1103 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1104 | |
1105 | struct mcx_cmd_set_driver_version_out { |
1106 | uint8_t cmd_status; |
1107 | uint8_t cmd_reserved0[3]; |
1108 | uint32_t cmd_syndrome; |
1109 | uint8_t cmd_reserved1[8]; |
1110 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1111 | |
1112 | struct mcx_cmd_set_driver_version { |
1113 | uint8_t cmd_driver_version[64]; |
1114 | } __packed__attribute__((__packed__)) __aligned(8)__attribute__((__aligned__(8))); |
1115 | |
1116 | struct mcx_cmd_modify_nic_vport_context_in { |
1117 | uint16_t cmd_opcode; |
1118 | uint8_t cmd_reserved0[4]; |
1119 | uint16_t cmd_op_mod; |
1120 | uint8_t cmd_reserved1[4]; |
1121 | uint32_t cmd_field_select; |
1122 | #define MCX_CMD_MODIFY_NIC_VPORT_CONTEXT_FIELD_ADDR0x04 0x04 |
1123 | #define MCX_CMD_MODIFY_NIC_VPORT_CONTEXT_FIELD_PROMISC0x10 0x10 |
1124 | #define MCX_CMD_MODIFY_NIC_VPORT_CONTEXT_FIELD_MTU0x40 0x40 |
1125 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1126 | |
1127 | struct mcx_cmd_modify_nic_vport_context_out { |
1128 | uint8_t cmd_status; |
1129 | uint8_t cmd_reserved0[3]; |
1130 | uint32_t cmd_syndrome; |
1131 | uint8_t cmd_reserved1[8]; |
1132 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1133 | |
1134 | struct mcx_cmd_query_nic_vport_context_in { |
1135 | uint16_t cmd_opcode; |
1136 | uint8_t cmd_reserved0[4]; |
1137 | uint16_t cmd_op_mod; |
1138 | uint8_t cmd_reserved1[4]; |
1139 | uint8_t cmd_allowed_list_type; |
1140 | uint8_t cmd_reserved2[3]; |
1141 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1142 | |
1143 | struct mcx_cmd_query_nic_vport_context_out { |
1144 | uint8_t cmd_status; |
1145 | uint8_t cmd_reserved0[3]; |
1146 | uint32_t cmd_syndrome; |
1147 | uint8_t cmd_reserved1[8]; |
1148 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1149 | |
1150 | struct mcx_nic_vport_ctx { |
1151 | uint32_t vp_min_wqe_inline_mode; |
1152 | uint8_t vp_reserved0[32]; |
1153 | uint32_t vp_mtu; |
1154 | uint8_t vp_reserved1[200]; |
1155 | uint16_t vp_flags; |
1156 | #define MCX_NIC_VPORT_CTX_LIST_UC_MAC(0) (0) |
1157 | #define MCX_NIC_VPORT_CTX_LIST_MC_MAC(1 << 24) (1 << 24) |
1158 | #define MCX_NIC_VPORT_CTX_LIST_VLAN(2 << 24) (2 << 24) |
1159 | #define MCX_NIC_VPORT_CTX_PROMISC_ALL(1 << 13) (1 << 13) |
1160 | #define MCX_NIC_VPORT_CTX_PROMISC_MCAST(1 << 14) (1 << 14) |
1161 | #define MCX_NIC_VPORT_CTX_PROMISC_UCAST(1 << 15) (1 << 15) |
1162 | uint16_t vp_allowed_list_size; |
1163 | uint64_t vp_perm_addr; |
1164 | uint8_t vp_reserved2[4]; |
1165 | /* allowed list follows */ |
1166 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1167 | |
1168 | struct mcx_counter { |
1169 | uint64_t packets; |
1170 | uint64_t octets; |
1171 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1172 | |
1173 | struct mcx_nic_vport_counters { |
1174 | struct mcx_counter rx_err; |
1175 | struct mcx_counter tx_err; |
1176 | uint8_t reserved0[64]; /* 0x30 */ |
1177 | struct mcx_counter rx_bcast; |
1178 | struct mcx_counter tx_bcast; |
1179 | struct mcx_counter rx_ucast; |
1180 | struct mcx_counter tx_ucast; |
1181 | struct mcx_counter rx_mcast; |
1182 | struct mcx_counter tx_mcast; |
1183 | uint8_t reserved1[0x210 - 0xd0]; |
1184 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1185 | |
1186 | struct mcx_cmd_query_vport_counters_in { |
1187 | uint16_t cmd_opcode; |
1188 | uint8_t cmd_reserved0[4]; |
1189 | uint16_t cmd_op_mod; |
1190 | uint8_t cmd_reserved1[8]; |
1191 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1192 | |
1193 | struct mcx_cmd_query_vport_counters_mb_in { |
1194 | uint8_t cmd_reserved0[8]; |
1195 | uint8_t cmd_clear; |
1196 | uint8_t cmd_reserved1[7]; |
1197 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1198 | |
1199 | struct mcx_cmd_query_vport_counters_out { |
1200 | uint8_t cmd_status; |
1201 | uint8_t cmd_reserved0[3]; |
1202 | uint32_t cmd_syndrome; |
1203 | uint8_t cmd_reserved1[8]; |
1204 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1205 | |
1206 | struct mcx_cmd_query_flow_counter_in { |
1207 | uint16_t cmd_opcode; |
1208 | uint8_t cmd_reserved0[4]; |
1209 | uint16_t cmd_op_mod; |
1210 | uint8_t cmd_reserved1[8]; |
1211 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1212 | |
1213 | struct mcx_cmd_query_flow_counter_mb_in { |
1214 | uint8_t cmd_reserved0[8]; |
1215 | uint8_t cmd_clear; |
1216 | uint8_t cmd_reserved1[5]; |
1217 | uint16_t cmd_flow_counter_id; |
1218 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1219 | |
1220 | struct mcx_cmd_query_flow_counter_out { |
1221 | uint8_t cmd_status; |
1222 | uint8_t cmd_reserved0[3]; |
1223 | uint32_t cmd_syndrome; |
1224 | uint8_t cmd_reserved1[8]; |
1225 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1226 | |
1227 | struct mcx_cmd_alloc_uar_in { |
1228 | uint16_t cmd_opcode; |
1229 | uint8_t cmd_reserved0[4]; |
1230 | uint16_t cmd_op_mod; |
1231 | uint8_t cmd_reserved1[8]; |
1232 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1233 | |
1234 | struct mcx_cmd_alloc_uar_out { |
1235 | uint8_t cmd_status; |
1236 | uint8_t cmd_reserved0[3]; |
1237 | uint32_t cmd_syndrome; |
1238 | uint32_t cmd_uar; |
1239 | uint8_t cmd_reserved1[4]; |
1240 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1241 | |
1242 | struct mcx_cmd_query_special_ctx_in { |
1243 | uint16_t cmd_opcode; |
1244 | uint8_t cmd_reserved0[4]; |
1245 | uint16_t cmd_op_mod; |
1246 | uint8_t cmd_reserved1[8]; |
1247 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1248 | |
1249 | struct mcx_cmd_query_special_ctx_out { |
1250 | uint8_t cmd_status; |
1251 | uint8_t cmd_reserved0[3]; |
1252 | uint32_t cmd_syndrome; |
1253 | uint8_t cmd_reserved1[4]; |
1254 | uint32_t cmd_resd_lkey; |
1255 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1256 | |
1257 | struct mcx_eq_ctx { |
1258 | uint32_t eq_status; |
1259 | #define MCX_EQ_CTX_STATE_SHIFT8 8 |
1260 | #define MCX_EQ_CTX_STATE_MASK(0xf << 8) (0xf << MCX_EQ_CTX_STATE_SHIFT8) |
1261 | #define MCX_EQ_CTX_STATE_ARMED0x9 0x9 |
1262 | #define MCX_EQ_CTX_STATE_FIRED0xa 0xa |
1263 | #define MCX_EQ_CTX_OI_SHIFT17 17 |
1264 | #define MCX_EQ_CTX_OI(1 << 17) (1 << MCX_EQ_CTX_OI_SHIFT17) |
1265 | #define MCX_EQ_CTX_EC_SHIFT18 18 |
1266 | #define MCX_EQ_CTX_EC(1 << 18) (1 << MCX_EQ_CTX_EC_SHIFT18) |
1267 | #define MCX_EQ_CTX_STATUS_SHIFT28 28 |
1268 | #define MCX_EQ_CTX_STATUS_MASK(0xf << 28) (0xf << MCX_EQ_CTX_STATUS_SHIFT28) |
1269 | #define MCX_EQ_CTX_STATUS_OK0x0 0x0 |
1270 | #define MCX_EQ_CTX_STATUS_EQ_WRITE_FAILURE0xa 0xa |
1271 | uint32_t eq_reserved1; |
1272 | uint32_t eq_page_offset; |
1273 | #define MCX_EQ_CTX_PAGE_OFFSET_SHIFT5 5 |
1274 | uint32_t eq_uar_size; |
1275 | #define MCX_EQ_CTX_UAR_PAGE_MASK0xffffff 0xffffff |
1276 | #define MCX_EQ_CTX_LOG_EQ_SIZE_SHIFT24 24 |
1277 | uint32_t eq_reserved2; |
1278 | uint8_t eq_reserved3[3]; |
1279 | uint8_t eq_intr; |
1280 | uint32_t eq_log_page_size; |
1281 | #define MCX_EQ_CTX_LOG_PAGE_SIZE_SHIFT24 24 |
1282 | uint32_t eq_reserved4[3]; |
1283 | uint32_t eq_consumer_counter; |
1284 | uint32_t eq_producer_counter; |
1285 | #define MCX_EQ_CTX_COUNTER_MASK0xffffff 0xffffff |
1286 | uint32_t eq_reserved5[4]; |
1287 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1288 | |
1289 | CTASSERT(sizeof(struct mcx_eq_ctx) == 64)extern char _ctassert[(sizeof(struct mcx_eq_ctx) == 64) ? 1 : -1 ] __attribute__((__unused__)); |
1290 | |
1291 | struct mcx_cmd_create_eq_in { |
1292 | uint16_t cmd_opcode; |
1293 | uint8_t cmd_reserved0[4]; |
1294 | uint16_t cmd_op_mod; |
1295 | uint8_t cmd_reserved1[8]; |
1296 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1297 | |
1298 | struct mcx_cmd_create_eq_mb_in { |
1299 | struct mcx_eq_ctx cmd_eq_ctx; |
1300 | uint8_t cmd_reserved0[8]; |
1301 | uint64_t cmd_event_bitmask; |
1302 | #define MCX_EVENT_TYPE_COMPLETION0x00 0x00 |
1303 | #define MCX_EVENT_TYPE_CQ_ERROR0x04 0x04 |
1304 | #define MCX_EVENT_TYPE_INTERNAL_ERROR0x08 0x08 |
1305 | #define MCX_EVENT_TYPE_PORT_CHANGE0x09 0x09 |
1306 | #define MCX_EVENT_TYPE_CMD_COMPLETION0x0a 0x0a |
1307 | #define MCX_EVENT_TYPE_PAGE_REQUEST0x0b 0x0b |
1308 | #define MCX_EVENT_TYPE_LAST_WQE0x13 0x13 |
1309 | uint8_t cmd_reserved1[176]; |
1310 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1311 | |
1312 | struct mcx_cmd_create_eq_out { |
1313 | uint8_t cmd_status; |
1314 | uint8_t cmd_reserved0[3]; |
1315 | uint32_t cmd_syndrome; |
1316 | uint32_t cmd_eqn; |
1317 | uint8_t cmd_reserved1[4]; |
1318 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1319 | |
1320 | struct mcx_cmd_query_eq_in { |
1321 | uint16_t cmd_opcode; |
1322 | uint8_t cmd_reserved0[4]; |
1323 | uint16_t cmd_op_mod; |
1324 | uint32_t cmd_eqn; |
1325 | uint8_t cmd_reserved1[4]; |
1326 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1327 | |
1328 | struct mcx_cmd_query_eq_out { |
1329 | uint8_t cmd_status; |
1330 | uint8_t cmd_reserved0[3]; |
1331 | uint32_t cmd_syndrome; |
1332 | uint8_t cmd_reserved1[8]; |
1333 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1334 | |
1335 | struct mcx_eq_entry { |
1336 | uint8_t eq_reserved1; |
1337 | uint8_t eq_event_type; |
1338 | uint8_t eq_reserved2; |
1339 | uint8_t eq_event_sub_type; |
1340 | |
1341 | uint8_t eq_reserved3[28]; |
1342 | uint32_t eq_event_data[7]; |
1343 | uint8_t eq_reserved4[2]; |
1344 | uint8_t eq_signature; |
1345 | uint8_t eq_owner; |
1346 | #define MCX_EQ_ENTRY_OWNER_INIT 1
1347 | } __packed __aligned(4);
1348 | 
1349 | CTASSERT(sizeof(struct mcx_eq_entry) == 64);
1350 | |
1351 | struct mcx_cmd_alloc_pd_in { |
1352 | uint16_t cmd_opcode; |
1353 | uint8_t cmd_reserved0[4]; |
1354 | uint16_t cmd_op_mod; |
1355 | uint8_t cmd_reserved1[8]; |
1356 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1357 | |
1358 | struct mcx_cmd_alloc_pd_out { |
1359 | uint8_t cmd_status; |
1360 | uint8_t cmd_reserved0[3]; |
1361 | uint32_t cmd_syndrome; |
1362 | uint32_t cmd_pd; |
1363 | uint8_t cmd_reserved1[4]; |
1364 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1365 | |
1366 | struct mcx_cmd_alloc_td_in { |
1367 | uint16_t cmd_opcode; |
1368 | uint8_t cmd_reserved0[4]; |
1369 | uint16_t cmd_op_mod; |
1370 | uint8_t cmd_reserved1[8]; |
1371 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1372 | |
1373 | struct mcx_cmd_alloc_td_out { |
1374 | uint8_t cmd_status; |
1375 | uint8_t cmd_reserved0[3]; |
1376 | uint32_t cmd_syndrome; |
1377 | uint32_t cmd_tdomain; |
1378 | uint8_t cmd_reserved1[4]; |
1379 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1380 | |
1381 | struct mcx_cmd_create_tir_in { |
1382 | uint16_t cmd_opcode; |
1383 | uint8_t cmd_reserved0[4]; |
1384 | uint16_t cmd_op_mod; |
1385 | uint8_t cmd_reserved1[8]; |
1386 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1387 | |
1388 | struct mcx_cmd_create_tir_mb_in { |
1389 | uint8_t cmd_reserved0[20]; |
1390 | uint32_t cmd_disp_type; |
1391 | #define MCX_TIR_CTX_DISP_TYPE_DIRECT 0
1392 | #define MCX_TIR_CTX_DISP_TYPE_INDIRECT 1
1393 | #define MCX_TIR_CTX_DISP_TYPE_SHIFT 28
1394 | uint8_t cmd_reserved1[8]; |
1395 | uint32_t cmd_lro; |
1396 | uint8_t cmd_reserved2[8]; |
1397 | uint32_t cmd_inline_rqn; |
1398 | uint32_t cmd_indir_table; |
1399 | uint32_t cmd_tdomain; |
1400 | #define MCX_TIR_CTX_HASH_TOEPLITZ 2
1401 | #define MCX_TIR_CTX_HASH_SHIFT 28
1402 | uint8_t cmd_rx_hash_key[40]; |
1403 | uint32_t cmd_rx_hash_sel_outer; |
1404 | #define MCX_TIR_CTX_HASH_SEL_SRC_IP (1 << 0)
1405 | #define MCX_TIR_CTX_HASH_SEL_DST_IP (1 << 1)
1406 | #define MCX_TIR_CTX_HASH_SEL_SPORT (1 << 2)
1407 | #define MCX_TIR_CTX_HASH_SEL_DPORT (1 << 3)
1408 | #define MCX_TIR_CTX_HASH_SEL_IPV4 (0 << 31)
1409 | #define MCX_TIR_CTX_HASH_SEL_IPV6 (1U << 31)
1410 | #define MCX_TIR_CTX_HASH_SEL_TCP (0 << 30)
1411 | #define MCX_TIR_CTX_HASH_SEL_UDP (1 << 30)
1412 | uint32_t cmd_rx_hash_sel_inner; |
1413 | uint8_t cmd_reserved3[152]; |
1414 | } __packed __aligned(4);
1415 | |
1416 | struct mcx_cmd_create_tir_out { |
1417 | uint8_t cmd_status; |
1418 | uint8_t cmd_reserved0[3]; |
1419 | uint32_t cmd_syndrome; |
1420 | uint32_t cmd_tirn; |
1421 | uint8_t cmd_reserved1[4]; |
1422 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1423 | |
1424 | struct mcx_cmd_destroy_tir_in { |
1425 | uint16_t cmd_opcode; |
1426 | uint8_t cmd_reserved0[4]; |
1427 | uint16_t cmd_op_mod; |
1428 | uint32_t cmd_tirn; |
1429 | uint8_t cmd_reserved1[4]; |
1430 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1431 | |
1432 | struct mcx_cmd_destroy_tir_out { |
1433 | uint8_t cmd_status; |
1434 | uint8_t cmd_reserved0[3]; |
1435 | uint32_t cmd_syndrome; |
1436 | uint8_t cmd_reserved1[8]; |
1437 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1438 | |
1439 | struct mcx_cmd_create_tis_in { |
1440 | uint16_t cmd_opcode; |
1441 | uint8_t cmd_reserved0[4]; |
1442 | uint16_t cmd_op_mod; |
1443 | uint8_t cmd_reserved1[8]; |
1444 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1445 | |
1446 | struct mcx_cmd_create_tis_mb_in { |
1447 | uint8_t cmd_reserved[16]; |
1448 | uint32_t cmd_prio; |
1449 | uint8_t cmd_reserved1[32]; |
1450 | uint32_t cmd_tdomain; |
1451 | uint8_t cmd_reserved2[120]; |
1452 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1453 | |
1454 | struct mcx_cmd_create_tis_out { |
1455 | uint8_t cmd_status; |
1456 | uint8_t cmd_reserved0[3]; |
1457 | uint32_t cmd_syndrome; |
1458 | uint32_t cmd_tisn; |
1459 | uint8_t cmd_reserved1[4]; |
1460 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1461 | |
1462 | struct mcx_cmd_destroy_tis_in { |
1463 | uint16_t cmd_opcode; |
1464 | uint8_t cmd_reserved0[4]; |
1465 | uint16_t cmd_op_mod; |
1466 | uint32_t cmd_tisn; |
1467 | uint8_t cmd_reserved1[4]; |
1468 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1469 | |
1470 | struct mcx_cmd_destroy_tis_out { |
1471 | uint8_t cmd_status; |
1472 | uint8_t cmd_reserved0[3]; |
1473 | uint32_t cmd_syndrome; |
1474 | uint8_t cmd_reserved1[8]; |
1475 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1476 | |
1477 | struct mcx_cmd_create_rqt_in { |
1478 | uint16_t cmd_opcode; |
1479 | uint8_t cmd_reserved0[4]; |
1480 | uint16_t cmd_op_mod; |
1481 | uint8_t cmd_reserved1[8]; |
1482 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1483 | |
1484 | struct mcx_rqt_ctx { |
1485 | uint8_t cmd_reserved0[20]; |
1486 | uint16_t cmd_reserved1; |
1487 | uint16_t cmd_rqt_max_size; |
1488 | uint16_t cmd_reserved2; |
1489 | uint16_t cmd_rqt_actual_size; |
1490 | uint8_t cmd_reserved3[212]; |
1491 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1492 | |
1493 | struct mcx_cmd_create_rqt_mb_in { |
1494 | uint8_t cmd_reserved0[16]; |
1495 | struct mcx_rqt_ctx cmd_rqt; |
1496 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1497 | |
1498 | struct mcx_cmd_create_rqt_out { |
1499 | uint8_t cmd_status; |
1500 | uint8_t cmd_reserved0[3]; |
1501 | uint32_t cmd_syndrome; |
1502 | uint32_t cmd_rqtn; |
1503 | uint8_t cmd_reserved1[4]; |
1504 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1505 | |
1506 | struct mcx_cmd_destroy_rqt_in { |
1507 | uint16_t cmd_opcode; |
1508 | uint8_t cmd_reserved0[4]; |
1509 | uint16_t cmd_op_mod; |
1510 | uint32_t cmd_rqtn; |
1511 | uint8_t cmd_reserved1[4]; |
1512 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1513 | |
1514 | struct mcx_cmd_destroy_rqt_out { |
1515 | uint8_t cmd_status; |
1516 | uint8_t cmd_reserved0[3]; |
1517 | uint32_t cmd_syndrome; |
1518 | uint8_t cmd_reserved1[8]; |
1519 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1520 | |
1521 | struct mcx_cq_ctx { |
1522 | uint32_t cq_status; |
1523 | #define MCX_CQ_CTX_STATUS_SHIFT 28
1524 | #define MCX_CQ_CTX_STATUS_MASK (0xf << MCX_CQ_CTX_STATUS_SHIFT)
1525 | #define MCX_CQ_CTX_STATUS_OK 0x0
1526 | #define MCX_CQ_CTX_STATUS_OVERFLOW 0x9
1527 | #define MCX_CQ_CTX_STATUS_WRITE_FAIL 0xa
1528 | #define MCX_CQ_CTX_STATE_SHIFT 8
1529 | #define MCX_CQ_CTX_STATE_MASK (0xf << MCX_CQ_CTX_STATE_SHIFT)
1530 | #define MCX_CQ_CTX_STATE_SOLICITED 0x6
1531 | #define MCX_CQ_CTX_STATE_ARMED 0x9
1532 | #define MCX_CQ_CTX_STATE_FIRED 0xa
1533 | uint32_t cq_reserved1;
1534 | uint32_t cq_page_offset;
1535 | uint32_t cq_uar_size;
1536 | #define MCX_CQ_CTX_UAR_PAGE_MASK 0xffffff
1537 | #define MCX_CQ_CTX_LOG_CQ_SIZE_SHIFT 24
1538 | uint32_t cq_period_max_count;
1539 | #define MCX_CQ_CTX_PERIOD_SHIFT 16
1540 | uint32_t cq_eqn;
1541 | uint32_t cq_log_page_size;
1542 | #define MCX_CQ_CTX_LOG_PAGE_SIZE_SHIFT 24
1543 | uint32_t cq_reserved2; |
1544 | uint32_t cq_last_notified; |
1545 | uint32_t cq_last_solicit; |
1546 | uint32_t cq_consumer_counter; |
1547 | uint32_t cq_producer_counter; |
1548 | uint8_t cq_reserved3[8]; |
1549 | uint64_t cq_doorbell; |
1550 | } __packed __aligned(4);
1551 | 
1552 | CTASSERT(sizeof(struct mcx_cq_ctx) == 64);
1553 | |
1554 | struct mcx_cmd_create_cq_in { |
1555 | uint16_t cmd_opcode; |
1556 | uint8_t cmd_reserved0[4]; |
1557 | uint16_t cmd_op_mod; |
1558 | uint8_t cmd_reserved1[8]; |
1559 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1560 | |
1561 | struct mcx_cmd_create_cq_mb_in { |
1562 | struct mcx_cq_ctx cmd_cq_ctx; |
1563 | uint8_t cmd_reserved1[192]; |
1564 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1565 | |
1566 | struct mcx_cmd_create_cq_out { |
1567 | uint8_t cmd_status; |
1568 | uint8_t cmd_reserved0[3]; |
1569 | uint32_t cmd_syndrome; |
1570 | uint32_t cmd_cqn; |
1571 | uint8_t cmd_reserved1[4]; |
1572 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1573 | |
1574 | struct mcx_cmd_destroy_cq_in { |
1575 | uint16_t cmd_opcode; |
1576 | uint8_t cmd_reserved0[4]; |
1577 | uint16_t cmd_op_mod; |
1578 | uint32_t cmd_cqn; |
1579 | uint8_t cmd_reserved1[4]; |
1580 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1581 | |
1582 | struct mcx_cmd_destroy_cq_out { |
1583 | uint8_t cmd_status; |
1584 | uint8_t cmd_reserved0[3]; |
1585 | uint32_t cmd_syndrome; |
1586 | uint8_t cmd_reserved1[8]; |
1587 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1588 | |
1589 | struct mcx_cmd_query_cq_in { |
1590 | uint16_t cmd_opcode; |
1591 | uint8_t cmd_reserved0[4]; |
1592 | uint16_t cmd_op_mod; |
1593 | uint32_t cmd_cqn; |
1594 | uint8_t cmd_reserved1[4]; |
1595 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1596 | |
1597 | struct mcx_cmd_query_cq_out { |
1598 | uint8_t cmd_status; |
1599 | uint8_t cmd_reserved0[3]; |
1600 | uint32_t cmd_syndrome; |
1601 | uint8_t cmd_reserved1[8]; |
1602 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1603 | |
1604 | struct mcx_cq_entry { |
1605 | uint32_t __reserved__;
1606 | uint32_t cq_lro; |
1607 | uint32_t cq_lro_ack_seq_num; |
1608 | uint32_t cq_rx_hash; |
1609 | uint8_t cq_rx_hash_type; |
1610 | uint8_t cq_ml_path; |
1611 | uint16_t __reserved__;
1612 | uint32_t cq_checksum;
1613 | uint32_t __reserved__;
1614 | uint32_t cq_flags;
1615 | #define MCX_CQ_ENTRY_FLAGS_L4_OK (1 << 26)
1616 | #define MCX_CQ_ENTRY_FLAGS_L3_OK (1 << 25)
1617 | #define MCX_CQ_ENTRY_FLAGS_L2_OK (1 << 24)
1618 | #define MCX_CQ_ENTRY_FLAGS_CV (1 << 16)
1619 | #define MCX_CQ_ENTRY_FLAGS_VLAN_MASK (0xffff)
1620 | 
1621 | uint32_t cq_lro_srqn;
1622 | uint32_t __reserved__[2];
1623 | uint32_t cq_byte_cnt; |
1624 | uint64_t cq_timestamp; |
1625 | uint8_t cq_rx_drops; |
1626 | uint8_t cq_flow_tag[3]; |
1627 | uint16_t cq_wqe_count; |
1628 | uint8_t cq_signature; |
1629 | uint8_t cq_opcode_owner; |
1630 | #define MCX_CQ_ENTRY_FLAG_OWNER (1 << 0)
1631 | #define MCX_CQ_ENTRY_FLAG_SE (1 << 1)
1632 | #define MCX_CQ_ENTRY_FORMAT_SHIFT 2
1633 | #define MCX_CQ_ENTRY_OPCODE_SHIFT 4
1634 | 
1635 | #define MCX_CQ_ENTRY_FORMAT_NO_INLINE 0
1636 | #define MCX_CQ_ENTRY_FORMAT_INLINE_32 1
1637 | #define MCX_CQ_ENTRY_FORMAT_INLINE_64 2
1638 | #define MCX_CQ_ENTRY_FORMAT_COMPRESSED 3
1639 | 
1640 | #define MCX_CQ_ENTRY_OPCODE_REQ 0
1641 | #define MCX_CQ_ENTRY_OPCODE_SEND 2
1642 | #define MCX_CQ_ENTRY_OPCODE_REQ_ERR 13
1643 | #define MCX_CQ_ENTRY_OPCODE_SEND_ERR 14
1644 | #define MCX_CQ_ENTRY_OPCODE_INVALID 15
1645 | 
1646 | } __packed __aligned(4);
1647 | 
1648 | CTASSERT(sizeof(struct mcx_cq_entry) == 64);
1649 | |
1650 | struct mcx_cq_doorbell { |
1651 | uint32_t db_update_ci; |
1652 | uint32_t db_arm_ci; |
1653 | #define MCX_CQ_DOORBELL_ARM_CMD_SN_SHIFT 28
1654 | #define MCX_CQ_DOORBELL_ARM_CMD (1 << 24)
1655 | #define MCX_CQ_DOORBELL_ARM_CI_MASK (0xffffff)
1656 | } __packed __aligned(8);
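/*
 * Added note: as the field names suggest, the driver records the last
 * consumed completion index in db_update_ci, and re-arms the CQ (see
 * mcx_arm_cq below) by writing db_arm_ci with the index, the command
 * sequence number at MCX_CQ_DOORBELL_ARM_CMD_SN_SHIFT, and
 * MCX_CQ_DOORBELL_ARM_CMD.
 */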
1657 | |
1658 | struct mcx_wq_ctx { |
1659 | uint8_t wq_type; |
1660 | #define MCX_WQ_CTX_TYPE_CYCLIC (1 << 4)
1661 | #define MCX_WQ_CTX_TYPE_SIGNATURE (1 << 3)
1662 | uint8_t wq_reserved0[5]; |
1663 | uint16_t wq_lwm; |
1664 | uint32_t wq_pd; |
1665 | uint32_t wq_uar_page; |
1666 | uint64_t wq_doorbell; |
1667 | uint32_t wq_hw_counter; |
1668 | uint32_t wq_sw_counter; |
1669 | uint16_t wq_log_stride; |
1670 | uint8_t wq_log_page_sz; |
1671 | uint8_t wq_log_size; |
1672 | uint8_t wq_reserved1[156]; |
1673 | } __packed __aligned(4);
1674 | 
1675 | CTASSERT(sizeof(struct mcx_wq_ctx) == 0xC0);
1676 | |
1677 | struct mcx_sq_ctx { |
1678 | uint32_t sq_flags; |
1679 | #define MCX_SQ_CTX_RLKEY (1U << 31)
1680 | #define MCX_SQ_CTX_FRE_SHIFT (1 << 29)
1681 | #define MCX_SQ_CTX_FLUSH_IN_ERROR (1 << 28)
1682 | #define MCX_SQ_CTX_MIN_WQE_INLINE_SHIFT 24
1683 | #define MCX_SQ_CTX_STATE_SHIFT 20
1684 | #define MCX_SQ_CTX_STATE_MASK (0xf << 20)
1685 | #define MCX_SQ_CTX_STATE_RST 0
1686 | #define MCX_SQ_CTX_STATE_RDY 1
1687 | #define MCX_SQ_CTX_STATE_ERR 3
1688 | uint32_t sq_user_index; |
1689 | uint32_t sq_cqn; |
1690 | uint32_t sq_reserved1[5]; |
1691 | uint32_t sq_tis_lst_sz; |
1692 | #define MCX_SQ_CTX_TIS_LST_SZ_SHIFT 16
1693 | uint32_t sq_reserved2[2]; |
1694 | uint32_t sq_tis_num; |
1695 | struct mcx_wq_ctx sq_wq; |
1696 | } __packed __aligned(4);
1697 | |
1698 | struct mcx_sq_entry_seg { |
1699 | uint32_t sqs_byte_count; |
1700 | uint32_t sqs_lkey; |
1701 | uint64_t sqs_addr; |
1702 | } __packed __aligned(4);
1703 | |
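/*
 * Added note: a send WQE begins with the 64-byte entry below: a control
 * segment, an ethernet segment with room for up to 18 bytes of inline
 * headers, and then 16-byte data segments (struct mcx_sq_entry_seg above);
 * sqe_segs[1] is just the first of them, packets needing more segments
 * presumably spill into the following queue slots.
 */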
1704 | struct mcx_sq_entry { |
1705 | /* control segment */ |
1706 | uint32_t sqe_opcode_index; |
1707 | #define MCX_SQE_WQE_INDEX_SHIFT 8
1708 | #define MCX_SQE_WQE_OPCODE_NOP 0x00
1709 | #define MCX_SQE_WQE_OPCODE_SEND 0x0a
1710 | uint32_t sqe_ds_sq_num;
1711 | #define MCX_SQE_SQ_NUM_SHIFT 8
1712 | uint32_t sqe_signature;
1713 | #define MCX_SQE_SIGNATURE_SHIFT 24
1714 | #define MCX_SQE_SOLICITED_EVENT 0x02
1715 | #define MCX_SQE_CE_CQE_ON_ERR 0x00
1716 | #define MCX_SQE_CE_CQE_FIRST_ERR 0x04
1717 | #define MCX_SQE_CE_CQE_ALWAYS 0x08
1718 | #define MCX_SQE_CE_CQE_SOLICIT 0x0C
1719 | #define MCX_SQE_FM_NO_FENCE 0x00
1720 | #define MCX_SQE_FM_SMALL_FENCE 0x40
1721 | uint32_t sqe_mkey; |
1722 | |
1723 | /* ethernet segment */ |
1724 | uint32_t sqe_reserved1; |
1725 | uint32_t sqe_mss_csum; |
1726 | #define MCX_SQE_L4_CSUM (1U << 31)
1727 | #define MCX_SQE_L3_CSUM (1 << 30)
1728 | uint32_t sqe_reserved2; |
1729 | uint16_t sqe_inline_header_size; |
1730 | uint16_t sqe_inline_headers[9]; |
1731 | |
1732 | /* data segment */ |
1733 | struct mcx_sq_entry_seg sqe_segs[1]; |
1734 | } __packed __aligned(64);
1735 | 
1736 | CTASSERT(sizeof(struct mcx_sq_entry) == 64);
1737 | |
1738 | struct mcx_cmd_create_sq_in { |
1739 | uint16_t cmd_opcode; |
1740 | uint8_t cmd_reserved0[4]; |
1741 | uint16_t cmd_op_mod; |
1742 | uint8_t cmd_reserved1[8]; |
1743 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1744 | |
1745 | struct mcx_cmd_create_sq_out { |
1746 | uint8_t cmd_status; |
1747 | uint8_t cmd_reserved0[3]; |
1748 | uint32_t cmd_syndrome; |
1749 | uint32_t cmd_sqn; |
1750 | uint8_t cmd_reserved1[4]; |
1751 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1752 | |
1753 | struct mcx_cmd_modify_sq_in { |
1754 | uint16_t cmd_opcode; |
1755 | uint8_t cmd_reserved0[4]; |
1756 | uint16_t cmd_op_mod; |
1757 | uint32_t cmd_sq_state; |
1758 | uint8_t cmd_reserved1[4]; |
1759 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1760 | |
1761 | struct mcx_cmd_modify_sq_mb_in { |
1762 | uint32_t cmd_modify_hi; |
1763 | uint32_t cmd_modify_lo; |
1764 | uint8_t cmd_reserved0[8]; |
1765 | struct mcx_sq_ctx cmd_sq_ctx; |
1766 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1767 | |
1768 | struct mcx_cmd_modify_sq_out { |
1769 | uint8_t cmd_status; |
1770 | uint8_t cmd_reserved0[3]; |
1771 | uint32_t cmd_syndrome; |
1772 | uint8_t cmd_reserved1[8]; |
1773 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1774 | |
1775 | struct mcx_cmd_destroy_sq_in { |
1776 | uint16_t cmd_opcode; |
1777 | uint8_t cmd_reserved0[4]; |
1778 | uint16_t cmd_op_mod; |
1779 | uint32_t cmd_sqn; |
1780 | uint8_t cmd_reserved1[4]; |
1781 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1782 | |
1783 | struct mcx_cmd_destroy_sq_out { |
1784 | uint8_t cmd_status; |
1785 | uint8_t cmd_reserved0[3]; |
1786 | uint32_t cmd_syndrome; |
1787 | uint8_t cmd_reserved1[8]; |
1788 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1789 | |
1790 | |
1791 | struct mcx_rq_ctx { |
1792 | uint32_t rq_flags; |
1793 | #define MCX_RQ_CTX_RLKEY (1U << 31)
1794 | #define MCX_RQ_CTX_VLAN_STRIP_DIS (1 << 28)
1795 | #define MCX_RQ_CTX_MEM_RQ_TYPE_SHIFT 24
1796 | #define MCX_RQ_CTX_STATE_SHIFT 20
1797 | #define MCX_RQ_CTX_STATE_MASK (0xf << 20)
1798 | #define MCX_RQ_CTX_STATE_RST 0
1799 | #define MCX_RQ_CTX_STATE_RDY 1
1800 | #define MCX_RQ_CTX_STATE_ERR 3
1801 | #define MCX_RQ_CTX_FLUSH_IN_ERROR (1 << 18)
1802 | uint32_t rq_user_index; |
1803 | uint32_t rq_cqn; |
1804 | uint32_t rq_reserved1; |
1805 | uint32_t rq_rmpn; |
1806 | uint32_t rq_reserved2[7]; |
1807 | struct mcx_wq_ctx rq_wq; |
1808 | } __packed __aligned(4);
1809 | |
1810 | struct mcx_rq_entry { |
1811 | uint32_t rqe_byte_count; |
1812 | uint32_t rqe_lkey; |
1813 | uint64_t rqe_addr; |
1814 | } __packed __aligned(16);
1815 | |
1816 | struct mcx_cmd_create_rq_in { |
1817 | uint16_t cmd_opcode; |
1818 | uint8_t cmd_reserved0[4]; |
1819 | uint16_t cmd_op_mod; |
1820 | uint8_t cmd_reserved1[8]; |
1821 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1822 | |
1823 | struct mcx_cmd_create_rq_out { |
1824 | uint8_t cmd_status; |
1825 | uint8_t cmd_reserved0[3]; |
1826 | uint32_t cmd_syndrome; |
1827 | uint32_t cmd_rqn; |
1828 | uint8_t cmd_reserved1[4]; |
1829 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1830 | |
1831 | struct mcx_cmd_modify_rq_in { |
1832 | uint16_t cmd_opcode; |
1833 | uint8_t cmd_reserved0[4]; |
1834 | uint16_t cmd_op_mod; |
1835 | uint32_t cmd_rq_state; |
1836 | uint8_t cmd_reserved1[4]; |
1837 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1838 | |
1839 | struct mcx_cmd_modify_rq_mb_in { |
1840 | uint32_t cmd_modify_hi; |
1841 | uint32_t cmd_modify_lo; |
1842 | uint8_t cmd_reserved0[8]; |
1843 | struct mcx_rq_ctx cmd_rq_ctx; |
1844 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1845 | |
1846 | struct mcx_cmd_modify_rq_out { |
1847 | uint8_t cmd_status; |
1848 | uint8_t cmd_reserved0[3]; |
1849 | uint32_t cmd_syndrome; |
1850 | uint8_t cmd_reserved1[8]; |
1851 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1852 | |
1853 | struct mcx_cmd_destroy_rq_in { |
1854 | uint16_t cmd_opcode; |
1855 | uint8_t cmd_reserved0[4]; |
1856 | uint16_t cmd_op_mod; |
1857 | uint32_t cmd_rqn; |
1858 | uint8_t cmd_reserved1[4]; |
1859 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1860 | |
1861 | struct mcx_cmd_destroy_rq_out { |
1862 | uint8_t cmd_status; |
1863 | uint8_t cmd_reserved0[3]; |
1864 | uint32_t cmd_syndrome; |
1865 | uint8_t cmd_reserved1[8]; |
1866 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1867 | |
1868 | struct mcx_cmd_create_flow_table_in { |
1869 | uint16_t cmd_opcode; |
1870 | uint8_t cmd_reserved0[4]; |
1871 | uint16_t cmd_op_mod; |
1872 | uint8_t cmd_reserved1[8]; |
1873 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1874 | |
1875 | struct mcx_flow_table_ctx { |
1876 | uint8_t ft_miss_action; |
1877 | uint8_t ft_level; |
1878 | uint8_t ft_reserved0; |
1879 | uint8_t ft_log_size; |
1880 | uint32_t ft_table_miss_id; |
1881 | uint8_t ft_reserved1[28]; |
1882 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1883 | |
1884 | struct mcx_cmd_create_flow_table_mb_in { |
1885 | uint8_t cmd_table_type; |
1886 | uint8_t cmd_reserved0[7]; |
1887 | struct mcx_flow_table_ctx cmd_ctx; |
1888 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1889 | |
1890 | struct mcx_cmd_create_flow_table_out { |
1891 | uint8_t cmd_status; |
1892 | uint8_t cmd_reserved0[3]; |
1893 | uint32_t cmd_syndrome; |
1894 | uint32_t cmd_table_id; |
1895 | uint8_t cmd_reserved1[4]; |
1896 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1897 | |
1898 | struct mcx_cmd_destroy_flow_table_in { |
1899 | uint16_t cmd_opcode; |
1900 | uint8_t cmd_reserved0[4]; |
1901 | uint16_t cmd_op_mod; |
1902 | uint8_t cmd_reserved1[8]; |
1903 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1904 | |
1905 | struct mcx_cmd_destroy_flow_table_mb_in { |
1906 | uint8_t cmd_table_type; |
1907 | uint8_t cmd_reserved0[3]; |
1908 | uint32_t cmd_table_id; |
1909 | uint8_t cmd_reserved1[40]; |
1910 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1911 | |
1912 | struct mcx_cmd_destroy_flow_table_out { |
1913 | uint8_t cmd_status; |
1914 | uint8_t cmd_reserved0[3]; |
1915 | uint32_t cmd_syndrome; |
1916 | uint8_t cmd_reserved1[8]; |
1917 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1918 | |
1919 | struct mcx_cmd_set_flow_table_root_in { |
1920 | uint16_t cmd_opcode; |
1921 | uint8_t cmd_reserved0[4]; |
1922 | uint16_t cmd_op_mod; |
1923 | uint8_t cmd_reserved1[8]; |
1924 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1925 | |
1926 | struct mcx_cmd_set_flow_table_root_mb_in { |
1927 | uint8_t cmd_table_type; |
1928 | uint8_t cmd_reserved0[3]; |
1929 | uint32_t cmd_table_id; |
1930 | uint8_t cmd_reserved1[56]; |
1931 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1932 | |
1933 | struct mcx_cmd_set_flow_table_root_out { |
1934 | uint8_t cmd_status; |
1935 | uint8_t cmd_reserved0[3]; |
1936 | uint32_t cmd_syndrome; |
1937 | uint8_t cmd_reserved1[8]; |
1938 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1939 | |
1940 | struct mcx_flow_match { |
1941 | /* outer headers */ |
1942 | uint8_t mc_src_mac[6]; |
1943 | uint16_t mc_ethertype; |
1944 | uint8_t mc_dest_mac[6]; |
1945 | uint16_t mc_first_vlan; |
1946 | uint8_t mc_ip_proto; |
1947 | uint8_t mc_ip_dscp_ecn; |
1948 | uint8_t mc_vlan_flags; |
1949 | #define MCX_FLOW_MATCH_IP_FRAG (1 << 5)
1950 | uint8_t mc_tcp_flags; |
1951 | uint16_t mc_tcp_sport; |
1952 | uint16_t mc_tcp_dport; |
1953 | uint32_t mc_reserved0; |
1954 | uint16_t mc_udp_sport; |
1955 | uint16_t mc_udp_dport; |
1956 | uint8_t mc_src_ip[16]; |
1957 | uint8_t mc_dest_ip[16]; |
1958 | |
1959 | /* misc parameters */ |
1960 | uint8_t mc_reserved1[8]; |
1961 | uint16_t mc_second_vlan; |
1962 | uint8_t mc_reserved2[2]; |
1963 | uint8_t mc_second_vlan_flags; |
1964 | uint8_t mc_reserved3[15]; |
1965 | uint32_t mc_outer_ipv6_flow_label; |
1966 | uint8_t mc_reserved4[32]; |
1967 | |
1968 | uint8_t mc_reserved[384]; |
1969 | } __packed __aligned(4);
1970 | 
1971 | CTASSERT(sizeof(struct mcx_flow_match) == 512);
1972 | |
1973 | struct mcx_cmd_create_flow_group_in { |
1974 | uint16_t cmd_opcode; |
1975 | uint8_t cmd_reserved0[4]; |
1976 | uint16_t cmd_op_mod; |
1977 | uint8_t cmd_reserved1[8]; |
1978 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1979 | |
1980 | struct mcx_cmd_create_flow_group_mb_in { |
1981 | uint8_t cmd_table_type; |
1982 | uint8_t cmd_reserved0[3]; |
1983 | uint32_t cmd_table_id; |
1984 | uint8_t cmd_reserved1[4]; |
1985 | uint32_t cmd_start_flow_index; |
1986 | uint8_t cmd_reserved2[4]; |
1987 | uint32_t cmd_end_flow_index; |
1988 | uint8_t cmd_reserved3[23]; |
1989 | uint8_t cmd_match_criteria_enable; |
1990 | #define MCX_CREATE_FLOW_GROUP_CRIT_OUTER (1 << 0)
1991 | #define MCX_CREATE_FLOW_GROUP_CRIT_MISC (1 << 1)
1992 | #define MCX_CREATE_FLOW_GROUP_CRIT_INNER (1 << 2)
1993 | struct mcx_flow_match cmd_match_criteria;
1994 | uint8_t cmd_reserved4[448];
1995 | } __packed __aligned(4);
1996 | |
1997 | struct mcx_cmd_create_flow_group_out { |
1998 | uint8_t cmd_status; |
1999 | uint8_t cmd_reserved0[3]; |
2000 | uint32_t cmd_syndrome; |
2001 | uint32_t cmd_group_id; |
2002 | uint8_t cmd_reserved1[4]; |
2003 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
2004 | |
2005 | struct mcx_flow_ctx { |
2006 | uint8_t fc_reserved0[4]; |
2007 | uint32_t fc_group_id; |
2008 | uint32_t fc_flow_tag; |
2009 | uint32_t fc_action; |
2010 | #define MCX_FLOW_CONTEXT_ACTION_ALLOW (1 << 0)
2011 | #define MCX_FLOW_CONTEXT_ACTION_DROP (1 << 1)
2012 | #define MCX_FLOW_CONTEXT_ACTION_FORWARD (1 << 2)
2013 | #define MCX_FLOW_CONTEXT_ACTION_COUNT (1 << 3)
2014 | uint32_t fc_dest_list_size;
2015 | uint32_t fc_counter_list_size;
2016 | uint8_t fc_reserved1[40];
2017 | struct mcx_flow_match fc_match_value;
2018 | uint8_t fc_reserved2[192];
2019 | } __packed __aligned(4);
2020 | 
2021 | #define MCX_FLOW_CONTEXT_DEST_TYPE_TABLE (1 << 24)
2022 | #define MCX_FLOW_CONTEXT_DEST_TYPE_TIR (2 << 24)
2023 | |
2024 | struct mcx_cmd_destroy_flow_group_in { |
2025 | uint16_t cmd_opcode; |
2026 | uint8_t cmd_reserved0[4]; |
2027 | uint16_t cmd_op_mod; |
2028 | uint8_t cmd_reserved1[8]; |
2029 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
2030 | |
2031 | struct mcx_cmd_destroy_flow_group_mb_in { |
2032 | uint8_t cmd_table_type; |
2033 | uint8_t cmd_reserved0[3]; |
2034 | uint32_t cmd_table_id; |
2035 | uint32_t cmd_group_id; |
2036 | uint8_t cmd_reserved1[36]; |
2037 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
2038 | |
2039 | struct mcx_cmd_destroy_flow_group_out { |
2040 | uint8_t cmd_status; |
2041 | uint8_t cmd_reserved0[3]; |
2042 | uint32_t cmd_syndrome; |
2043 | uint8_t cmd_reserved1[8]; |
2044 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
2045 | |
2046 | struct mcx_cmd_set_flow_table_entry_in { |
2047 | uint16_t cmd_opcode; |
2048 | uint8_t cmd_reserved0[4]; |
2049 | uint16_t cmd_op_mod; |
2050 | uint8_t cmd_reserved1[8]; |
2051 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
2052 | |
2053 | struct mcx_cmd_set_flow_table_entry_mb_in { |
2054 | uint8_t cmd_table_type; |
2055 | uint8_t cmd_reserved0[3]; |
2056 | uint32_t cmd_table_id; |
2057 | uint32_t cmd_modify_enable_mask; |
2058 | uint8_t cmd_reserved1[4]; |
2059 | uint32_t cmd_flow_index; |
2060 | uint8_t cmd_reserved2[28]; |
2061 | struct mcx_flow_ctx cmd_flow_ctx; |
2062 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
2063 | |
2064 | struct mcx_cmd_set_flow_table_entry_out { |
2065 | uint8_t cmd_status; |
2066 | uint8_t cmd_reserved0[3]; |
2067 | uint32_t cmd_syndrome; |
2068 | uint8_t cmd_reserved1[8]; |
2069 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
2070 | |
2071 | struct mcx_cmd_query_flow_table_entry_in { |
2072 | uint16_t cmd_opcode; |
2073 | uint8_t cmd_reserved0[4]; |
2074 | uint16_t cmd_op_mod; |
2075 | uint8_t cmd_reserved1[8]; |
2076 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
2077 | |
2078 | struct mcx_cmd_query_flow_table_entry_mb_in { |
2079 | uint8_t cmd_table_type; |
2080 | uint8_t cmd_reserved0[3]; |
2081 | uint32_t cmd_table_id; |
2082 | uint8_t cmd_reserved1[8]; |
2083 | uint32_t cmd_flow_index; |
2084 | uint8_t cmd_reserved2[28]; |
2085 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
2086 | |
2087 | struct mcx_cmd_query_flow_table_entry_out { |
2088 | uint8_t cmd_status; |
2089 | uint8_t cmd_reserved0[3]; |
2090 | uint32_t cmd_syndrome; |
2091 | uint8_t cmd_reserved1[8]; |
2092 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
2093 | |
2094 | struct mcx_cmd_query_flow_table_entry_mb_out { |
2095 | uint8_t cmd_reserved0[48]; |
2096 | struct mcx_flow_ctx cmd_flow_ctx; |
2097 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
2098 | |
2099 | struct mcx_cmd_delete_flow_table_entry_in { |
2100 | uint16_t cmd_opcode; |
2101 | uint8_t cmd_reserved0[4]; |
2102 | uint16_t cmd_op_mod; |
2103 | uint8_t cmd_reserved1[8]; |
2104 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
2105 | |
2106 | struct mcx_cmd_delete_flow_table_entry_mb_in { |
2107 | uint8_t cmd_table_type; |
2108 | uint8_t cmd_reserved0[3]; |
2109 | uint32_t cmd_table_id; |
2110 | uint8_t cmd_reserved1[8]; |
2111 | uint32_t cmd_flow_index; |
2112 | uint8_t cmd_reserved2[28]; |
2113 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
2114 | |
2115 | struct mcx_cmd_delete_flow_table_entry_out { |
2116 | uint8_t cmd_status; |
2117 | uint8_t cmd_reserved0[3]; |
2118 | uint32_t cmd_syndrome; |
2119 | uint8_t cmd_reserved1[8]; |
2120 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
2121 | |
2122 | struct mcx_cmd_query_flow_group_in { |
2123 | uint16_t cmd_opcode; |
2124 | uint8_t cmd_reserved0[4]; |
2125 | uint16_t cmd_op_mod; |
2126 | uint8_t cmd_reserved1[8]; |
2127 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
2128 | |
2129 | struct mcx_cmd_query_flow_group_mb_in { |
2130 | uint8_t cmd_table_type; |
2131 | uint8_t cmd_reserved0[3]; |
2132 | uint32_t cmd_table_id; |
2133 | uint32_t cmd_group_id; |
2134 | uint8_t cmd_reserved1[36]; |
2135 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
2136 | |
2137 | struct mcx_cmd_query_flow_group_out { |
2138 | uint8_t cmd_status; |
2139 | uint8_t cmd_reserved0[3]; |
2140 | uint32_t cmd_syndrome; |
2141 | uint8_t cmd_reserved1[8]; |
2142 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
2143 | |
2144 | struct mcx_cmd_query_flow_group_mb_out { |
2145 | uint8_t cmd_reserved0[12]; |
2146 | uint32_t cmd_start_flow_index; |
2147 | uint8_t cmd_reserved1[4]; |
2148 | uint32_t cmd_end_flow_index; |
2149 | uint8_t cmd_reserved2[20]; |
2150 | uint32_t cmd_match_criteria_enable; |
2151 | uint8_t cmd_match_criteria[512]; |
2152 | uint8_t cmd_reserved4[448]; |
2153 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
2154 | |
2155 | struct mcx_cmd_query_flow_table_in { |
2156 | uint16_t cmd_opcode; |
2157 | uint8_t cmd_reserved0[4]; |
2158 | uint16_t cmd_op_mod; |
2159 | uint8_t cmd_reserved1[8]; |
2160 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
2161 | |
2162 | struct mcx_cmd_query_flow_table_mb_in { |
2163 | uint8_t cmd_table_type; |
2164 | uint8_t cmd_reserved0[3]; |
2165 | uint32_t cmd_table_id; |
2166 | uint8_t cmd_reserved1[40]; |
2167 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
2168 | |
2169 | struct mcx_cmd_query_flow_table_out { |
2170 | uint8_t cmd_status; |
2171 | uint8_t cmd_reserved0[3]; |
2172 | uint32_t cmd_syndrome; |
2173 | uint8_t cmd_reserved1[8]; |
2174 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
2175 | |
2176 | struct mcx_cmd_query_flow_table_mb_out { |
2177 | uint8_t cmd_reserved0[4]; |
2178 | struct mcx_flow_table_ctx cmd_ctx; |
2179 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
2180 | |
2181 | struct mcx_cmd_alloc_flow_counter_in { |
2182 | uint16_t cmd_opcode; |
2183 | uint8_t cmd_reserved0[4]; |
2184 | uint16_t cmd_op_mod; |
2185 | uint8_t cmd_reserved1[8]; |
2186 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
2187 | |
2188 | struct mcx_cmd_query_rq_in { |
2189 | uint16_t cmd_opcode; |
2190 | uint8_t cmd_reserved0[4]; |
2191 | uint16_t cmd_op_mod; |
2192 | uint32_t cmd_rqn; |
2193 | uint8_t cmd_reserved1[4]; |
2194 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
2195 | |
2196 | struct mcx_cmd_query_rq_out { |
2197 | uint8_t cmd_status; |
2198 | uint8_t cmd_reserved0[3]; |
2199 | uint32_t cmd_syndrome; |
2200 | uint8_t cmd_reserved1[8]; |
2201 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
2202 | |
2203 | struct mcx_cmd_query_rq_mb_out { |
2204 | uint8_t cmd_reserved0[16]; |
2205 | struct mcx_rq_ctx cmd_ctx; |
2206 | }; |
2207 | |
2208 | struct mcx_cmd_query_sq_in { |
2209 | uint16_t cmd_opcode; |
2210 | uint8_t cmd_reserved0[4]; |
2211 | uint16_t cmd_op_mod; |
2212 | uint32_t cmd_sqn; |
2213 | uint8_t cmd_reserved1[4]; |
2214 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
2215 | |
2216 | struct mcx_cmd_query_sq_out { |
2217 | uint8_t cmd_status; |
2218 | uint8_t cmd_reserved0[3]; |
2219 | uint32_t cmd_syndrome; |
2220 | uint8_t cmd_reserved1[8]; |
2221 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
2222 | |
2223 | struct mcx_cmd_query_sq_mb_out { |
2224 | uint8_t cmd_reserved0[16]; |
2225 | struct mcx_sq_ctx cmd_ctx; |
2226 | }; |
2227 | |
2228 | struct mcx_cmd_alloc_flow_counter_out { |
2229 | uint8_t cmd_status; |
2230 | uint8_t cmd_reserved0[3]; |
2231 | uint32_t cmd_syndrome; |
2232 | uint8_t cmd_reserved1[2]; |
2233 | uint16_t cmd_flow_counter_id; |
2234 | uint8_t cmd_reserved2[4]; |
2235 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
2236 | |
2237 | struct mcx_wq_doorbell { |
2238 | uint32_t db_recv_counter; |
2239 | uint32_t db_send_counter; |
2240 | } __packed__attribute__((__packed__)) __aligned(8)__attribute__((__aligned__(8))); |
2241 | |
2242 | struct mcx_dmamem { |
2243 | bus_dmamap_t mxm_map; |
2244 | bus_dma_segment_t mxm_seg; |
2245 | int mxm_nsegs; |
2246 | size_t mxm_size; |
2247 | caddr_t mxm_kva; |
2248 | }; |
2249 | #define MCX_DMA_MAP(_mxm) ((_mxm)->mxm_map)
2250 | #define MCX_DMA_DVA(_mxm) ((_mxm)->mxm_map->dm_segs[0].ds_addr)
2251 | #define MCX_DMA_KVA(_mxm) ((void *)(_mxm)->mxm_kva)
2252 | #define MCX_DMA_OFF(_mxm, _off) ((void *)((_mxm)->mxm_kva + (_off)))
2253 | #define MCX_DMA_LEN(_mxm) ((_mxm)->mxm_size)
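/*
 * Added note, illustrative only: after mcx_dmamem_alloc() fills in a
 * struct mcx_dmamem, MCX_DMA_KVA() gives the CPU mapping to initialise and
 * MCX_DMA_DVA() the bus address to hand to the device, e.g.
 *
 *	mcx_dmamem_zero(&sc->sc_cmdq_mem);
 *	mcx_wr(sc, MCX_CMDQ_ADDR_HI, MCX_DMA_DVA(&sc->sc_cmdq_mem) >> 32);
 */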
2254 | |
2255 | struct mcx_hwmem { |
2256 | bus_dmamap_t mhm_map; |
2257 | bus_dma_segment_t *mhm_segs; |
2258 | unsigned int mhm_seg_count; |
2259 | unsigned int mhm_npages; |
2260 | }; |
2261 | |
2262 | struct mcx_slot { |
2263 | bus_dmamap_t ms_map; |
2264 | struct mbuf *ms_m; |
2265 | }; |
2266 | |
2267 | struct mcx_eq { |
2268 | int eq_n; |
2269 | uint32_t eq_cons; |
2270 | struct mcx_dmamem eq_mem; |
2271 | }; |
2272 | |
2273 | struct mcx_cq { |
2274 | int cq_n; |
2275 | struct mcx_dmamem cq_mem; |
2276 | bus_addr_t cq_doorbell; |
2277 | uint32_t cq_cons; |
2278 | uint32_t cq_count; |
2279 | }; |
2280 | |
2281 | struct mcx_calibration { |
2282 | uint64_t c_timestamp; /* previous mcx chip time */ |
2283 | uint64_t c_uptime; /* previous kernel nanouptime */ |
2284 | uint64_t c_tbase; /* mcx chip time */ |
2285 | uint64_t c_ubase; /* kernel nanouptime */ |
2286 | uint64_t c_ratio; |
2287 | }; |
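/*
 * Added note: the softc keeps two of these records (sc_calibration[2])
 * plus sc_calibration_gen, presumably so the timecounter can keep reading
 * a stable chip-time/uptime pair while mcx_calibrate() rewrites the other.
 */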
2288 | |
2289 | #define MCX_CALIBRATE_FIRST 2
2290 | #define MCX_CALIBRATE_NORMAL 32
2291 | |
2292 | struct mcx_rx { |
2293 | struct mcx_softc *rx_softc; |
2294 | struct ifiqueue *rx_ifiq; |
2295 | |
2296 | int rx_rqn; |
2297 | struct mcx_dmamem rx_rq_mem; |
2298 | struct mcx_slot *rx_slots; |
2299 | bus_addr_t rx_doorbell; |
2300 | |
2301 | uint32_t rx_prod; |
2302 | struct timeout rx_refill; |
2303 | struct if_rxring rx_rxr; |
2304 | } __aligned(64);
2305 | |
2306 | struct mcx_tx { |
2307 | struct mcx_softc *tx_softc; |
2308 | struct ifqueue *tx_ifq; |
2309 | |
2310 | int tx_uar; |
2311 | int tx_sqn; |
2312 | struct mcx_dmamem tx_sq_mem; |
2313 | struct mcx_slot *tx_slots; |
2314 | bus_addr_t tx_doorbell; |
2315 | int tx_bf_offset; |
2316 | |
2317 | uint32_t tx_cons; |
2318 | uint32_t tx_prod; |
2319 | } __aligned(64);
2320 | |
2321 | struct mcx_queues { |
2322 | char q_name[16]; |
2323 | void *q_ihc; |
2324 | struct mcx_softc *q_sc; |
2325 | int q_uar; |
2326 | int q_index; |
2327 | struct mcx_rx q_rx; |
2328 | struct mcx_tx q_tx; |
2329 | struct mcx_cq q_cq; |
2330 | struct mcx_eq q_eq; |
2331 | #if NKSTAT > 0
2332 | struct kstat *q_kstat; |
2333 | #endif |
2334 | }; |
2335 | |
2336 | struct mcx_flow_group { |
2337 | int g_id; |
2338 | int g_table; |
2339 | int g_start; |
2340 | int g_size; |
2341 | }; |
2342 | |
2343 | #define MCX_FLOW_GROUP_PROMISC 0
2344 | #define MCX_FLOW_GROUP_ALLMULTI 1
2345 | #define MCX_FLOW_GROUP_MAC 2
2346 | #define MCX_FLOW_GROUP_RSS_L4 3
2347 | #define MCX_FLOW_GROUP_RSS_L3 4
2348 | #define MCX_FLOW_GROUP_RSS_NONE 5
2349 | #define MCX_NUM_FLOW_GROUPS 6
2350 | 
2351 | #define MCX_HASH_SEL_L3 MCX_TIR_CTX_HASH_SEL_SRC_IP | \
2352 | MCX_TIR_CTX_HASH_SEL_DST_IP
2353 | #define MCX_HASH_SEL_L4 MCX_HASH_SEL_L3 | MCX_TIR_CTX_HASH_SEL_SPORT | \
2354 | MCX_TIR_CTX_HASH_SEL_DPORT
2355 | 
2356 | #define MCX_RSS_HASH_SEL_V4_TCP MCX_HASH_SEL_L4 | MCX_TIR_CTX_HASH_SEL_TCP | \
2357 | MCX_TIR_CTX_HASH_SEL_IPV4
2358 | #define MCX_RSS_HASH_SEL_V6_TCP MCX_HASH_SEL_L4 | MCX_TIR_CTX_HASH_SEL_TCP | \
2359 | MCX_TIR_CTX_HASH_SEL_IPV6
2360 | #define MCX_RSS_HASH_SEL_V4_UDP MCX_HASH_SEL_L4 | MCX_TIR_CTX_HASH_SEL_UDP | \
2361 | MCX_TIR_CTX_HASH_SEL_IPV4
2362 | #define MCX_RSS_HASH_SEL_V6_UDP MCX_HASH_SEL_L4 | MCX_TIR_CTX_HASH_SEL_UDP | \
2363 | MCX_TIR_CTX_HASH_SEL_IPV6
2364 | #define MCX_RSS_HASH_SEL_V4 MCX_HASH_SEL_L3 | MCX_TIR_CTX_HASH_SEL_IPV4
2365 | #define MCX_RSS_HASH_SEL_V6 MCX_HASH_SEL_L3 | MCX_TIR_CTX_HASH_SEL_IPV6
2366 | |
2367 | /* |
2368 | * There are a few different pieces involved in configuring RSS. |
2369 | * A Receive Queue Table (RQT) is the indirection table that maps packets to |
2370 | * different rx queues based on a hash value. We only create one, because |
2371 | * we want to scatter any traffic we can apply RSS to across all our rx |
2372 | * queues. Anything else will only be delivered to the first rx queue, |
2373 | * which doesn't require an RQT. |
2374 | * |
2375 | * A Transport Interface Receive (TIR) delivers packets to either a single rx |
2376 | * queue or an RQT, and in the latter case, specifies the set of fields |
2377 | * hashed, the hash function, and the hash key. We need one of these for each |
2378 | * type of RSS traffic - v4 TCP, v6 TCP, v4 UDP, v6 UDP, other v4, other v6, |
2379 | * and one for non-RSS traffic. |
2380 | * |
2381 | * Flow tables hold flow table entries in sequence. The first entry that |
2382 | * matches a packet is applied, sending the packet to either another flow |
2383 | * table or a TIR. We use one flow table to select packets based on |
2384 | * destination MAC address, and a second to apply RSS. The entries in the |
2385 | * first table send matching packets to the second, and the entries in the |
2386 | * RSS table send packets to RSS TIRs if possible, or the non-RSS TIR. |
2387 | * |
2388 | * The flow table entry that delivers packets to an RSS TIR must include match |
2389 | * criteria that ensure packets delivered to the TIR include all the fields |
2390 | * that the TIR hashes on - so for a v4 TCP TIR, the flow table entry must |
2391 | * only accept v4 TCP packets. Accordingly, we need flow table entries for |
2392 | * each TIR. |
2393 | * |
2394 | * All of this is a lot more flexible than we need, and we can describe most |
2395 | * of the stuff we need with a simple array. |
2396 | * |
2397 | * An RSS config creates a TIR with hashing enabled on a set of fields, |
2398 | * pointing to either the first rx queue or the RQT containing all the rx |
2399 | * queues, and a flow table entry that matches on an ether type and |
2400 | * optionally an ip proto, that delivers packets to the TIR. |
2401 | */ |
2402 | static struct mcx_rss_rule { |
2403 | int hash_sel; |
2404 | int flow_group; |
2405 | int ethertype; |
2406 | int ip_proto; |
2407 | } mcx_rss_config[] = { |
2408 | /* udp and tcp for v4/v6 */ |
2409 | { MCX_RSS_HASH_SEL_V4_TCP, MCX_FLOW_GROUP_RSS_L4,
2410 | ETHERTYPE_IP, IPPROTO_TCP },
2411 | { MCX_RSS_HASH_SEL_V6_TCP, MCX_FLOW_GROUP_RSS_L4,
2412 | ETHERTYPE_IPV6, IPPROTO_TCP },
2413 | { MCX_RSS_HASH_SEL_V4_UDP, MCX_FLOW_GROUP_RSS_L4,
2414 | ETHERTYPE_IP, IPPROTO_UDP },
2415 | { MCX_RSS_HASH_SEL_V6_UDP, MCX_FLOW_GROUP_RSS_L4,
2416 | ETHERTYPE_IPV6, IPPROTO_UDP },
2417 | 
2418 | /* other v4/v6 */
2419 | { MCX_RSS_HASH_SEL_V4, MCX_FLOW_GROUP_RSS_L3,
2420 | ETHERTYPE_IP, 0 },
2421 | { MCX_RSS_HASH_SEL_V6, MCX_FLOW_GROUP_RSS_L3,
2422 | ETHERTYPE_IPV6, 0 },
2423 | 
2424 | /* non v4/v6 */
2425 | { 0, MCX_FLOW_GROUP_RSS_NONE, 0, 0 }
2426 | }; |
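/*
 * Added note: per the comment above, the first rule asks for a TIR that
 * hashes v4 TCP packets on addresses and ports (MCX_RSS_HASH_SEL_V4_TCP)
 * over the RQT, plus a flow table entry in the MCX_FLOW_GROUP_RSS_L4 group
 * matching ETHERTYPE_IP and IPPROTO_TCP that steers packets to that TIR.
 * The final catch-all rule hashes nothing and lands in
 * MCX_FLOW_GROUP_RSS_NONE, so such traffic goes to the first rx queue.
 */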
2427 | |
2428 | struct mcx_softc { |
2429 | struct device sc_dev; |
2430 | struct arpcom sc_ac; |
2431 | struct ifmedia sc_media; |
2432 | uint64_t sc_media_status; |
2433 | uint64_t sc_media_active; |
2434 | |
2435 | pci_chipset_tag_t sc_pc; |
2436 | pci_intr_handle_t sc_ih; |
2437 | void *sc_ihc; |
2438 | pcitag_t sc_tag; |
2439 | |
2440 | bus_dma_tag_t sc_dmat; |
2441 | bus_space_tag_t sc_memt; |
2442 | bus_space_handle_t sc_memh; |
2443 | bus_size_t sc_mems; |
2444 | |
2445 | struct mcx_dmamem sc_cmdq_mem; |
2446 | unsigned int sc_cmdq_mask; |
2447 | unsigned int sc_cmdq_size; |
2448 | |
2449 | unsigned int sc_cmdq_token; |
2450 | struct mutex sc_cmdq_mtx; |
2451 | struct rwlock sc_cmdq_kstat_lk; |
2452 | struct rwlock sc_cmdq_ioctl_lk; |
2453 | |
2454 | struct mcx_hwmem sc_boot_pages; |
2455 | struct mcx_hwmem sc_init_pages; |
2456 | struct mcx_hwmem sc_regular_pages; |
2457 | |
2458 | int sc_uar; |
2459 | int sc_pd; |
2460 | int sc_tdomain; |
2461 | uint32_t sc_lkey; |
2462 | int sc_tis; |
2463 | int sc_tir[nitems(mcx_rss_config)];
2464 | int sc_rqt; |
2465 | |
2466 | struct mcx_dmamem sc_doorbell_mem; |
2467 | |
2468 | struct mcx_eq sc_admin_eq; |
2469 | struct mcx_eq sc_queue_eq; |
2470 | |
2471 | int sc_hardmtu; |
2472 | int sc_rxbufsz; |
2473 | |
2474 | int sc_bf_size; |
2475 | int sc_max_rqt_size; |
2476 | |
2477 | struct task sc_port_change; |
2478 | |
2479 | int sc_mac_flow_table_id; |
2480 | int sc_rss_flow_table_id; |
2481 | struct mcx_flow_group sc_flow_group[MCX_NUM_FLOW_GROUPS];
2482 | int sc_promisc_flow_enabled; |
2483 | int sc_allmulti_flow_enabled; |
2484 | int sc_mcast_flow_base; |
2485 | int sc_extra_mcast; |
2486 | uint8_t sc_mcast_flows[MCX_NUM_MCAST_FLOWS][ETHER_ADDR_LEN];
2487 | |
2488 | struct mcx_calibration sc_calibration[2]; |
2489 | unsigned int sc_calibration_gen; |
2490 | struct timeout sc_calibrate; |
2491 | uint32_t sc_mhz; |
2492 | uint32_t sc_khz; |
2493 | |
2494 | struct intrmap *sc_intrmap; |
2495 | struct mcx_queues *sc_queues; |
2496 | |
2497 | int sc_mcam_reg; |
2498 | |
2499 | #if NKSTAT > 0
2500 | struct kstat *sc_kstat_ieee8023; |
2501 | struct kstat *sc_kstat_rfc2863; |
2502 | struct kstat *sc_kstat_rfc2819; |
2503 | struct kstat *sc_kstat_rfc3635; |
2504 | unsigned int sc_kstat_mtmp_count; |
2505 | struct kstat **sc_kstat_mtmp; |
2506 | #endif |
2507 | |
2508 | struct timecounter sc_timecounter; |
2509 | }; |
2510 | #define DEVNAME(_sc) ((_sc)->sc_dev.dv_xname)
2511 | |
2512 | static int mcx_match(struct device *, void *, void *); |
2513 | static void mcx_attach(struct device *, struct device *, void *); |
2514 | |
2515 | #if NKSTAT > 0
2516 | static void mcx_kstat_attach(struct mcx_softc *); |
2517 | #endif |
2518 | |
2519 | static void mcx_timecounter_attach(struct mcx_softc *); |
2520 | |
2521 | static int mcx_version(struct mcx_softc *); |
2522 | static int mcx_init_wait(struct mcx_softc *); |
2523 | static int mcx_enable_hca(struct mcx_softc *); |
2524 | static int mcx_teardown_hca(struct mcx_softc *, uint16_t); |
2525 | static int mcx_access_hca_reg(struct mcx_softc *, uint16_t, int, void *, |
2526 | int, enum mcx_cmdq_slot); |
2527 | static int mcx_issi(struct mcx_softc *); |
2528 | static int mcx_pages(struct mcx_softc *, struct mcx_hwmem *, uint16_t); |
2529 | static int mcx_hca_max_caps(struct mcx_softc *); |
2530 | static int mcx_hca_set_caps(struct mcx_softc *); |
2531 | static int mcx_init_hca(struct mcx_softc *); |
2532 | static int mcx_set_driver_version(struct mcx_softc *); |
2533 | static int mcx_iff(struct mcx_softc *); |
2534 | static int mcx_alloc_uar(struct mcx_softc *, int *); |
2535 | static int mcx_alloc_pd(struct mcx_softc *); |
2536 | static int mcx_alloc_tdomain(struct mcx_softc *); |
2537 | static int mcx_create_eq(struct mcx_softc *, struct mcx_eq *, int, |
2538 | uint64_t, int); |
2539 | static int mcx_query_nic_vport_context(struct mcx_softc *); |
2540 | static int mcx_query_special_contexts(struct mcx_softc *); |
2541 | static int mcx_set_port_mtu(struct mcx_softc *, int); |
2542 | static int mcx_create_cq(struct mcx_softc *, struct mcx_cq *, int, int, |
2543 | int); |
2544 | static int mcx_destroy_cq(struct mcx_softc *, struct mcx_cq *); |
2545 | static int mcx_create_sq(struct mcx_softc *, struct mcx_tx *, int, int, |
2546 | int); |
2547 | static int mcx_destroy_sq(struct mcx_softc *, struct mcx_tx *); |
2548 | static int mcx_ready_sq(struct mcx_softc *, struct mcx_tx *); |
2549 | static int mcx_create_rq(struct mcx_softc *, struct mcx_rx *, int, int); |
2550 | static int mcx_destroy_rq(struct mcx_softc *, struct mcx_rx *); |
2551 | static int mcx_ready_rq(struct mcx_softc *, struct mcx_rx *); |
2552 | static int mcx_create_tir_direct(struct mcx_softc *, struct mcx_rx *, |
2553 | int *); |
2554 | static int mcx_create_tir_indirect(struct mcx_softc *, int, uint32_t, |
2555 | int *); |
2556 | static int mcx_destroy_tir(struct mcx_softc *, int); |
2557 | static int mcx_create_tis(struct mcx_softc *, int *); |
2558 | static int mcx_destroy_tis(struct mcx_softc *, int); |
2559 | static int mcx_create_rqt(struct mcx_softc *, int, int *, int *); |
2560 | static int mcx_destroy_rqt(struct mcx_softc *, int); |
2561 | static int mcx_create_flow_table(struct mcx_softc *, int, int, int *); |
2562 | static int mcx_set_flow_table_root(struct mcx_softc *, int); |
2563 | static int mcx_destroy_flow_table(struct mcx_softc *, int); |
2564 | static int mcx_create_flow_group(struct mcx_softc *, int, int, int, |
2565 | int, int, struct mcx_flow_match *); |
2566 | static int mcx_destroy_flow_group(struct mcx_softc *, int); |
2567 | static int mcx_set_flow_table_entry_mac(struct mcx_softc *, int, int, |
2568 | uint8_t *, uint32_t); |
2569 | static int mcx_set_flow_table_entry_proto(struct mcx_softc *, int, int, |
2570 | int, int, uint32_t); |
2571 | static int mcx_delete_flow_table_entry(struct mcx_softc *, int, int); |
2572 | |
2573 | #if NKSTAT > 0
2574 | static int mcx_query_rq(struct mcx_softc *, struct mcx_rx *, struct mcx_rq_ctx *); |
2575 | static int mcx_query_sq(struct mcx_softc *, struct mcx_tx *, struct mcx_sq_ctx *); |
2576 | static int mcx_query_cq(struct mcx_softc *, struct mcx_cq *, struct mcx_cq_ctx *); |
2577 | static int mcx_query_eq(struct mcx_softc *, struct mcx_eq *, struct mcx_eq_ctx *); |
2578 | #endif |
2579 | |
2580 | #if 0 |
2581 | static int mcx_dump_flow_table(struct mcx_softc *, int); |
2582 | static int mcx_dump_flow_table_entry(struct mcx_softc *, int, int); |
2583 | static int mcx_dump_flow_group(struct mcx_softc *, int); |
2584 | #endif |
2585 | |
2586 | |
2587 | /* |
2588 | static void mcx_cmdq_dump(const struct mcx_cmdq_entry *); |
2589 | static void mcx_cmdq_mbox_dump(struct mcx_dmamem *, int); |
2590 | */ |
2591 | static void mcx_refill(void *); |
2592 | static int mcx_process_rx(struct mcx_softc *, struct mcx_rx *, |
2593 | struct mcx_cq_entry *, struct mbuf_list *, |
2594 | const struct mcx_calibration *); |
2595 | static int mcx_process_txeof(struct mcx_softc *, struct mcx_tx *, |
2596 | struct mcx_cq_entry *); |
2597 | static void mcx_process_cq(struct mcx_softc *, struct mcx_queues *, |
2598 | struct mcx_cq *); |
2599 | |
2600 | static void mcx_arm_cq(struct mcx_softc *, struct mcx_cq *, int); |
2601 | static void mcx_arm_eq(struct mcx_softc *, struct mcx_eq *, int); |
2602 | static int mcx_admin_intr(void *); |
2603 | static int mcx_cq_intr(void *); |
2604 | |
2605 | static int mcx_up(struct mcx_softc *); |
2606 | static void mcx_down(struct mcx_softc *); |
2607 | static int mcx_ioctl(struct ifnet *, u_long, caddr_t); |
2608 | static int mcx_rxrinfo(struct mcx_softc *, struct if_rxrinfo *); |
2609 | static void mcx_start(struct ifqueue *); |
2610 | static void mcx_watchdog(struct ifnet *); |
2611 | static void mcx_media_add_types(struct mcx_softc *); |
2612 | static void mcx_media_status(struct ifnet *, struct ifmediareq *); |
2613 | static int mcx_media_change(struct ifnet *); |
2614 | static int mcx_get_sffpage(struct ifnet *, struct if_sffpage *); |
2615 | static void mcx_port_change(void *); |
2616 | |
2617 | static void mcx_calibrate_first(struct mcx_softc *); |
2618 | static void mcx_calibrate(void *); |
2619 | |
2620 | static inline uint32_t |
2621 | mcx_rd(struct mcx_softc *, bus_size_t); |
2622 | static inline void |
2623 | mcx_wr(struct mcx_softc *, bus_size_t, uint32_t); |
2624 | static inline void |
2625 | mcx_bar(struct mcx_softc *, bus_size_t, bus_size_t, int); |
2626 | |
2627 | static uint64_t mcx_timer(struct mcx_softc *); |
2628 | |
2629 | static int mcx_dmamem_alloc(struct mcx_softc *, struct mcx_dmamem *, |
2630 | bus_size_t, u_int align); |
2631 | static void mcx_dmamem_zero(struct mcx_dmamem *); |
2632 | static void mcx_dmamem_free(struct mcx_softc *, struct mcx_dmamem *); |
2633 | |
2634 | static int mcx_hwmem_alloc(struct mcx_softc *, struct mcx_hwmem *, |
2635 | unsigned int); |
2636 | static void mcx_hwmem_free(struct mcx_softc *, struct mcx_hwmem *); |
2637 | |
2638 | struct cfdriver mcx_cd = { |
2639 | NULL,
2640 | "mcx", |
2641 | DV_IFNET, |
2642 | }; |
2643 | |
2644 | const struct cfattach mcx_ca = { |
2645 | sizeof(struct mcx_softc), |
2646 | mcx_match, |
2647 | mcx_attach, |
2648 | }; |
2649 | |
2650 | static const struct pci_matchid mcx_devices[] = { |
2651 | { PCI_VENDOR_MELLANOX, PCI_PRODUCT_MELLANOX_MT27700 },
2652 | { PCI_VENDOR_MELLANOX, PCI_PRODUCT_MELLANOX_MT27700VF },
2653 | { PCI_VENDOR_MELLANOX, PCI_PRODUCT_MELLANOX_MT27710 },
2654 | { PCI_VENDOR_MELLANOX, PCI_PRODUCT_MELLANOX_MT27710VF },
2655 | { PCI_VENDOR_MELLANOX, PCI_PRODUCT_MELLANOX_MT27800 },
2656 | { PCI_VENDOR_MELLANOX, PCI_PRODUCT_MELLANOX_MT27800VF },
2657 | { PCI_VENDOR_MELLANOX, PCI_PRODUCT_MELLANOX_MT28800 },
2658 | { PCI_VENDOR_MELLANOX, PCI_PRODUCT_MELLANOX_MT28800VF },
2659 | { PCI_VENDOR_MELLANOX, PCI_PRODUCT_MELLANOX_MT28908 },
2660 | { PCI_VENDOR_MELLANOX, PCI_PRODUCT_MELLANOX_MT2892 },
2661 | { PCI_VENDOR_MELLANOX, PCI_PRODUCT_MELLANOX_MT2894 },
2662 | }; |
2663 | |
2664 | struct mcx_eth_proto_capability { |
2665 | uint64_t cap_media; |
2666 | uint64_t cap_baudrate; |
2667 | }; |
2668 | |
2669 | static const struct mcx_eth_proto_capability mcx_eth_cap_map[] = { |
2670 | [MCX_ETHER_CAP_SGMII] = { IFM_1000_SGMII, IF_Gbps(1) },
2671 | [MCX_ETHER_CAP_1000_KX] = { IFM_1000_KX, IF_Gbps(1) },
2672 | [MCX_ETHER_CAP_10G_CX4] = { IFM_10G_CX4, IF_Gbps(10) },
2673 | [MCX_ETHER_CAP_10G_KX4] = { IFM_10G_KX4, IF_Gbps(10) },
2674 | [MCX_ETHER_CAP_10G_KR] = { IFM_10G_KR, IF_Gbps(10) },
2675 | [MCX_ETHER_CAP_40G_CR4] = { IFM_40G_CR4, IF_Gbps(40) },
2676 | [MCX_ETHER_CAP_40G_KR4] = { IFM_40G_KR4, IF_Gbps(40) },
2677 | [MCX_ETHER_CAP_10G_CR] = { IFM_10G_SFP_CU, IF_Gbps(10) },
2678 | [MCX_ETHER_CAP_10G_SR] = { IFM_10G_SR, IF_Gbps(10) },
2679 | [MCX_ETHER_CAP_10G_LR] = { IFM_10G_LR, IF_Gbps(10) },
2680 | [MCX_ETHER_CAP_40G_SR4] = { IFM_40G_SR4, IF_Gbps(40) },
2681 | [MCX_ETHER_CAP_40G_LR4] = { IFM_40G_LR4, IF_Gbps(40) },
2682 | [MCX_ETHER_CAP_50G_SR2] = { 0 /*IFM_50G_SR2*/, IF_Gbps(50) },
2683 | [MCX_ETHER_CAP_100G_CR4] = { IFM_100G_CR4, IF_Gbps(100) },
2684 | [MCX_ETHER_CAP_100G_SR4] = { IFM_100G_SR4, IF_Gbps(100) },
2685 | [MCX_ETHER_CAP_100G_KR4] = { IFM_100G_KR4, IF_Gbps(100) },
2686 | [MCX_ETHER_CAP_100G_LR4] = { IFM_100G_LR4, IF_Gbps(100) },
2687 | [MCX_ETHER_CAP_25G_CR] = { IFM_25G_CR, IF_Gbps(25) },
2688 | [MCX_ETHER_CAP_25G_KR] = { IFM_25G_KR, IF_Gbps(25) },
2689 | [MCX_ETHER_CAP_25G_SR] = { IFM_25G_SR, IF_Gbps(25) },
2690 | [MCX_ETHER_CAP_50G_CR2] = { IFM_50G_CR2, IF_Gbps(50) },
2691 | [MCX_ETHER_CAP_50G_KR2] = { IFM_50G_KR2, IF_Gbps(50) },
2692 | }; |
2693 | |
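/*
 * Extract the 24-bit object id from a big-endian command reply word.
 */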
2694 | static int |
2695 | mcx_get_id(uint32_t val) |
2696 | { |
2697 | return betoh32(val) & 0x00ffffff;
2698 | } |
2699 | |
2700 | static int |
2701 | mcx_match(struct device *parent, void *match, void *aux) |
2702 | { |
2703 | return (pci_matchbyid(aux, mcx_devices, nitems(mcx_devices)));
2704 | } |
2705 | |
2706 | void |
2707 | mcx_attach(struct device *parent, struct device *self, void *aux) |
2708 | { |
2709 | struct mcx_softc *sc = (struct mcx_softc *)self; |
2710 | struct ifnet *ifp = &sc->sc_ac.ac_if; |
2711 | struct pci_attach_args *pa = aux; |
2712 | pcireg_t memtype; |
2713 | uint32_t r; |
2714 | unsigned int cq_stride; |
2715 | unsigned int cq_size; |
2716 | const char *intrstr; |
2717 | int i, msix; |
2718 | |
2719 | sc->sc_pc = pa->pa_pc; |
2720 | sc->sc_tag = pa->pa_tag; |
2721 | sc->sc_dmat = pa->pa_dmat; |
2722 | |
2723 | /* Map the PCI memory space */ |
2724 | memtype = pci_mapreg_type(sc->sc_pc, sc->sc_tag, MCX_HCA_BAR);
2725 | if (pci_mapreg_map(pa, MCX_HCA_BAR, memtype,
2726 | BUS_SPACE_MAP_PREFETCHABLE, &sc->sc_memt, &sc->sc_memh,
2727 | NULL, &sc->sc_mems, 0)) {
2728 | printf(": unable to map register memory\n"); |
2729 | return; |
2730 | } |
2731 | |
2732 | if (mcx_version(sc) != 0) { |
2733 | /* error printed by mcx_version */ |
2734 | goto unmap; |
2735 | } |
2736 | |
2737 | r = mcx_rd(sc, MCX_CMDQ_ADDR_LO);
2738 | cq_stride = 1 << MCX_CMDQ_LOG_STRIDE(r); /* size of the entries */
2739 | cq_size = 1 << MCX_CMDQ_LOG_SIZE(r); /* number of entries */
2740 | if (cq_size > MCX_MAX_CQE) {
2741 | printf(", command queue size overflow %u\n", cq_size);
2742 | goto unmap;
2743 | }
2744 | if (cq_stride < sizeof(struct mcx_cmdq_entry)) {
2745 | printf(", command queue entry size underflow %u\n", cq_stride);
2746 | goto unmap;
2747 | }
2748 | if (cq_stride * cq_size > MCX_PAGE_SIZE) {
2749 | printf(", command queue page overflow\n");
2750 | goto unmap;
2751 | }
2752 |
2753 | if (mcx_dmamem_alloc(sc, &sc->sc_doorbell_mem, MCX_DOORBELL_AREA_SIZE,
2754 | MCX_PAGE_SIZE) != 0) {
2755 | printf(", unable to allocate doorbell memory\n");
2756 | goto unmap;
2757 | }
2758 |
2759 | if (mcx_dmamem_alloc(sc, &sc->sc_cmdq_mem, MCX_PAGE_SIZE,
2760 | MCX_PAGE_SIZE) != 0) {
2761 | printf(", unable to allocate command queue\n");
2762 | goto dbfree;
2763 | }
2764 |
2765 | mcx_wr(sc, MCX_CMDQ_ADDR_HI, MCX_DMA_DVA(&sc->sc_cmdq_mem) >> 32);
2766 | mcx_bar(sc, MCX_CMDQ_ADDR_HI, sizeof(uint32_t),
2767 | BUS_SPACE_BARRIER_WRITE);
2768 | mcx_wr(sc, MCX_CMDQ_ADDR_LO, MCX_DMA_DVA(&sc->sc_cmdq_mem));
2769 | mcx_bar(sc, MCX_CMDQ_ADDR_LO, sizeof(uint32_t),
2770 | BUS_SPACE_BARRIER_WRITE);
2771 | |
2772 | if (mcx_init_wait(sc) != 0) { |
2773 | printf(", timeout waiting for init\n"); |
2774 | goto cqfree; |
2775 | } |
2776 | |
2777 | sc->sc_cmdq_mask = cq_size - 1; |
2778 | sc->sc_cmdq_size = cq_stride; |
2779 | rw_init(&sc->sc_cmdq_kstat_lk, "mcxkstat");
2780 | rw_init(&sc->sc_cmdq_ioctl_lk, "mcxioctl");
2781 | mtx_init(&sc->sc_cmdq_mtx, IPL_NET);
2782 | |
2783 | if (mcx_enable_hca(sc) != 0) { |
2784 | /* error printed by mcx_enable_hca */ |
2785 | goto cqfree; |
2786 | } |
2787 | |
2788 | if (mcx_issi(sc) != 0) { |
2789 | /* error printed by mcx_issi */ |
2790 | goto teardown; |
2791 | } |
2792 | |
2793 | if (mcx_pages(sc, &sc->sc_boot_pages, |
2794 | htobe16(MCX_CMD_QUERY_PAGES_BOOT)) != 0) {
2795 | /* error printed by mcx_pages */ |
2796 | goto teardown; |
2797 | } |
2798 | |
2799 | if (mcx_hca_max_caps(sc) != 0) { |
2800 | /* error printed by mcx_hca_max_caps */ |
2801 | goto teardown; |
2802 | } |
2803 | |
2804 | if (mcx_hca_set_caps(sc) != 0) { |
2805 | /* error printed by mcx_hca_set_caps */ |
2806 | goto teardown; |
2807 | } |
2808 | |
2809 | if (mcx_pages(sc, &sc->sc_init_pages, |
2810 | htobe16(MCX_CMD_QUERY_PAGES_INIT)) != 0) {
2811 | /* error printed by mcx_pages */ |
2812 | goto teardown; |
2813 | } |
2814 | |
2815 | if (mcx_init_hca(sc) != 0) { |
2816 | /* error printed by mcx_init_hca */ |
2817 | goto teardown; |
2818 | } |
2819 | |
2820 | if (mcx_pages(sc, &sc->sc_regular_pages, |
2821 | htobe16(MCX_CMD_QUERY_PAGES_REGULAR)) != 0) {
2822 | /* error printed by mcx_pages */ |
2823 | goto teardown; |
2824 | } |
2825 | |
2826 | /* apparently not necessary? */ |
2827 | if (mcx_set_driver_version(sc) != 0) { |
2828 | /* error printed by mcx_set_driver_version */ |
2829 | goto teardown; |
2830 | } |
2831 | |
2832 | if (mcx_iff(sc) != 0) { /* modify nic vport context */ |
2833 | /* error printed by mcx_iff? */ |
2834 | goto teardown; |
2835 | } |
2836 | |
2837 | if (mcx_alloc_uar(sc, &sc->sc_uar) != 0) { |
2838 | /* error printed by mcx_alloc_uar */ |
2839 | goto teardown; |
2840 | } |
2841 | |
2842 | if (mcx_alloc_pd(sc) != 0) { |
2843 | /* error printed by mcx_alloc_pd */ |
2844 | goto teardown; |
2845 | } |
2846 | |
2847 | if (mcx_alloc_tdomain(sc) != 0) { |
2848 | /* error printed by mcx_alloc_tdomain */ |
2849 | goto teardown; |
2850 | } |
2851 | |
2852 | msix = pci_intr_msix_count(pa); |
2853 | if (msix < 2) { |
2854 | printf(": not enough msi-x vectors\n"); |
2855 | goto teardown; |
2856 | } |
2857 | |
2858 | /* |
2859 | * PRM makes no mention of msi interrupts, just legacy and msi-x. |
2860 | * mellanox support tells me legacy interrupts are not supported, |
2861 | * so we're stuck with just msi-x. |
2862 | */ |
2863 | if (pci_intr_map_msix(pa, 0, &sc->sc_ih) != 0) { |
2864 | printf(": unable to map interrupt\n"); |
2865 | goto teardown; |
2866 | } |
2867 | intrstr = pci_intr_string(sc->sc_pc, sc->sc_ih); |
2868 | sc->sc_ihc = pci_intr_establish(sc->sc_pc, sc->sc_ih, |
2869 | IPL_NET | IPL_MPSAFE, mcx_admin_intr, sc, DEVNAME(sc));
2870 | if (sc->sc_ihc == NULL) {
2871 | printf(": unable to establish interrupt");
2872 | if (intrstr != NULL)
2873 | printf(" at %s", intrstr); |
2874 | printf("\n"); |
2875 | goto teardown; |
2876 | } |
2877 | |
2878 | if (mcx_create_eq(sc, &sc->sc_admin_eq, sc->sc_uar, |
2879 | (1ull << MCX_EVENT_TYPE_INTERNAL_ERROR) |
2880 | (1ull << MCX_EVENT_TYPE_PORT_CHANGE) |
2881 | (1ull << MCX_EVENT_TYPE_CMD_COMPLETION) |
2882 | (1ull << MCX_EVENT_TYPE_PAGE_REQUEST), 0) != 0) {
2883 | /* error printed by mcx_create_eq */ |
2884 | goto teardown; |
2885 | } |
2886 | |
2887 | if (mcx_query_nic_vport_context(sc) != 0) { |
2888 | /* error printed by mcx_query_nic_vport_context */ |
2889 | goto teardown; |
2890 | } |
2891 | |
2892 | if (mcx_query_special_contexts(sc) != 0) { |
2893 | /* error printed by mcx_query_special_contexts */ |
2894 | goto teardown; |
2895 | } |
2896 | |
2897 | if (mcx_set_port_mtu(sc, MCX_HARDMTU) != 0) {
2898 | /* error printed by mcx_set_port_mtu */ |
2899 | goto teardown; |
2900 | } |
2901 | |
2902 | printf(", %s, address %s\n", intrstr, |
2903 | ether_sprintf(sc->sc_ac.ac_enaddr)); |
2904 | |
2905 | msix--; /* admin ops took one */ |
2906 | sc->sc_intrmap = intrmap_create(&sc->sc_dev, msix, MCX_MAX_QUEUES,
2907 | INTRMAP_POWEROF2);
2908 | if (sc->sc_intrmap == NULL) {
2909 | printf("%s: unable to create interrupt map\n", DEVNAME(sc));
2910 | goto teardown;
2911 | }
2912 | sc->sc_queues = mallocarray(intrmap_count(sc->sc_intrmap),
2913 | sizeof(*sc->sc_queues), M_DEVBUF, M_WAITOK|M_ZERO);
2914 | if (sc->sc_queues == NULL) {
2915 | printf("%s: unable to create queues\n", DEVNAME(sc));
2916 | goto intrunmap;
2917 | }
2918 |
2919 | strlcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
2920 | ifp->if_softc = sc; |
2921 | ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST | IFF_SIMPLEX;
2922 | ifp->if_xflags = IFXF_MPSAFE;
2923 | ifp->if_ioctl = mcx_ioctl;
2924 | ifp->if_qstart = mcx_start;
2925 | ifp->if_watchdog = mcx_watchdog;
2926 | ifp->if_hardmtu = sc->sc_hardmtu;
2927 | ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_CSUM_IPv4 |
2928 | IFCAP_CSUM_UDPv4 | IFCAP_CSUM_UDPv6 | IFCAP_CSUM_TCPv4 |
2929 | IFCAP_CSUM_TCPv6;
2930 | #if NVLAN > 0
2931 | ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
2932 | #endif |
2933 | ifq_init_maxlen(&ifp->if_snd, 1024); |
2934 | |
2935 | ifmedia_init(&sc->sc_media, IFM_IMASK, mcx_media_change,
2936 | mcx_media_status);
2937 | mcx_media_add_types(sc);
2938 | ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
2939 | ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
2940 | |
2941 | if_attach(ifp); |
2942 | ether_ifattach(ifp); |
2943 | |
2944 | if_attach_iqueues(ifp, intrmap_count(sc->sc_intrmap)); |
2945 | if_attach_queues(ifp, intrmap_count(sc->sc_intrmap)); |
2946 | for (i = 0; i < intrmap_count(sc->sc_intrmap); i++) { |
2947 | struct ifiqueue *ifiq = ifp->if_iqs[i]; |
2948 | struct ifqueue *ifq = ifp->if_ifqs[i]; |
2949 | struct mcx_queues *q = &sc->sc_queues[i]; |
2950 | struct mcx_rx *rx = &q->q_rx; |
2951 | struct mcx_tx *tx = &q->q_tx; |
2952 | pci_intr_handle_t ih; |
2953 | int vec; |
2954 | |
2955 | vec = i + 1; |
2956 | q->q_sc = sc; |
2957 | q->q_index = i; |
2958 | |
2959 | if (mcx_alloc_uar(sc, &q->q_uar) != 0) {
2960 | printf("%s: unable to alloc uar %d\n",
2961 | DEVNAME(sc), i);
2962 | goto intrdisestablish;
2963 | }
2964 |
2965 | if (mcx_create_eq(sc, &q->q_eq, q->q_uar, 0, vec) != 0) {
2966 | printf("%s: unable to create event queue %d\n",
2967 | DEVNAME(sc), i);
2968 | goto intrdisestablish;
2969 | }
2970 |
2971 | rx->rx_softc = sc;
2972 | rx->rx_ifiq = ifiq;
2973 | timeout_set(&rx->rx_refill, mcx_refill, rx);
2974 | ifiq->ifiq_softc = rx;
2975 |
2976 | tx->tx_softc = sc;
2977 | tx->tx_ifq = ifq;
2978 | ifq->ifq_softc = tx;
2979 |
2980 | if (pci_intr_map_msix(pa, vec, &ih) != 0) {
2981 | printf("%s: unable to map queue interrupt %d\n",
2982 | DEVNAME(sc), i);
2983 | goto intrdisestablish;
2984 | }
2985 | snprintf(q->q_name, sizeof(q->q_name), "%s:%d",
2986 | DEVNAME(sc), i);
2987 | q->q_ihc = pci_intr_establish_cpu(sc->sc_pc, ih,
2988 | IPL_NET | IPL_MPSAFE, intrmap_cpu(sc->sc_intrmap, i),
2989 | mcx_cq_intr, q, q->q_name);
2990 | if (q->q_ihc == NULL) {
2991 | printf("%s: unable to establish interrupt %d\n",
2992 | DEVNAME(sc), i);
2993 | goto intrdisestablish;
2994 | }
2995 | } |
2996 | |
2997 | timeout_set(&sc->sc_calibrate, mcx_calibrate, sc); |
2998 | |
2999 | task_set(&sc->sc_port_change, mcx_port_change, sc); |
3000 | mcx_port_change(sc); |
3001 | |
3002 | sc->sc_mac_flow_table_id = -1; |
3003 | sc->sc_rss_flow_table_id = -1; |
3004 | sc->sc_rqt = -1; |
3005 | for (i = 0; i < MCX_NUM_FLOW_GROUPS; i++) {
3006 | struct mcx_flow_group *mfg = &sc->sc_flow_group[i]; |
3007 | mfg->g_id = -1; |
3008 | mfg->g_table = -1; |
3009 | mfg->g_size = 0; |
3010 | mfg->g_start = 0; |
3011 | } |
3012 | sc->sc_extra_mcast = 0; |
3013 | memset(sc->sc_mcast_flows, 0, sizeof(sc->sc_mcast_flows));
3014 | |
3015 | #if NKSTAT > 0
3016 | mcx_kstat_attach(sc); |
3017 | #endif |
3018 | mcx_timecounter_attach(sc); |
3019 | return; |
3020 | |
3021 | intrdisestablish: |
3022 | for (i = 0; i < intrmap_count(sc->sc_intrmap); i++) { |
3023 | struct mcx_queues *q = &sc->sc_queues[i]; |
3024 | if (q->q_ihc == NULL)
3025 | continue;
3026 | pci_intr_disestablish(sc->sc_pc, q->q_ihc);
3027 | q->q_ihc = NULL;
3028 | }
3029 | free(sc->sc_queues, M_DEVBUF,
3030 | intrmap_count(sc->sc_intrmap) * sizeof(*sc->sc_queues));
3031 | intrunmap:
3032 | intrmap_destroy(sc->sc_intrmap);
3033 | sc->sc_intrmap = NULL;
3034 | teardown:
3035 | mcx_teardown_hca(sc, htobe16(MCX_CMD_TEARDOWN_HCA_GRACEFUL));
3036 | /* error printed by mcx_teardown_hca, and we're already unwinding */
3037 | cqfree:
3038 | mcx_wr(sc, MCX_CMDQ_ADDR_HI, MCX_DMA_DVA(&sc->sc_cmdq_mem) >> 32);
3039 | mcx_bar(sc, MCX_CMDQ_ADDR_HI, sizeof(uint64_t),
3040 | BUS_SPACE_BARRIER_WRITE);
3041 | mcx_wr(sc, MCX_CMDQ_ADDR_LO, MCX_DMA_DVA(&sc->sc_cmdq_mem) |
3042 | MCX_CMDQ_INTERFACE_DISABLED);
3043 | mcx_bar(sc, MCX_CMDQ_ADDR_LO, sizeof(uint64_t),
3044 | BUS_SPACE_BARRIER_WRITE);
3045 |
3046 | mcx_wr(sc, MCX_CMDQ_ADDR_HI, 0);
3047 | mcx_bar(sc, MCX_CMDQ_ADDR_HI, sizeof(uint64_t),
3048 | BUS_SPACE_BARRIER_WRITE);
3049 | mcx_wr(sc, MCX_CMDQ_ADDR_LO, MCX_CMDQ_INTERFACE_DISABLED);
3050 | |
3051 | mcx_dmamem_free(sc, &sc->sc_cmdq_mem); |
3052 | dbfree: |
3053 | mcx_dmamem_free(sc, &sc->sc_doorbell_mem); |
3054 | unmap: |
3055 | bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems); |
3056 | sc->sc_mems = 0; |
3057 | } |
3058 | |
3059 | static int |
3060 | mcx_version(struct mcx_softc *sc) |
3061 | { |
3062 | uint32_t fw0, fw1; |
3063 | uint16_t cmdif; |
3064 | |
3065 | fw0 = mcx_rd(sc, MCX_FW_VER);
3066 | fw1 = mcx_rd(sc, MCX_CMDIF_FW_SUBVER);
3067 |
3068 | printf(": FW %u.%u.%04u", MCX_FW_VER_MAJOR(fw0),
3069 | MCX_FW_VER_MINOR(fw0), MCX_FW_VER_SUBMINOR(fw1));
3070 |
3071 | cmdif = MCX_CMDIF(fw1);
3072 | if (cmdif != MCX_CMD_IF_SUPPORTED) {
3073 | printf(", unsupported command interface %u\n", cmdif); |
3074 | return (-1); |
3075 | } |
3076 | |
3077 | return (0); |
3078 | } |
3079 | |
3080 | static int |
3081 | mcx_init_wait(struct mcx_softc *sc) |
3082 | { |
3083 | unsigned int i; |
3084 | uint32_t r; |
3085 | |
3086 | for (i = 0; i < 2000; i++) { |
3087 | r = mcx_rd(sc, MCX_STATE);
3088 | if ((r & MCX_STATE_MASK) == MCX_STATE_READY)
3089 | return (0);
3090 |
3091 | delay(1000);
3092 | mcx_bar(sc, MCX_STATE, sizeof(uint32_t),
3093 | BUS_SPACE_BARRIER_READ);
3094 | } |
3095 | |
3096 | return (-1); |
3097 | } |
3098 | |
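/*
 * Busy-wait for a command queue entry to be handed back to software,
 * checking the ownership bit roughly once a millisecond for up to
 * `msec' tries.
 */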
3099 | static uint8_t |
3100 | mcx_cmdq_poll(struct mcx_softc *sc, struct mcx_cmdq_entry *cqe, |
3101 | unsigned int msec) |
3102 | { |
3103 | unsigned int i; |
3104 | |
3105 | for (i = 0; i < msec; i++) { |
3106 | bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_cmdq_mem),
3107 | 0, MCX_DMA_LEN(&sc->sc_cmdq_mem), BUS_DMASYNC_POSTRW);
3108 |
3109 | if ((cqe->cq_status & MCX_CQ_STATUS_OWN_MASK) ==
3110 | MCX_CQ_STATUS_OWN_SW)
3111 | return (0);
3112 |
3113 | delay(1000);
3114 | }
3115 |
3116 | return (ETIMEDOUT);
3117 | } |
3118 | |
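/*
 * Helpers for the xor-based signatures the command interface uses so
 * software and firmware can detect corrupted entries and mailboxes.
 */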
3119 | static uint32_t |
3120 | mcx_mix_u64(uint32_t xor, uint64_t u64) |
3121 | { |
3122 | xor ^= u64 >> 32; |
3123 | xor ^= u64; |
3124 | |
3125 | return (xor); |
3126 | } |
3127 | |
3128 | static uint32_t |
3129 | mcx_mix_u32(uint32_t xor, uint32_t u32) |
3130 | { |
3131 | xor ^= u32; |
3132 | |
3133 | return (xor); |
3134 | } |
3135 | |
3136 | static uint32_t |
3137 | mcx_mix_u8(uint32_t xor, uint8_t u8) |
3138 | { |
3139 | xor ^= u8; |
3140 | |
3141 | return (xor); |
3142 | } |
3143 | |
3144 | static uint8_t |
3145 | mcx_mix_done(uint32_t xor) |
3146 | { |
3147 | xor ^= xor >> 16; |
3148 | xor ^= xor >> 8; |
3149 | |
3150 | return (xor); |
3151 | } |
3152 | |
3153 | static uint8_t |
3154 | mcx_xor(const void *buf, size_t len) |
3155 | { |
3156 | const uint32_t *dwords = buf; |
3157 | uint32_t xor = 0xff; |
3158 | size_t i; |
3159 | |
3160 | len /= sizeof(*dwords); |
3161 | |
3162 | for (i = 0; i < len; i++) |
3163 | xor ^= dwords[i]; |
3164 | |
3165 | return (mcx_mix_done(xor)); |
3166 | } |
3167 | |
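/*
 * Return a non-zero per-command token; the same token is written into
 * the queue entry and into any mailboxes belonging to that command.
 */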
3168 | static uint8_t |
3169 | mcx_cmdq_token(struct mcx_softc *sc) |
3170 | { |
3171 | uint8_t token; |
3172 | |
3173 | mtx_enter(&sc->sc_cmdq_mtx); |
3174 | do { |
3175 | token = ++sc->sc_cmdq_token; |
3176 | } while (token == 0); |
3177 | mtx_leave(&sc->sc_cmdq_mtx); |
3178 | |
3179 | return (token); |
3180 | } |
3181 | |
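/*
 * Return the command queue entry for the given slot, or NULL if the
 * hardware still owns whatever was last posted there.
 */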
3182 | static struct mcx_cmdq_entry * |
3183 | mcx_get_cmdq_entry(struct mcx_softc *sc, enum mcx_cmdq_slot slot) |
3184 | { |
3185 | struct mcx_cmdq_entry *cqe; |
3186 | |
3187 | cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3188 | cqe += slot;
3189 |
3190 | /* make sure the slot isn't running a command already */
3191 | bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_cmdq_mem),
3192 | 0, MCX_DMA_LEN(&sc->sc_cmdq_mem), BUS_DMASYNC_POSTRW);
3193 | if ((cqe->cq_status & MCX_CQ_STATUS_OWN_MASK) !=
3194 | MCX_CQ_STATUS_OWN_SW)
3195 | cqe = NULL;
3196 | |
3197 | return (cqe); |
3198 | } |
3199 | |
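/*
 * Initialise a command queue entry: clear it, record the input and
 * output lengths in big-endian form and mark it as owned by hardware.
 */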
3200 | static void |
3201 | mcx_cmdq_init(struct mcx_softc *sc, struct mcx_cmdq_entry *cqe, |
3202 | uint32_t ilen, uint32_t olen, uint8_t token) |
3203 | { |
3204 | memset(cqe, 0, sc->sc_cmdq_size);
3205 |
3206 | cqe->cq_type = MCX_CMDQ_TYPE_PCIE;
3207 | htobem32(&cqe->cq_input_length, ilen);
3208 | htobem32(&cqe->cq_output_length, olen);
3209 | cqe->cq_token = token;
3210 | cqe->cq_status = MCX_CQ_STATUS_OWN_HW;
3211 | } |
3212 | |
3213 | static void |
3214 | mcx_cmdq_sign(struct mcx_cmdq_entry *cqe) |
3215 | { |
3216 | cqe->cq_signature = ~mcx_xor(cqe, sizeof(*cqe)); |
3217 | } |
3218 | |
3219 | static int |
3220 | mcx_cmdq_verify(const struct mcx_cmdq_entry *cqe) |
3221 | { |
3222 | /* return (mcx_xor(cqe, sizeof(*cqe)) ? -1 : 0); */ |
3223 | return (0); |
3224 | } |
3225 | |
3226 | static void * |
3227 | mcx_cmdq_in(struct mcx_cmdq_entry *cqe) |
3228 | { |
3229 | return (&cqe->cq_input_data); |
3230 | } |
3231 | |
3232 | static void * |
3233 | mcx_cmdq_out(struct mcx_cmdq_entry *cqe) |
3234 | { |
3235 | return (&cqe->cq_output_data); |
3236 | } |
3237 | |
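/*
 * Sign the entry, flush it out to memory and ring the command queue
 * doorbell for this slot so the firmware starts processing it.
 */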
3238 | static void |
3239 | mcx_cmdq_post(struct mcx_softc *sc, struct mcx_cmdq_entry *cqe, |
3240 | unsigned int slot) |
3241 | { |
3242 | mcx_cmdq_sign(cqe); |
3243 | |
3244 | bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_cmdq_mem),
3245 | 0, MCX_DMA_LEN(&sc->sc_cmdq_mem), BUS_DMASYNC_PRERW);
3246 |
3247 | mcx_wr(sc, MCX_CMDQ_DOORBELL, 1U << slot);
3248 | mcx_bar(sc, MCX_CMDQ_DOORBELL, sizeof(uint32_t),
3249 | BUS_SPACE_BARRIER_WRITE);
3250 | } |
3251 | |
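/*
 * Run a command to completion.  The poll slot busy-waits; the other
 * slots post the command and sleep on the token channel, rechecking the
 * ownership bit each time they wake (presumably from the command
 * completion event on the admin event queue) until it returns to
 * software or the timeout fires.
 */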
3252 | static int |
3253 | mcx_cmdq_exec(struct mcx_softc *sc, struct mcx_cmdq_entry *cqe, |
3254 | unsigned int slot, unsigned int msec) |
3255 | { |
3256 | int err; |
3257 | |
3258 | if (slot == MCX_CMDQ_SLOT_POLL) { |
3259 | mcx_cmdq_post(sc, cqe, slot); |
3260 | return (mcx_cmdq_poll(sc, cqe, msec)); |
3261 | } |
3262 | |
3263 | mtx_enter(&sc->sc_cmdq_mtx); |
3264 | mcx_cmdq_post(sc, cqe, slot); |
3265 | |
3266 | err = 0; |
3267 | while (err == 0) { |
3268 | err = msleep_nsec(&sc->sc_cmdq_token, &sc->sc_cmdq_mtx, 0, |
3269 | "mcxcmd", msec * 1000); |
3270 | bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_cmdq_mem), 0,
3271 | MCX_DMA_LEN(&sc->sc_cmdq_mem), BUS_DMASYNC_POSTRW);
3272 | if ((cqe->cq_status & MCX_CQ_STATUS_OWN_MASK) ==
3273 | MCX_CQ_STATUS_OWN_SW) {
3274 | err = 0; |
3275 | break; |
3276 | } |
3277 | } |
3278 | |
3279 | mtx_leave(&sc->sc_cmdq_mtx); |
3280 | return (err); |
3281 | } |
3282 | |
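/*
 * ENABLE_HCA is the first command issued once the command queue is set
 * up; it is posted to slot 0 and polled because interrupts are not
 * available yet.
 */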
3283 | static int |
3284 | mcx_enable_hca(struct mcx_softc *sc) |
3285 | { |
3286 | struct mcx_cmdq_entry *cqe; |
3287 | struct mcx_cmd_enable_hca_in *in; |
3288 | struct mcx_cmd_enable_hca_out *out; |
3289 | int error; |
3290 | uint8_t status; |
3291 | |
3292 | cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3293 | mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
3294 |
3295 | in = mcx_cmdq_in(cqe);
3296 | in->cmd_opcode = htobe16(MCX_CMD_ENABLE_HCA);
3297 | in->cmd_op_mod = htobe16(0);
3298 | in->cmd_function_id = htobe16(0);
3299 | |
3300 | mcx_cmdq_post(sc, cqe, 0); |
3301 | |
3302 | error = mcx_cmdq_poll(sc, cqe, 1000); |
3303 | if (error != 0) { |
3304 | printf(", hca enable timeout\n"); |
3305 | return (-1); |
3306 | } |
3307 | if (mcx_cmdq_verify(cqe) != 0) { |
3308 | printf(", hca enable command corrupt\n"); |
3309 | return (-1); |
3310 | } |
3311 | |
3312 | status = cqe->cq_output_data[0]; |
3313 | if (status != MCX_CQ_STATUS_OK) {
3314 | printf(", hca enable failed (%x)\n", status); |
3315 | return (-1); |
3316 | } |
3317 | |
3318 | return (0); |
3319 | } |
3320 | |
3321 | static int |
3322 | mcx_teardown_hca(struct mcx_softc *sc, uint16_t profile) |
3323 | { |
3324 | struct mcx_cmdq_entry *cqe; |
3325 | struct mcx_cmd_teardown_hca_in *in; |
3326 | struct mcx_cmd_teardown_hca_out *out; |
3327 | int error; |
3328 | uint8_t status; |
3329 | |
3330 | cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3331 | mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
3332 |
3333 | in = mcx_cmdq_in(cqe);
3334 | in->cmd_opcode = htobe16(MCX_CMD_TEARDOWN_HCA);
3335 | in->cmd_op_mod = htobe16(0);
3336 | in->cmd_profile = profile; |
3337 | |
3338 | mcx_cmdq_post(sc, cqe, 0); |
3339 | |
3340 | error = mcx_cmdq_poll(sc, cqe, 1000); |
3341 | if (error != 0) { |
3342 | printf(", hca teardown timeout\n"); |
3343 | return (-1); |
3344 | } |
3345 | if (mcx_cmdq_verify(cqe) != 0) { |
3346 | printf(", hca teardown command corrupt\n"); |
3347 | return (-1); |
3348 | } |
3349 | |
3350 | status = cqe->cq_output_data[0]; |
3351 | if (status != MCX_CQ_STATUS_OK) {
3352 | printf(", hca teardown failed (%x)\n", status); |
3353 | return (-1); |
3354 | } |
3355 | |
3356 | return (0); |
3357 | } |
3358 | |
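/*
 * Commands whose data does not fit in the queue entry use a chain of
 * mailboxes.  Allocate `nmb' mailboxes as one DMA allocation and link
 * them, patching the pointer at `ptr' (in the entry or the previous
 * mailbox) to the bus address of each new mailbox in turn.
 */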
3359 | static int |
3360 | mcx_cmdq_mboxes_alloc(struct mcx_softc *sc, struct mcx_dmamem *mxm, |
3361 | unsigned int nmb, uint64_t *ptr, uint8_t token) |
3362 | { |
3363 | caddr_t kva; |
3364 | uint64_t dva; |
3365 | int i; |
3366 | int error; |
3367 | |
3368 | error = mcx_dmamem_alloc(sc, mxm, |
3369 | nmb * MCX_CMDQ_MAILBOX_SIZE, MCX_CMDQ_MAILBOX_ALIGN);
3370 | if (error != 0) |
3371 | return (error); |
3372 | |
3373 | mcx_dmamem_zero(mxm); |
3374 | |
3375 | dva = MCX_DMA_DVA(mxm);
3376 | kva = MCX_DMA_KVA(mxm);
3377 | for (i = 0; i < nmb; i++) {
3378 | struct mcx_cmdq_mailbox *mbox = (struct mcx_cmdq_mailbox *)kva;
3379 |
3380 | /* patch the cqe or mbox pointing at this one */
3381 | htobem64(ptr, dva);
3382 |
3383 | /* fill in this mbox */
3384 | htobem32(&mbox->mb_block_number, i);
3385 | mbox->mb_token = token;
3386 |
3387 | /* move to the next one */
3388 | ptr = &mbox->mb_next_ptr;
3389 |
3390 | dva += MCX_CMDQ_MAILBOX_SIZE;
3391 | kva += MCX_CMDQ_MAILBOX_SIZE;
3392 | } |
3393 | |
3394 | return (0); |
3395 | } |
3396 | |
3397 | static uint32_t |
3398 | mcx_cmdq_mbox_ctrl_sig(const struct mcx_cmdq_mailbox *mb) |
3399 | { |
3400 | uint32_t xor = 0xff; |
3401 | |
3402 | /* only 3 fields get set, so mix them directly */ |
3403 | xor = mcx_mix_u64(xor, mb->mb_next_ptr); |
3404 | xor = mcx_mix_u32(xor, mb->mb_block_number); |
3405 | xor = mcx_mix_u8(xor, mb->mb_token); |
3406 | |
3407 | return (mcx_mix_done(xor)); |
3408 | } |
3409 | |
3410 | static void |
3411 | mcx_cmdq_mboxes_sign(struct mcx_dmamem *mxm, unsigned int nmb) |
3412 | { |
3413 | caddr_t kva; |
3414 | int i; |
3415 | |
3416 | kva = MCX_DMA_KVA(mxm);
3417 | |
3418 | for (i = 0; i < nmb; i++) { |
3419 | struct mcx_cmdq_mailbox *mb = (struct mcx_cmdq_mailbox *)kva; |
3420 | uint8_t sig = mcx_cmdq_mbox_ctrl_sig(mb); |
3421 | mb->mb_ctrl_signature = sig; |
3422 | mb->mb_signature = sig ^ |
3423 | mcx_xor(mb->mb_data, sizeof(mb->mb_data)); |
3424 | |
3425 | kva += MCX_CMDQ_MAILBOX_SIZE;
3426 | } |
3427 | } |
3428 | |
3429 | static void |
3430 | mcx_cmdq_mboxes_sync(struct mcx_softc *sc, struct mcx_dmamem *mxm, int ops) |
3431 | { |
3432 | bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(mxm),
3433 | 0, MCX_DMA_LEN(mxm), ops);
3434 | } |
3435 | |
3436 | static struct mcx_cmdq_mailbox * |
3437 | mcx_cq_mbox(struct mcx_dmamem *mxm, unsigned int i) |
3438 | { |
3439 | caddr_t kva; |
3440 | |
3441 | kva = MCX_DMA_KVA(mxm);
3442 | kva += i * MCX_CMDQ_MAILBOX_SIZE;
3443 | |
3444 | return ((struct mcx_cmdq_mailbox *)kva); |
3445 | } |
3446 | |
3447 | static inline void * |
3448 | mcx_cq_mbox_data(struct mcx_cmdq_mailbox *mb) |
3449 | { |
3450 | return (&mb->mb_data); |
3451 | } |
3452 | |
3453 | static void |
3454 | mcx_cmdq_mboxes_copyin(struct mcx_dmamem *mxm, unsigned int nmb, |
3455 | void *b, size_t len) |
3456 | { |
3457 | caddr_t buf = b; |
3458 | struct mcx_cmdq_mailbox *mb; |
3459 | int i; |
3460 | |
3461 | mb = (struct mcx_cmdq_mailbox *)MCX_DMA_KVA(mxm);
3462 | for (i = 0; i < nmb; i++) {
3463 |
3464 | memcpy(mb->mb_data, buf, min(sizeof(mb->mb_data), len));
3465 | |
3466 | if (sizeof(mb->mb_data) >= len) |
3467 | break; |
3468 | |
3469 | buf += sizeof(mb->mb_data); |
3470 | len -= sizeof(mb->mb_data); |
3471 | mb++; |
3472 | } |
3473 | } |
3474 | |
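/*
 * Fill the mailbox chain with the big-endian bus address of every page
 * backing `buf', starting `offset' bytes into the mailbox data and
 * spilling over into following mailboxes as needed.
 */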
3475 | static void |
3476 | mcx_cmdq_mboxes_pas(struct mcx_dmamem *mxm, int offset, int npages, |
3477 | struct mcx_dmamem *buf) |
3478 | { |
3479 | uint64_t *pas; |
3480 | int mbox, mbox_pages, i; |
3481 | |
3482 | mbox = offset / MCX_CMDQ_MAILBOX_DATASIZE;
3483 | offset %= MCX_CMDQ_MAILBOX_DATASIZE;
3484 |
3485 | pas = mcx_cq_mbox_data(mcx_cq_mbox(mxm, mbox));
3486 | pas += (offset / sizeof(*pas));
3487 | mbox_pages = (MCX_CMDQ_MAILBOX_DATASIZE - offset) / sizeof(*pas);
3488 | for (i = 0; i < npages; i++) {
3489 | if (i == mbox_pages) {
3490 | mbox++;
3491 | pas = mcx_cq_mbox_data(mcx_cq_mbox(mxm, mbox));
3492 | mbox_pages += MCX_CMDQ_MAILBOX_DATASIZE / sizeof(*pas);
3493 | }
3494 | *pas = htobe64(MCX_DMA_DVA(buf) + (i * MCX_PAGE_SIZE));
3495 | pas++;
3496 | } |
3497 | } |
3498 | |
3499 | static void |
3500 | mcx_cmdq_mboxes_copyout(struct mcx_dmamem *mxm, int nmb, void *b, size_t len) |
3501 | { |
3502 | caddr_t buf = b; |
3503 | struct mcx_cmdq_mailbox *mb; |
3504 | int i; |
3505 | |
3506 | mb = (struct mcx_cmdq_mailbox *)MCX_DMA_KVA(mxm);
3507 | for (i = 0; i < nmb; i++) {
3508 | memcpy(buf, mb->mb_data, min(sizeof(mb->mb_data), len));
3509 | |
3510 | if (sizeof(mb->mb_data) >= len) |
3511 | break; |
3512 | |
3513 | buf += sizeof(mb->mb_data); |
3514 | len -= sizeof(mb->mb_data); |
3515 | mb++; |
3516 | } |
3517 | } |
3518 | |
3519 | static void |
3520 | mcx_cq_mboxes_free(struct mcx_softc *sc, struct mcx_dmamem *mxm) |
3521 | { |
3522 | mcx_dmamem_free(sc, mxm); |
3523 | } |
3524 | |
3525 | #if 0 |
3526 | static void |
3527 | mcx_cmdq_dump(const struct mcx_cmdq_entry *cqe) |
3528 | { |
3529 | unsigned int i; |
3530 | |
3531 | printf(" type %02x, ilen %u, iptr %016llx", cqe->cq_type,
3532 | bemtoh32(&cqe->cq_input_length), bemtoh64(&cqe->cq_input_ptr));
3533 |
3534 | printf(", idata ");
3535 | for (i = 0; i < sizeof(cqe->cq_input_data); i++)
3536 | printf("%02x", cqe->cq_input_data[i]);
3537 |
3538 | printf(", odata ");
3539 | for (i = 0; i < sizeof(cqe->cq_output_data); i++)
3540 | printf("%02x", cqe->cq_output_data[i]);
3541 |
3542 | printf(", optr %016llx, olen %u, token %02x, sig %02x, status %02x",
3543 | bemtoh64(&cqe->cq_output_ptr), bemtoh32(&cqe->cq_output_length),
3544 | cqe->cq_token, cqe->cq_signature, cqe->cq_status);
3545 | } |
3546 | |
3547 | static void |
3548 | mcx_cmdq_mbox_dump(struct mcx_dmamem *mboxes, int num) |
3549 | { |
3550 | int i, j; |
3551 | uint8_t *d; |
3552 | |
3553 | for (i = 0; i < num; i++) { |
3554 | struct mcx_cmdq_mailbox *mbox; |
3555 | mbox = mcx_cq_mbox(mboxes, i); |
3556 | |
3557 | d = mcx_cq_mbox_data(mbox); |
3558 | for (j = 0; j < MCX_CMDQ_MAILBOX_DATASIZE; j++) {
3559 | if (j != 0 && (j % 16 == 0)) |
3560 | printf("\n"); |
3561 | printf("%.2x ", d[j]); |
3562 | } |
3563 | } |
3564 | } |
3565 | #endif |
3566 | |
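/*
 * Issue an ACCESS_REG command to read or write a hardware register
 * block; the same mailbox chain carries the request data out and the
 * reply data back.
 */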
3567 | static int |
3568 | mcx_access_hca_reg(struct mcx_softc *sc, uint16_t reg, int op, void *data, |
3569 | int len, enum mcx_cmdq_slot slot) |
3570 | { |
3571 | struct mcx_dmamem mxm; |
3572 | struct mcx_cmdq_entry *cqe; |
3573 | struct mcx_cmd_access_reg_in *in; |
3574 | struct mcx_cmd_access_reg_out *out; |
3575 | uint8_t token = mcx_cmdq_token(sc); |
3576 | int error, nmb; |
3577 | |
3578 | cqe = mcx_get_cmdq_entry(sc, slot); |
3579 | if (cqe == NULL)
3580 | return (-1); |
3581 | |
3582 | mcx_cmdq_init(sc, cqe, sizeof(*in) + len, sizeof(*out) + len, |
3583 | token); |
3584 | |
3585 | in = mcx_cmdq_in(cqe); |
3586 | in->cmd_opcode = htobe16(MCX_CMD_ACCESS_REG);
3587 | in->cmd_op_mod = htobe16(op);
3588 | in->cmd_register_id = htobe16(reg);
3589 |
3590 | nmb = howmany(len, MCX_CMDQ_MAILBOX_DATASIZE);
3591 | if (mcx_cmdq_mboxes_alloc(sc, &mxm, nmb, |
3592 | &cqe->cq_output_ptr, token) != 0) { |
3593 | printf(", unable to allocate access reg mailboxen\n"); |
3594 | return (-1); |
3595 | } |
3596 | cqe->cq_input_ptr = cqe->cq_output_ptr; |
3597 | mcx_cmdq_mboxes_copyin(&mxm, nmb, data, len); |
3598 | mcx_cmdq_mboxes_sign(&mxm, nmb); |
3599 | mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_PRERW);
3600 |
3601 | error = mcx_cmdq_exec(sc, cqe, slot, 1000);
3602 | mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_POSTRW);
3603 |
3604 | if (error != 0) {
3605 | printf("%s: access reg (%s %x) timeout\n", DEVNAME(sc),
3606 | (op == MCX_REG_OP_WRITE ? "write" : "read"), reg);
3607 | goto free; |
3608 | } |
3609 | error = mcx_cmdq_verify(cqe); |
3610 | if (error != 0) { |
3611 | printf("%s: access reg (%s %x) reply corrupt\n",
3612 | DEVNAME(sc), (op == MCX_REG_OP_WRITE ? "write" : "read"),
3613 | reg);
3614 | goto free; |
3615 | } |
3616 | |
3617 | out = mcx_cmdq_out(cqe); |
3618 | if (out->cmd_status != MCX_CQ_STATUS_OK) {
3619 | printf("%s: access reg (%s %x) failed (%x, %.6x)\n",
3620 | DEVNAME(sc), (op == MCX_REG_OP_WRITE ? "write" : "read"),
3621 | reg, out->cmd_status, betoh32(out->cmd_syndrome));
3622 | error = -1; |
3623 | goto free; |
3624 | } |
3625 | |
3626 | mcx_cmdq_mboxes_copyout(&mxm, nmb, data, len); |
3627 | free: |
3628 | mcx_dmamem_free(sc, &mxm); |
3629 | |
3630 | return (error); |
3631 | } |
3632 | |
3633 | static int |
3634 | mcx_set_issi(struct mcx_softc *sc, struct mcx_cmdq_entry *cqe, |
3635 | unsigned int slot) |
3636 | { |
3637 | struct mcx_cmd_set_issi_in *in; |
3638 | struct mcx_cmd_set_issi_out *out; |
3639 | uint8_t status; |
3640 | |
3641 | mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc)); |
3642 | |
3643 | in = mcx_cmdq_in(cqe); |
3644 | in->cmd_opcode = htobe16(MCX_CMD_SET_ISSI);
3645 | in->cmd_op_mod = htobe16(0);
3646 | in->cmd_current_issi = htobe16(MCX_ISSI);
3647 | |
3648 | mcx_cmdq_post(sc, cqe, slot); |
3649 | if (mcx_cmdq_poll(sc, cqe, 1000) != 0) |
3650 | return (-1); |
3651 | if (mcx_cmdq_verify(cqe) != 0) |
3652 | return (-1); |
3653 | |
3654 | status = cqe->cq_output_data[0]; |
3655 | if (status != MCX_CQ_STATUS_OK)
3656 | return (-1); |
3657 | |
3658 | return (0); |
3659 | } |
3660 | |
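/*
 * Negotiate the ISSI version with the firmware: query which versions it
 * supports and switch to ISSI 1 if possible, otherwise stay on ISSI 0.
 */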
3661 | static int |
3662 | mcx_issi(struct mcx_softc *sc) |
3663 | { |
3664 | struct mcx_dmamem mxm; |
3665 | struct mcx_cmdq_entry *cqe; |
3666 | struct mcx_cmd_query_issi_in *in; |
3667 | struct mcx_cmd_query_issi_il_out *out; |
3668 | struct mcx_cmd_query_issi_mb_out *mb; |
3669 | uint8_t token = mcx_cmdq_token(sc); |
3670 | uint8_t status; |
3671 | int error; |
3672 | |
3673 | cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3674 | mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + sizeof(*mb), token);
3675 |
3676 | in = mcx_cmdq_in(cqe);
3677 | in->cmd_opcode = htobe16(MCX_CMD_QUERY_ISSI);
3678 | in->cmd_op_mod = htobe16(0);
3679 |
3680 | CTASSERT(sizeof(*mb) <= MCX_CMDQ_MAILBOX_DATASIZE);
3681 | if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, |
3682 | &cqe->cq_output_ptr, token) != 0) { |
3683 | printf(", unable to allocate query issi mailbox\n"); |
3684 | return (-1); |
3685 | } |
3686 | mcx_cmdq_mboxes_sign(&mxm, 1); |
3687 | |
3688 | mcx_cmdq_post(sc, cqe, 0); |
3689 | error = mcx_cmdq_poll(sc, cqe, 1000); |
3690 | if (error != 0) { |
3691 | printf(", query issi timeout\n"); |
3692 | goto free; |
3693 | } |
3694 | error = mcx_cmdq_verify(cqe); |
3695 | if (error != 0) { |
3696 | printf(", query issi reply corrupt\n"); |
3697 | goto free; |
3698 | } |
3699 | |
3700 | status = cqe->cq_output_data[0]; |
3701 | switch (status) {
3702 | case MCX_CQ_STATUS_OK:
3703 | break;
3704 | case MCX_CQ_STATUS_BAD_OPCODE:
3705 | /* use ISSI 0 */ |
3706 | goto free; |
3707 | default: |
3708 | printf(", query issi failed (%x)\n", status); |
3709 | error = -1; |
3710 | goto free; |
3711 | } |
3712 | |
3713 | out = mcx_cmdq_out(cqe); |
3714 | if (out->cmd_current_issi == htobe16(MCX_ISSI)) {
3715 | /* use ISSI 1 */ |
3716 | goto free; |
3717 | } |
3718 | |
3719 | /* don't need to read cqe anymore, can be used for SET ISSI */ |
3720 | |
3721 | mb = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)); |
3722 | CTASSERT(MCX_ISSI < NBBY);
3723 | /* XXX math is hard */
3724 | if (!ISSET(mb->cmd_supported_issi[79], 1 << MCX_ISSI)) {
3725 | /* use ISSI 0 */ |
3726 | goto free; |
3727 | } |
3728 | |
3729 | if (mcx_set_issi(sc, cqe, 0) != 0) { |
3730 | /* ignore the error, just use ISSI 0 */ |
3731 | } else { |
3732 | /* use ISSI 1 */ |
3733 | } |
3734 | |
3735 | free: |
3736 | mcx_cq_mboxes_free(sc, &mxm); |
3737 | return (error); |
3738 | } |
3739 | |
3740 | static int |
3741 | mcx_query_pages(struct mcx_softc *sc, uint16_t type, |
3742 | int32_t *npages, uint16_t *func_id) |
3743 | { |
3744 | struct mcx_cmdq_entry *cqe; |
3745 | struct mcx_cmd_query_pages_in *in; |
3746 | struct mcx_cmd_query_pages_out *out; |
3747 | |
3748 | cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3749 | mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
3750 |
3751 | in = mcx_cmdq_in(cqe);
3752 | in->cmd_opcode = htobe16(MCX_CMD_QUERY_PAGES);
3753 | in->cmd_op_mod = type;
3754 | |
3755 | mcx_cmdq_post(sc, cqe, 0); |
3756 | if (mcx_cmdq_poll(sc, cqe, 1000) != 0) { |
3757 | printf(", query pages timeout\n"); |
3758 | return (-1); |
3759 | } |
3760 | if (mcx_cmdq_verify(cqe) != 0) { |
3761 | printf(", query pages reply corrupt\n"); |
3762 | return (-1); |
3763 | } |
3764 | |
3765 | out = mcx_cmdq_out(cqe); |
3766 | if (out->cmd_status != MCX_CQ_STATUS_OK) {
3767 | printf(", query pages failed (%x)\n", out->cmd_status); |
3768 | return (-1); |
3769 | } |
3770 | |
3771 | *func_id = out->cmd_func_id; |
3772 | *npages = bemtoh32(&out->cmd_num_pages);
3773 | |
3774 | return (0); |
3775 | } |
3776 | |
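/*
 * Small iterator over the segments of a loaded DMA map, used below to
 * walk hardware memory page by page no matter how the underlying
 * allocation was segmented.
 */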
3777 | struct bus_dma_iter { |
3778 | bus_dmamap_t i_map; |
3779 | bus_size_t i_offset; |
3780 | unsigned int i_index; |
3781 | }; |
3782 | |
3783 | static void |
3784 | bus_dma_iter_init(struct bus_dma_iter *i, bus_dmamap_t map) |
3785 | { |
3786 | i->i_map = map; |
3787 | i->i_offset = 0; |
3788 | i->i_index = 0; |
3789 | } |
3790 | |
3791 | static bus_addr_t |
3792 | bus_dma_iter_addr(struct bus_dma_iter *i) |
3793 | { |
3794 | return (i->i_map->dm_segs[i->i_index].ds_addr + i->i_offset); |
3795 | } |
3796 | |
3797 | static void |
3798 | bus_dma_iter_add(struct bus_dma_iter *i, bus_size_t size) |
3799 | { |
3800 | bus_dma_segment_t *seg = i->i_map->dm_segs + i->i_index; |
3801 | bus_size_t diff; |
3802 | |
3803 | do { |
3804 | diff = seg->ds_len - i->i_offset; |
3805 | if (size < diff) |
3806 | break; |
3807 | |
3808 | size -= diff; |
3809 | |
3810 | seg++; |
3811 | |
3812 | i->i_offset = 0; |
3813 | i->i_index++; |
3814 | } while (size > 0); |
3815 | |
3816 | i->i_offset += size; |
3817 | } |
3818 | |
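/*
 * Hand a block of host pages to the firmware via MANAGE_PAGES, writing
 * the bus address of every page in the hwmem allocation into the
 * command's mailboxes.
 */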
3819 | static int |
3820 | mcx_add_pages(struct mcx_softc *sc, struct mcx_hwmem *mhm, uint16_t func_id) |
3821 | { |
3822 | struct mcx_dmamem mxm; |
3823 | struct mcx_cmdq_entry *cqe; |
3824 | struct mcx_cmd_manage_pages_in *in; |
3825 | struct mcx_cmd_manage_pages_out *out; |
3826 | unsigned int paslen, nmb, i, j, npages; |
3827 | struct bus_dma_iter iter; |
3828 | uint64_t *pas; |
3829 | uint8_t status; |
3830 | uint8_t token = mcx_cmdq_token(sc); |
3831 | int error; |
3832 | |
3833 | npages = mhm->mhm_npages; |
3834 | |
3835 | paslen = sizeof(*pas) * npages; |
3836 | nmb = howmany(paslen, MCX_CMDQ_MAILBOX_DATASIZE);
3837 |
3838 | cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3839 | mcx_cmdq_init(sc, cqe, sizeof(*in) + paslen, sizeof(*out), token);
3840 |
3841 | in = mcx_cmdq_in(cqe);
3842 | in->cmd_opcode = htobe16(MCX_CMD_MANAGE_PAGES);
3843 | in->cmd_op_mod = htobe16(MCX_CMD_MANAGE_PAGES_ALLOC_SUCCESS);
3844 | in->cmd_func_id = func_id;
3845 | htobem32(&in->cmd_input_num_entries, npages);
3846 | |
3847 | if (mcx_cmdq_mboxes_alloc(sc, &mxm, nmb, |
3848 | &cqe->cq_input_ptr, token) != 0) { |
3849 | printf(", unable to allocate manage pages mailboxen\n"); |
3850 | return (-1); |
3851 | } |
3852 | |
3853 | bus_dma_iter_init(&iter, mhm->mhm_map); |
3854 | for (i = 0; i < nmb; i++) { |
3855 | unsigned int lim; |
3856 | |
3857 | pas = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, i)); |
3858 | lim = min(MCX_CMDQ_MAILBOX_DATASIZE / sizeof(*pas), npages);
3859 |
3860 | for (j = 0; j < lim; j++) {
3861 | htobem64(&pas[j], bus_dma_iter_addr(&iter));
3862 | bus_dma_iter_add(&iter, MCX_PAGE_SIZE);
3863 | } |
3864 | |
3865 | npages -= lim; |
3866 | } |
3867 | |
3868 | mcx_cmdq_mboxes_sign(&mxm, nmb); |
3869 | |
3870 | mcx_cmdq_post(sc, cqe, 0); |
3871 | error = mcx_cmdq_poll(sc, cqe, 1000); |
3872 | if (error != 0) { |
3873 | printf(", manage pages timeout\n"); |
3874 | goto free; |
3875 | } |
3876 | error = mcx_cmdq_verify(cqe); |
3877 | if (error != 0) { |
3878 | printf(", manage pages reply corrupt\n"); |
3879 | goto free; |
3880 | } |
3881 | |
3882 | status = cqe->cq_output_data[0]; |
3883 | if (status != MCX_CQ_STATUS_OK) {
3884 | printf(", manage pages failed (%x)\n", status); |
3885 | error = -1; |
3886 | goto free; |
3887 | } |
3888 | |
3889 | free: |
3890 | mcx_dmamem_free(sc, &mxm); |
3891 | |
3892 | return (error); |
3893 | } |
3894 | |
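/*
 * Ask the firmware how many pages it wants for the given phase, then
 * allocate that much memory and pass it over with mcx_add_pages().
 */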
3895 | static int |
3896 | mcx_pages(struct mcx_softc *sc, struct mcx_hwmem *mhm, uint16_t type) |
3897 | { |
3898 | int32_t npages; |
3899 | uint16_t func_id; |
3900 | |
3901 | if (mcx_query_pages(sc, type, &npages, &func_id) != 0) { |
3902 | /* error printed by mcx_query_pages */ |
3903 | return (-1); |
3904 | } |
3905 | |
3906 | if (npages < 1) |
3907 | return (0); |
3908 | |
3909 | if (mcx_hwmem_alloc(sc, mhm, npages) != 0) { |
3910 | printf(", unable to allocate hwmem\n"); |
3911 | return (-1); |
3912 | } |
3913 | |
3914 | if (mcx_add_pages(sc, mhm, func_id) != 0) { |
3915 | printf(", unable to add hwmem\n"); |
3916 | goto free; |
3917 | } |
3918 | |
3919 | return (0); |
3920 | |
3921 | free: |
3922 | mcx_hwmem_free(sc, mhm); |
3923 | |
3924 | return (-1); |
3925 | } |
3926 | |
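/*
 * Query the device's maximum capabilities and check the ones this
 * driver depends on: ethernet port type, a small enough minimum page
 * size, the blueflame buffer size and the maximum RQT size.
 */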
3927 | static int |
3928 | mcx_hca_max_caps(struct mcx_softc *sc) |
3929 | { |
3930 | struct mcx_dmamem mxm; |
3931 | struct mcx_cmdq_entry *cqe; |
3932 | struct mcx_cmd_query_hca_cap_in *in; |
3933 | struct mcx_cmd_query_hca_cap_out *out; |
3934 | struct mcx_cmdq_mailbox *mb; |
3935 | struct mcx_cap_device *hca; |
3936 | uint8_t status; |
3937 | uint8_t token = mcx_cmdq_token(sc); |
3938 | int error; |
3939 | |
3940 | cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3941 | mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + MCX_HCA_CAP_LEN,
3942 | token);
3943 |
3944 | in = mcx_cmdq_in(cqe);
3945 | in->cmd_opcode = htobe16(MCX_CMD_QUERY_HCA_CAP);
3946 | in->cmd_op_mod = htobe16(MCX_CMD_QUERY_HCA_CAP_MAX |
3947 | MCX_CMD_QUERY_HCA_CAP_DEVICE);
3948 |
3949 | if (mcx_cmdq_mboxes_alloc(sc, &mxm, MCX_HCA_CAP_NMAILBOXES,
3950 | &cqe->cq_output_ptr, token) != 0) {
3951 | printf(", unable to allocate query hca caps mailboxen\n");
3952 | return (-1);
3953 | }
3954 | mcx_cmdq_mboxes_sign(&mxm, MCX_HCA_CAP_NMAILBOXES);
3955 | mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_PRERW);
3956 | |
3957 | mcx_cmdq_post(sc, cqe, 0); |
3958 | error = mcx_cmdq_poll(sc, cqe, 1000); |
3959 | mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_POSTRW);
3960 | |
3961 | if (error != 0) { |
3962 | printf(", query hca caps timeout\n"); |
3963 | goto free; |
3964 | } |
3965 | error = mcx_cmdq_verify(cqe); |
3966 | if (error != 0) { |
3967 | printf(", query hca caps reply corrupt\n"); |
3968 | goto free; |
3969 | } |
3970 | |
3971 | status = cqe->cq_output_data[0]; |
3972 | if (status != MCX_CQ_STATUS_OK) {
3973 | printf(", query hca caps failed (%x)\n", status); |
3974 | error = -1; |
3975 | goto free; |
3976 | } |
3977 | |
3978 | mb = mcx_cq_mbox(&mxm, 0); |
3979 | hca = mcx_cq_mbox_data(mb); |
3980 | |
3981 | if ((hca->port_type & MCX_CAP_DEVICE_PORT_TYPE)
3982 | != MCX_CAP_DEVICE_PORT_TYPE_ETH) {
3983 | printf(", not in ethernet mode\n");
3984 | error = -1;
3985 | goto free;
3986 | }
3987 | if (hca->log_pg_sz > PAGE_SHIFT) {
3988 | printf(", minimum system page shift %u is too large\n", |
3989 | hca->log_pg_sz); |
3990 | error = -1; |
3991 | goto free; |
3992 | } |
3993 | /* |
3994 | * blueflame register is split into two buffers, and we must alternate |
3995 | * between the two of them. |
3996 | */ |
3997 | sc->sc_bf_size = (1 << hca->log_bf_reg_size) / 2; |
3998 | sc->sc_max_rqt_size = (1 << hca->log_max_rqt_size); |
3999 | |
4000 | if (hca->local_ca_ack_delay & MCX_CAP_DEVICE_MCAM_REG)
4001 | sc->sc_mcam_reg = 1;
4002 |
4003 | sc->sc_mhz = bemtoh32(&hca->device_frequency_mhz);
4004 | sc->sc_khz = bemtoh32(&hca->device_frequency_khz);
4005 | |
4006 | free: |
4007 | mcx_dmamem_free(sc, &mxm); |
4008 | |
4009 | return (error); |
4010 | } |
4011 | |
4012 | static int |
4013 | mcx_hca_set_caps(struct mcx_softc *sc) |
4014 | { |
4015 | struct mcx_dmamem mxm; |
4016 | struct mcx_cmdq_entry *cqe; |
4017 | struct mcx_cmd_query_hca_cap_in *in; |
4018 | struct mcx_cmd_query_hca_cap_out *out; |
4019 | struct mcx_cmdq_mailbox *mb; |
4020 | struct mcx_cap_device *hca; |
4021 | uint8_t status; |
4022 | uint8_t token = mcx_cmdq_token(sc); |
4023 | int error; |
4024 | |
4025 | cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4026 | mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + MCX_HCA_CAP_LEN,
4027 | token); |
4028 | |
4029 | in = mcx_cmdq_in(cqe); |
4030 | in->cmd_opcode = htobe16(MCX_CMD_QUERY_HCA_CAP);
4031 | in->cmd_op_mod = htobe16(MCX_CMD_QUERY_HCA_CAP_CURRENT |
4032 | MCX_CMD_QUERY_HCA_CAP_DEVICE);
4033 | 
4034 | if (mcx_cmdq_mboxes_alloc(sc, &mxm, MCX_HCA_CAP_NMAILBOXES,
4035 | &cqe->cq_output_ptr, token) != 0) { |
4036 | printf(", unable to allocate manage pages mailboxen\n"); |
4037 | return (-1); |
4038 | } |
4039 | mcx_cmdq_mboxes_sign(&mxm, MCX_HCA_CAP_NMAILBOXES);
4040 | mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_PRERW);
4041 | |
4042 | mcx_cmdq_post(sc, cqe, 0); |
4043 | error = mcx_cmdq_poll(sc, cqe, 1000); |
4044 | mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_POSTRW);
4045 | |
4046 | if (error != 0) { |
4047 | printf(", query hca caps timeout\n"); |
4048 | goto free; |
4049 | } |
4050 | error = mcx_cmdq_verify(cqe); |
4051 | if (error != 0) { |
4052 | printf(", query hca caps reply corrupt\n"); |
4053 | goto free; |
4054 | } |
4055 | |
4056 | status = cqe->cq_output_data[0]; |
4057 | if (status != MCX_CQ_STATUS_OK) {
4058 | printf(", query hca caps failed (%x)\n", status); |
4059 | error = -1; |
4060 | goto free; |
4061 | } |
4062 | |
4063 | mb = mcx_cq_mbox(&mxm, 0); |
4064 | hca = mcx_cq_mbox_data(mb); |
4065 | |
4066 | hca->log_pg_sz = PAGE_SHIFT;
4067 | |
4068 | free: |
4069 | mcx_dmamem_free(sc, &mxm); |
4070 | |
4071 | return (error); |
4072 | } |
4073 | |
4074 | |
4075 | static int |
4076 | mcx_init_hca(struct mcx_softc *sc) |
4077 | { |
4078 | struct mcx_cmdq_entry *cqe; |
4079 | struct mcx_cmd_init_hca_in *in; |
4080 | struct mcx_cmd_init_hca_out *out; |
4081 | int error; |
4082 | uint8_t status; |
4083 | |
4084 | cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4085 | mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc)); |
4086 | |
4087 | in = mcx_cmdq_in(cqe); |
4088 | in->cmd_opcode = htobe16(MCX_CMD_INIT_HCA);
4089 | in->cmd_op_mod = htobe16(0);
4090 | |
4091 | mcx_cmdq_post(sc, cqe, 0); |
4092 | |
4093 | error = mcx_cmdq_poll(sc, cqe, 1000); |
4094 | if (error != 0) { |
4095 | printf(", hca init timeout\n"); |
4096 | return (-1); |
4097 | } |
4098 | if (mcx_cmdq_verify(cqe) != 0) { |
4099 | printf(", hca init command corrupt\n"); |
4100 | return (-1); |
4101 | } |
4102 | |
4103 | status = cqe->cq_output_data[0]; |
4104 | if (status != MCX_CQ_STATUS_OK) {
4105 | printf(", hca init failed (%x)\n", status); |
4106 | return (-1); |
4107 | } |
4108 | |
4109 | return (0); |
4110 | } |
4111 | |
4112 | static int |
4113 | mcx_set_driver_version(struct mcx_softc *sc) |
4114 | { |
4115 | struct mcx_dmamem mxm; |
4116 | struct mcx_cmdq_entry *cqe; |
4117 | struct mcx_cmd_set_driver_version_in *in; |
4118 | struct mcx_cmd_set_driver_version_out *out; |
4119 | int error; |
4120 | int token; |
4121 | uint8_t status; |
4122 | |
4123 | cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4124 | token = mcx_cmdq_token(sc); |
4125 | mcx_cmdq_init(sc, cqe, sizeof(*in) + |
4126 | sizeof(struct mcx_cmd_set_driver_version), sizeof(*out), token); |
4127 | |
4128 | in = mcx_cmdq_in(cqe); |
4129 | in->cmd_opcode = htobe16(MCX_CMD_SET_DRIVER_VERSION);
4130 | in->cmd_op_mod = htobe16(0);
4131 | |
4132 | if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, |
4133 | &cqe->cq_input_ptr, token) != 0) { |
4134 | printf(", unable to allocate set driver version mailboxen\n"); |
4135 | return (-1); |
4136 | } |
4137 | strlcpy(mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)), |
4138 | "OpenBSD,mcx,1.000.000000", MCX_CMDQ_MAILBOX_DATASIZE512); |
4139 | |
4140 | mcx_cmdq_mboxes_sign(&mxm, 1); |
4141 | mcx_cmdq_post(sc, cqe, 0); |
4142 | |
4143 | error = mcx_cmdq_poll(sc, cqe, 1000); |
4144 | if (error != 0) { |
4145 | printf(", set driver version timeout\n"); |
4146 | goto free; |
4147 | } |
4148 | if (mcx_cmdq_verify(cqe) != 0) { |
4149 | printf(", set driver version command corrupt\n"); |
4150 | goto free; |
4151 | } |
4152 | |
4153 | status = cqe->cq_output_data[0]; |
4154 | if (status != MCX_CQ_STATUS_OK) {
4155 | printf(", set driver version failed (%x)\n", status); |
4156 | error = -1; |
4157 | goto free; |
4158 | } |
4159 | |
4160 | free: |
4161 | mcx_dmamem_free(sc, &mxm); |
4162 | |
4163 | return (error); |
4164 | } |
4165 | |
4166 | static int |
4167 | mcx_iff(struct mcx_softc *sc) |
4168 | { |
4169 | struct ifnet *ifp = &sc->sc_ac.ac_if; |
4170 | struct mcx_dmamem mxm; |
4171 | struct mcx_cmdq_entry *cqe; |
4172 | struct mcx_cmd_modify_nic_vport_context_in *in; |
4173 | struct mcx_cmd_modify_nic_vport_context_out *out; |
4174 | struct mcx_nic_vport_ctx *ctx; |
4175 | int error; |
4176 | int token; |
4177 | int insize; |
4178 | uint32_t dest; |
4179 | |
4180 | dest = MCX_FLOW_CONTEXT_DEST_TYPE_TABLE |
4181 | sc->sc_rss_flow_table_id; |
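/* (MCX_FLOW_CONTEXT_DEST_TYPE_TABLE is 1 << 24, so the destination type sits in the top byte and the low bits carry the flow table id) */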
4182 | |
4183 | /* enable or disable the promisc flow */ |
4184 | if (ISSET(ifp->if_flags, IFF_PROMISC)) {
4185 | if (sc->sc_promisc_flow_enabled == 0) { |
4186 | mcx_set_flow_table_entry_mac(sc, |
4187 | MCX_FLOW_GROUP_PROMISC, 0, NULL, dest);
4188 | sc->sc_promisc_flow_enabled = 1; |
4189 | } |
4190 | } else if (sc->sc_promisc_flow_enabled != 0) { |
4191 | mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_PROMISC, 0);
4192 | sc->sc_promisc_flow_enabled = 0; |
4193 | } |
4194 | |
4195 | /* enable or disable the all-multicast flow */ |
4196 | if (ISSET(ifp->if_flags, IFF_ALLMULTI)) {
4197 | if (sc->sc_allmulti_flow_enabled == 0) {
4198 | uint8_t mcast[ETHER_ADDR_LEN];
4199 | 
4200 | memset(mcast, 0, sizeof(mcast));
4201 | mcast[0] = 0x01; |
4202 | mcx_set_flow_table_entry_mac(sc, |
4203 | MCX_FLOW_GROUP_ALLMULTI, 0, mcast, dest);
4204 | sc->sc_allmulti_flow_enabled = 1; |
4205 | } |
4206 | } else if (sc->sc_allmulti_flow_enabled != 0) { |
4207 | mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_ALLMULTI, 0);
4208 | sc->sc_allmulti_flow_enabled = 0; |
4209 | } |
4210 | |
4211 | insize = sizeof(struct mcx_nic_vport_ctx) + 240; |
4212 | |
4213 | cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4214 | token = mcx_cmdq_token(sc); |
4215 | mcx_cmdq_init(sc, cqe, sizeof(*in) + insize, sizeof(*out), token); |
4216 | |
4217 | in = mcx_cmdq_in(cqe); |
4218 | in->cmd_opcode = htobe16(MCX_CMD_MODIFY_NIC_VPORT_CONTEXT);
4219 | in->cmd_op_mod = htobe16(0);
4220 | in->cmd_field_select = htobe32(
4221 | MCX_CMD_MODIFY_NIC_VPORT_CONTEXT_FIELD_PROMISC |
4222 | MCX_CMD_MODIFY_NIC_VPORT_CONTEXT_FIELD_MTU);
4223 | |
4224 | if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_input_ptr, token) != 0) { |
4225 | printf(", unable to allocate modify " |
4226 | "nic vport context mailboxen\n"); |
4227 | return (-1); |
4228 | } |
4229 | ctx = (struct mcx_nic_vport_ctx *) |
4230 | (((char *)mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0))) + 240); |
4231 | ctx->vp_mtu = htobe32(sc->sc_hardmtu);
4232 | /* |
4233 | * always leave promisc-all enabled on the vport since we |
4234 | * can't give it a vlan list, and we're already doing multicast |
4235 | * filtering in the flow table. |
4236 | */ |
4237 | ctx->vp_flags = htobe16(MCX_NIC_VPORT_CTX_PROMISC_ALL);
4238 | |
4239 | mcx_cmdq_mboxes_sign(&mxm, 1); |
4240 | mcx_cmdq_post(sc, cqe, 0); |
4241 | |
4242 | error = mcx_cmdq_poll(sc, cqe, 1000); |
4243 | if (error != 0) { |
4244 | printf(", modify nic vport context timeout\n"); |
4245 | goto free; |
4246 | } |
4247 | if (mcx_cmdq_verify(cqe) != 0) { |
4248 | printf(", modify nic vport context command corrupt\n"); |
4249 | goto free; |
4250 | } |
4251 | |
4252 | out = mcx_cmdq_out(cqe); |
4253 | if (out->cmd_status != MCX_CQ_STATUS_OK) {
4254 | printf(", modify nic vport context failed (%x, %x)\n",
4255 | out->cmd_status, betoh32(out->cmd_syndrome));
4256 | error = -1; |
4257 | goto free; |
4258 | } |
4259 | |
4260 | free: |
4261 | mcx_dmamem_free(sc, &mxm); |
4262 | |
4263 | return (error); |
4264 | } |
4265 | |
4266 | static int |
4267 | mcx_alloc_uar(struct mcx_softc *sc, int *uar) |
4268 | { |
4269 | struct mcx_cmdq_entry *cqe; |
4270 | struct mcx_cmd_alloc_uar_in *in; |
4271 | struct mcx_cmd_alloc_uar_out *out; |
4272 | int error; |
4273 | |
4274 | cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4275 | mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc)); |
4276 | |
4277 | in = mcx_cmdq_in(cqe); |
4278 | in->cmd_opcode = htobe16(MCX_CMD_ALLOC_UAR);
4279 | in->cmd_op_mod = htobe16(0);
4280 | |
4281 | mcx_cmdq_post(sc, cqe, 0); |
4282 | |
4283 | error = mcx_cmdq_poll(sc, cqe, 1000); |
4284 | if (error != 0) { |
4285 | printf(", alloc uar timeout\n"); |
4286 | return (-1); |
4287 | } |
4288 | if (mcx_cmdq_verify(cqe) != 0) { |
4289 | printf(", alloc uar command corrupt\n"); |
4290 | return (-1); |
4291 | } |
4292 | |
4293 | out = mcx_cmdq_out(cqe); |
4294 | if (out->cmd_status != MCX_CQ_STATUS_OK) {
4295 | printf(", alloc uar failed (%x)\n", out->cmd_status); |
4296 | return (-1); |
4297 | } |
4298 | |
4299 | *uar = mcx_get_id(out->cmd_uar); |
4300 | return (0); |
4301 | } |
4302 | |
4303 | static int |
4304 | mcx_create_eq(struct mcx_softc *sc, struct mcx_eq *eq, int uar, |
4305 | uint64_t events, int vector) |
4306 | { |
4307 | struct mcx_cmdq_entry *cqe; |
4308 | struct mcx_dmamem mxm; |
4309 | struct mcx_cmd_create_eq_in *in; |
4310 | struct mcx_cmd_create_eq_mb_in *mbin; |
4311 | struct mcx_cmd_create_eq_out *out; |
4312 | struct mcx_eq_entry *eqe; |
4313 | int error; |
4314 | uint64_t *pas; |
4315 | int insize, npages, paslen, i, token; |
4316 | |
4317 | eq->eq_cons = 0; |
4318 | |
4319 | npages = howmany((1 << MCX_LOG_EQ_SIZE) * sizeof(struct mcx_eq_entry),
4320 | MCX_PAGE_SIZE);
4321 | paslen = npages * sizeof(*pas); |
4322 | insize = sizeof(struct mcx_cmd_create_eq_mb_in) + paslen; |
4323 | |
4324 | if (mcx_dmamem_alloc(sc, &eq->eq_mem, npages * MCX_PAGE_SIZE,
4325 | MCX_PAGE_SIZE) != 0) {
4326 | printf(", unable to allocate event queue memory\n"); |
4327 | return (-1); |
4328 | } |
4329 | |
4330 | eqe = (struct mcx_eq_entry *)MCX_DMA_KVA(&eq->eq_mem);
4331 | for (i = 0; i < (1 << MCX_LOG_EQ_SIZE); i++) {
4332 | eqe[i].eq_owner = MCX_EQ_ENTRY_OWNER_INIT;
4333 | }
4334 | 
4335 | cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4336 | token = mcx_cmdq_token(sc); |
4337 | mcx_cmdq_init(sc, cqe, sizeof(*in) + insize, sizeof(*out), token); |
4338 | |
4339 | in = mcx_cmdq_in(cqe); |
4340 | in->cmd_opcode = htobe16(MCX_CMD_CREATE_EQ);
4341 | in->cmd_op_mod = htobe16(0);
4342 | 
4343 | if (mcx_cmdq_mboxes_alloc(sc, &mxm,
4344 | howmany(insize, MCX_CMDQ_MAILBOX_DATASIZE),
4345 | &cqe->cq_input_ptr, token) != 0) { |
4346 | printf(", unable to allocate create eq mailboxen\n"); |
4347 | goto free_eq; |
4348 | } |
4349 | mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)); |
4350 | mbin->cmd_eq_ctx.eq_uar_size = htobe32(
4351 | (MCX_LOG_EQ_SIZE << MCX_EQ_CTX_LOG_EQ_SIZE_SHIFT) | uar);
4352 | mbin->cmd_eq_ctx.eq_intr = vector;
4353 | mbin->cmd_event_bitmask = htobe64(events);
4354 | |
4355 | bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&eq->eq_mem),
4356 | 0, MCX_DMA_LEN(&eq->eq_mem), BUS_DMASYNC_PREREAD);
4357 | |
4358 | /* physical addresses follow the mailbox in data */ |
4359 | mcx_cmdq_mboxes_pas(&mxm, sizeof(*mbin), npages, &eq->eq_mem); |
4360 | mcx_cmdq_mboxes_sign(&mxm, howmany(insize, MCX_CMDQ_MAILBOX_DATASIZE));
4361 | mcx_cmdq_post(sc, cqe, 0); |
4362 | |
4363 | error = mcx_cmdq_poll(sc, cqe, 1000); |
4364 | if (error != 0) { |
4365 | printf(", create eq timeout\n"); |
4366 | goto free_mxm; |
4367 | } |
4368 | if (mcx_cmdq_verify(cqe) != 0) { |
4369 | printf(", create eq command corrupt\n"); |
4370 | goto free_mxm; |
4371 | } |
4372 | |
4373 | out = mcx_cmdq_out(cqe); |
4374 | if (out->cmd_status != MCX_CQ_STATUS_OK) {
4375 | printf(", create eq failed (%x, %x)\n", out->cmd_status,
4376 | betoh32(out->cmd_syndrome));
4377 | goto free_mxm; |
4378 | } |
4379 | |
4380 | eq->eq_n = mcx_get_id(out->cmd_eqn); |
4381 | |
4382 | mcx_dmamem_free(sc, &mxm); |
4383 | |
4384 | mcx_arm_eq(sc, eq, uar); |
4385 | |
4386 | return (0); |
4387 | |
4388 | free_mxm: |
4389 | bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&eq->eq_mem),
4390 | 0, MCX_DMA_LEN(&eq->eq_mem), BUS_DMASYNC_POSTREAD);
4391 | mcx_dmamem_free(sc, &mxm); |
4392 | free_eq: |
4393 | mcx_dmamem_free(sc, &eq->eq_mem); |
4394 | return (-1); |
4395 | } |
4396 | |
4397 | static int |
4398 | mcx_alloc_pd(struct mcx_softc *sc) |
4399 | { |
4400 | struct mcx_cmdq_entry *cqe; |
4401 | struct mcx_cmd_alloc_pd_in *in; |
4402 | struct mcx_cmd_alloc_pd_out *out; |
4403 | int error; |
4404 | |
4405 | cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4406 | mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc)); |
4407 | |
4408 | in = mcx_cmdq_in(cqe); |
4409 | in->cmd_opcode = htobe16(MCX_CMD_ALLOC_PD);
4410 | in->cmd_op_mod = htobe16(0);
4411 | |
4412 | mcx_cmdq_post(sc, cqe, 0); |
4413 | |
4414 | error = mcx_cmdq_poll(sc, cqe, 1000); |
4415 | if (error != 0) { |
4416 | printf(", alloc pd timeout\n"); |
4417 | return (-1); |
4418 | } |
4419 | if (mcx_cmdq_verify(cqe) != 0) { |
4420 | printf(", alloc pd command corrupt\n"); |
4421 | return (-1); |
4422 | } |
4423 | |
4424 | out = mcx_cmdq_out(cqe); |
4425 | if (out->cmd_status != MCX_CQ_STATUS_OK) {
4426 | printf(", alloc pd failed (%x)\n", out->cmd_status); |
4427 | return (-1); |
4428 | } |
4429 | |
4430 | sc->sc_pd = mcx_get_id(out->cmd_pd); |
4431 | return (0); |
4432 | } |
4433 | |
4434 | static int |
4435 | mcx_alloc_tdomain(struct mcx_softc *sc) |
4436 | { |
4437 | struct mcx_cmdq_entry *cqe; |
4438 | struct mcx_cmd_alloc_td_in *in; |
4439 | struct mcx_cmd_alloc_td_out *out; |
4440 | int error; |
4441 | |
4442 | cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4443 | mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc)); |
4444 | |
4445 | in = mcx_cmdq_in(cqe); |
4446 | in->cmd_opcode = htobe16(MCX_CMD_ALLOC_TRANSPORT_DOMAIN);
4447 | in->cmd_op_mod = htobe16(0);
4448 | |
4449 | mcx_cmdq_post(sc, cqe, 0); |
4450 | |
4451 | error = mcx_cmdq_poll(sc, cqe, 1000); |
4452 | if (error != 0) { |
4453 | printf(", alloc transport domain timeout\n"); |
4454 | return (-1); |
4455 | } |
4456 | if (mcx_cmdq_verify(cqe) != 0) { |
4457 | printf(", alloc transport domain command corrupt\n"); |
4458 | return (-1); |
4459 | } |
4460 | |
4461 | out = mcx_cmdq_out(cqe); |
4462 | if (out->cmd_status != MCX_CQ_STATUS_OK) {
4463 | printf(", alloc transport domain failed (%x)\n", |
4464 | out->cmd_status); |
4465 | return (-1); |
4466 | } |
4467 | |
4468 | sc->sc_tdomain = mcx_get_id(out->cmd_tdomain); |
4469 | return (0); |
4470 | } |
4471 | |
4472 | static int |
4473 | mcx_query_nic_vport_context(struct mcx_softc *sc) |
4474 | { |
4475 | struct mcx_dmamem mxm; |
4476 | struct mcx_cmdq_entry *cqe; |
4477 | struct mcx_cmd_query_nic_vport_context_in *in; |
4478 | struct mcx_cmd_query_nic_vport_context_out *out; |
4479 | struct mcx_nic_vport_ctx *ctx; |
4480 | uint8_t *addr; |
4481 | int error, token, i; |
4482 | |
4483 | cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4484 | token = mcx_cmdq_token(sc); |
4485 | mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + sizeof(*ctx), token); |
4486 | |
4487 | in = mcx_cmdq_in(cqe); |
4488 | in->cmd_opcode = htobe16(MCX_CMD_QUERY_NIC_VPORT_CONTEXT);
4489 | in->cmd_op_mod = htobe16(0);
4490 | in->cmd_allowed_list_type = 0; |
4491 | |
4492 | if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, |
4493 | &cqe->cq_output_ptr, token) != 0) { |
4494 | printf(", unable to allocate " |
4495 | "query nic vport context mailboxen\n"); |
4496 | return (-1); |
4497 | } |
4498 | mcx_cmdq_mboxes_sign(&mxm, 1); |
4499 | mcx_cmdq_post(sc, cqe, 0); |
4500 | |
4501 | error = mcx_cmdq_poll(sc, cqe, 1000); |
4502 | if (error != 0) { |
4503 | printf(", query nic vport context timeout\n"); |
4504 | goto free; |
4505 | } |
4506 | if (mcx_cmdq_verify(cqe) != 0) { |
4507 | printf(", query nic vport context command corrupt\n"); |
4508 | goto free; |
4509 | } |
4510 | |
4511 | out = mcx_cmdq_out(cqe); |
4512 | if (out->cmd_status != MCX_CQ_STATUS_OK) {
4513 | printf(", query nic vport context failed (%x, %x)\n",
4514 | out->cmd_status, betoh32(out->cmd_syndrome));
4515 | error = -1; |
4516 | goto free; |
4517 | } |
4518 | |
4519 | ctx = (struct mcx_nic_vport_ctx *) |
4520 | mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)); |
4521 | addr = (uint8_t *)&ctx->vp_perm_addr; |
4522 | for (i = 0; i < ETHER_ADDR_LEN; i++) {
4523 | sc->sc_ac.ac_enaddr[i] = addr[i + 2]; |
4524 | } |
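/* (the MAC appears to occupy the last 6 bytes of the 8-byte vp_perm_addr field, hence the +2 offset above) */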
4525 | free: |
4526 | mcx_dmamem_free(sc, &mxm); |
4527 | |
4528 | return (error); |
4529 | } |
4530 | |
4531 | static int |
4532 | mcx_query_special_contexts(struct mcx_softc *sc) |
4533 | { |
4534 | struct mcx_cmdq_entry *cqe; |
4535 | struct mcx_cmd_query_special_ctx_in *in; |
4536 | struct mcx_cmd_query_special_ctx_out *out; |
4537 | int error; |
4538 | |
4539 | cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4540 | mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc)); |
4541 | |
4542 | in = mcx_cmdq_in(cqe); |
4543 | in->cmd_opcode = htobe16(MCX_CMD_QUERY_SPECIAL_CONTEXTS);
4544 | in->cmd_op_mod = htobe16(0);
4545 | |
4546 | mcx_cmdq_post(sc, cqe, 0); |
4547 | |
4548 | error = mcx_cmdq_poll(sc, cqe, 1000); |
4549 | if (error != 0) { |
4550 | printf(", query special contexts timeout\n"); |
4551 | return (-1); |
4552 | } |
4553 | if (mcx_cmdq_verify(cqe) != 0) { |
4554 | printf(", query special contexts command corrupt\n"); |
4555 | return (-1); |
4556 | } |
4557 | |
4558 | out = mcx_cmdq_out(cqe); |
4559 | if (out->cmd_status != MCX_CQ_STATUS_OK) {
4560 | printf(", query special contexts failed (%x)\n", |
4561 | out->cmd_status); |
4562 | return (-1); |
4563 | } |
4564 | |
4565 | sc->sc_lkey = betoh32(out->cmd_resd_lkey);
4566 | return (0); |
4567 | } |
4568 | |
4569 | static int |
4570 | mcx_set_port_mtu(struct mcx_softc *sc, int mtu) |
4571 | { |
4572 | struct mcx_reg_pmtu pmtu; |
4573 | int error; |
4574 | |
4575 | /* read max mtu */ |
4576 | memset(&pmtu, 0, sizeof(pmtu));
4577 | pmtu.rp_local_port = 1;
4578 | error = mcx_access_hca_reg(sc, MCX_REG_PMTU, MCX_REG_OP_READ, &pmtu,
4579 | sizeof(pmtu), MCX_CMDQ_SLOT_POLL); |
4580 | if (error != 0) { |
4581 | printf(", unable to get port MTU\n"); |
4582 | return error; |
4583 | } |
4584 | |
4585 | mtu = min(mtu, betoh16(pmtu.rp_max_mtu));
4586 | pmtu.rp_admin_mtu = htobe16(mtu);
4587 | error = mcx_access_hca_reg(sc, MCX_REG_PMTU, MCX_REG_OP_WRITE, &pmtu,
4588 | sizeof(pmtu), MCX_CMDQ_SLOT_POLL); |
4589 | if (error != 0) { |
4590 | printf(", unable to set port MTU\n"); |
4591 | return error; |
4592 | } |
4593 | |
4594 | sc->sc_hardmtu = mtu; |
4595 | sc->sc_rxbufsz = roundup(mtu + ETHER_ALIGN, sizeof(long));
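/* (e.g. a 1500-byte MTU plus the 2-byte ETHER_ALIGN gives 1502, rounded up to 1504 with 8-byte longs) */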
4596 | return 0; |
4597 | } |
4598 | |
4599 | static int |
4600 | mcx_create_cq(struct mcx_softc *sc, struct mcx_cq *cq, int uar, int db, int eqn) |
4601 | { |
4602 | struct mcx_cmdq_entry *cmde; |
4603 | struct mcx_cq_entry *cqe; |
4604 | struct mcx_dmamem mxm; |
4605 | struct mcx_cmd_create_cq_in *in; |
4606 | struct mcx_cmd_create_cq_mb_in *mbin; |
4607 | struct mcx_cmd_create_cq_out *out; |
4608 | int error; |
4609 | uint64_t *pas; |
4610 | int insize, npages, paslen, i, token; |
4611 | |
4612 | cq->cq_doorbell = MCX_CQ_DOORBELL_BASE + (MCX_CQ_DOORBELL_STRIDE * db);
4613 | |
4614 | npages = howmany((1 << MCX_LOG_CQ_SIZE) * sizeof(struct mcx_cq_entry),
4615 | MCX_PAGE_SIZE);
4616 | paslen = npages * sizeof(*pas); |
4617 | insize = sizeof(struct mcx_cmd_create_cq_mb_in) + paslen; |
4618 | |
4619 | if (mcx_dmamem_alloc(sc, &cq->cq_mem, npages * MCX_PAGE_SIZE,
4620 | MCX_PAGE_SIZE) != 0) {
4621 | printf("%s: unable to allocate completion queue memory\n",
4622 | DEVNAME(sc));
4623 | return (-1); |
4624 | } |
4625 | cqe = MCX_DMA_KVA(&cq->cq_mem);
4626 | for (i = 0; i < (1 << MCX_LOG_CQ_SIZE); i++) {
4627 | cqe[i].cq_opcode_owner = MCX_CQ_ENTRY_FLAG_OWNER;
4628 | }
4629 | 
4630 | cmde = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4631 | token = mcx_cmdq_token(sc); |
4632 | mcx_cmdq_init(sc, cmde, sizeof(*in) + insize, sizeof(*out), token); |
4633 | |
4634 | in = mcx_cmdq_in(cmde); |
4635 | in->cmd_opcode = htobe16(MCX_CMD_CREATE_CQ);
4636 | in->cmd_op_mod = htobe16(0);
4637 | 
4638 | if (mcx_cmdq_mboxes_alloc(sc, &mxm,
4639 | howmany(insize, MCX_CMDQ_MAILBOX_DATASIZE),
4640 | &cmde->cq_input_ptr, token) != 0) {
4641 | printf("%s: unable to allocate create cq mailboxen\n",
4642 | DEVNAME(sc));
4643 | goto free_cq; |
4644 | } |
4645 | mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)); |
4646 | mbin->cmd_cq_ctx.cq_uar_size = htobe32(
4647 | (MCX_LOG_CQ_SIZE << MCX_CQ_CTX_LOG_CQ_SIZE_SHIFT) | uar);
4648 | mbin->cmd_cq_ctx.cq_eqn = htobe32(eqn);
4649 | mbin->cmd_cq_ctx.cq_period_max_count = htobe32(
4650 | (MCX_CQ_MOD_PERIOD << MCX_CQ_CTX_PERIOD_SHIFT) |
4651 | MCX_CQ_MOD_COUNTER);
4652 | mbin->cmd_cq_ctx.cq_doorbell = htobe64(
4653 | MCX_DMA_DVA(&sc->sc_doorbell_mem) + cq->cq_doorbell);
4654 | |
4655 | bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&cq->cq_mem),
4656 | 0, MCX_DMA_LEN(&cq->cq_mem), BUS_DMASYNC_PREREAD);
4657 | |
4658 | /* physical addresses follow the mailbox in data */ |
4659 | mcx_cmdq_mboxes_pas(&mxm, sizeof(*mbin), npages, &cq->cq_mem); |
4660 | mcx_cmdq_post(sc, cmde, 0); |
4661 | |
4662 | error = mcx_cmdq_poll(sc, cmde, 1000); |
4663 | if (error != 0) { |
4664 | printf("%s: create cq timeout\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
4665 | goto free_mxm; |
4666 | } |
4667 | if (mcx_cmdq_verify(cmde) != 0) { |
4668 | printf("%s: create cq command corrupt\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
4669 | goto free_mxm; |
4670 | } |
4671 | |
4672 | out = mcx_cmdq_out(cmde); |
4673 | if (out->cmd_status != MCX_CQ_STATUS_OK) {
4674 | printf("%s: create cq failed (%x, %x)\n", DEVNAME(sc),
4675 | out->cmd_status, betoh32(out->cmd_syndrome));
4676 | goto free_mxm; |
4677 | } |
4678 | |
4679 | cq->cq_n = mcx_get_id(out->cmd_cqn); |
4680 | cq->cq_cons = 0; |
4681 | cq->cq_count = 0; |
4682 | |
4683 | mcx_dmamem_free(sc, &mxm); |
4684 | |
4685 | bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
4686 | cq->cq_doorbell, sizeof(struct mcx_cq_doorbell),
4687 | BUS_DMASYNC_PREWRITE);
4688 | |
4689 | mcx_arm_cq(sc, cq, uar); |
4690 | |
4691 | return (0); |
4692 | |
4693 | free_mxm: |
4694 | bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&cq->cq_mem),
4695 | 0, MCX_DMA_LEN(&cq->cq_mem), BUS_DMASYNC_POSTREAD);
4696 | mcx_dmamem_free(sc, &mxm); |
4697 | free_cq: |
4698 | mcx_dmamem_free(sc, &cq->cq_mem); |
4699 | return (-1); |
4700 | } |
4701 | |
4702 | static int |
4703 | mcx_destroy_cq(struct mcx_softc *sc, struct mcx_cq *cq) |
4704 | { |
4705 | struct mcx_cmdq_entry *cqe; |
4706 | struct mcx_cmd_destroy_cq_in *in; |
4707 | struct mcx_cmd_destroy_cq_out *out; |
4708 | int error; |
4709 | int token; |
4710 | |
4711 | cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4712 | token = mcx_cmdq_token(sc); |
4713 | mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token); |
4714 | |
4715 | in = mcx_cmdq_in(cqe); |
4716 | in->cmd_opcode = htobe16(MCX_CMD_DESTROY_CQ);
4717 | in->cmd_op_mod = htobe16(0);
4718 | in->cmd_cqn = htobe32(cq->cq_n);
4719 | |
4720 | mcx_cmdq_post(sc, cqe, 0); |
4721 | error = mcx_cmdq_poll(sc, cqe, 1000); |
4722 | if (error != 0) { |
4723 | printf("%s: destroy cq timeout\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
4724 | return error; |
4725 | } |
4726 | if (mcx_cmdq_verify(cqe) != 0) { |
4727 | printf("%s: destroy cq command corrupt\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
4728 | return error; |
4729 | } |
4730 | |
4731 | out = mcx_cmdq_out(cqe); |
4732 | if (out->cmd_status != MCX_CQ_STATUS_OK) {
4733 | printf("%s: destroy cq failed (%x, %x)\n", DEVNAME(sc),
4734 | out->cmd_status, betoh32(out->cmd_syndrome));
4735 | return -1; |
4736 | } |
4737 | |
4738 | bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
4739 | cq->cq_doorbell, sizeof(struct mcx_cq_doorbell),
4740 | BUS_DMASYNC_POSTWRITE);
4741 | 
4742 | bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&cq->cq_mem),
4743 | 0, MCX_DMA_LEN(&cq->cq_mem), BUS_DMASYNC_POSTREAD);
4744 | mcx_dmamem_free(sc, &cq->cq_mem); |
4745 | |
4746 | cq->cq_n = 0; |
4747 | cq->cq_cons = 0; |
4748 | cq->cq_count = 0; |
4749 | return 0; |
4750 | } |
4751 | |
4752 | static int |
4753 | mcx_create_rq(struct mcx_softc *sc, struct mcx_rx *rx, int db, int cqn) |
4754 | { |
4755 | struct mcx_cmdq_entry *cqe; |
4756 | struct mcx_dmamem mxm; |
4757 | struct mcx_cmd_create_rq_in *in; |
4758 | struct mcx_cmd_create_rq_out *out; |
4759 | struct mcx_rq_ctx *mbin; |
4760 | int error; |
4761 | uint64_t *pas; |
4762 | uint32_t rq_flags; |
4763 | int insize, npages, paslen, token; |
4764 | |
4765 | rx->rx_doorbell = MCX_WQ_DOORBELL_BASE +
4766 | (db * MCX_WQ_DOORBELL_STRIDE);
4767 | |
4768 | npages = howmany((1 << MCX_LOG_RQ_SIZE) * sizeof(struct mcx_rq_entry),
4769 | MCX_PAGE_SIZE);
4770 | paslen = npages * sizeof(*pas); |
4771 | insize = 0x10 + sizeof(struct mcx_rq_ctx) + paslen; |
4772 | |
4773 | if (mcx_dmamem_alloc(sc, &rx->rx_rq_mem, npages * MCX_PAGE_SIZE,
4774 | MCX_PAGE_SIZE) != 0) {
4775 | printf("%s: unable to allocate receive queue memory\n",
4776 | DEVNAME(sc));
4777 | return (-1); |
4778 | } |
4779 | |
4780 | cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4781 | token = mcx_cmdq_token(sc); |
4782 | mcx_cmdq_init(sc, cqe, sizeof(*in) + insize, sizeof(*out), token); |
4783 | |
4784 | in = mcx_cmdq_in(cqe); |
4785 | in->cmd_opcode = htobe16(MCX_CMD_CREATE_RQ);
4786 | in->cmd_op_mod = htobe16(0);
4787 | 
4788 | if (mcx_cmdq_mboxes_alloc(sc, &mxm,
4789 | howmany(insize, MCX_CMDQ_MAILBOX_DATASIZE),
4790 | &cqe->cq_input_ptr, token) != 0) {
4791 | printf("%s: unable to allocate create rq mailboxen\n",
4792 | DEVNAME(sc));
4793 | goto free_rq; |
4794 | } |
4795 | mbin = (struct mcx_rq_ctx *) |
4796 | (((char *)mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0))) + 0x10); |
4797 | rq_flags = MCX_RQ_CTX_RLKEY;
4798 | #if NVLAN == 0
4799 | rq_flags |= MCX_RQ_CTX_VLAN_STRIP_DIS;
4800 | #endif
4801 | mbin->rq_flags = htobe32(rq_flags);
4802 | mbin->rq_cqn = htobe32(cqn);
4803 | mbin->rq_wq.wq_type = MCX_WQ_CTX_TYPE_CYCLIC;
4804 | mbin->rq_wq.wq_pd = htobe32(sc->sc_pd);
4805 | mbin->rq_wq.wq_doorbell = htobe64(MCX_DMA_DVA(&sc->sc_doorbell_mem) +
4806 | rx->rx_doorbell);
4807 | mbin->rq_wq.wq_log_stride = htobe16(4);
4808 | mbin->rq_wq.wq_log_size = MCX_LOG_RQ_SIZE;
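/* (a log2 stride of 4 means 16-byte receive descriptors; with MCX_LOG_RQ_SIZE of 10 the ring holds 1024 entries) */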
4809 | |
4810 | bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&rx->rx_rq_mem),
4811 | 0, MCX_DMA_LEN(&rx->rx_rq_mem), BUS_DMASYNC_PREWRITE);
4812 | |
4813 | /* physical addresses follow the mailbox in data */ |
4814 | mcx_cmdq_mboxes_pas(&mxm, sizeof(*mbin) + 0x10, npages, &rx->rx_rq_mem); |
4815 | mcx_cmdq_post(sc, cqe, 0); |
4816 | |
4817 | error = mcx_cmdq_poll(sc, cqe, 1000); |
4818 | if (error != 0) { |
4819 | printf("%s: create rq timeout\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
4820 | goto free_mxm; |
4821 | } |
4822 | if (mcx_cmdq_verify(cqe) != 0) { |
4823 | printf("%s: create rq command corrupt\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
4824 | goto free_mxm; |
4825 | } |
4826 | |
4827 | out = mcx_cmdq_out(cqe); |
4828 | if (out->cmd_status != MCX_CQ_STATUS_OK) {
4829 | printf("%s: create rq failed (%x, %x)\n", DEVNAME(sc),
4830 | out->cmd_status, betoh32(out->cmd_syndrome));
4831 | goto free_mxm; |
4832 | } |
4833 | |
4834 | rx->rx_rqn = mcx_get_id(out->cmd_rqn); |
4835 | |
4836 | mcx_dmamem_free(sc, &mxm); |
4837 | |
4838 | bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
4839 | rx->rx_doorbell, sizeof(uint32_t), BUS_DMASYNC_PREWRITE);
4840 | |
4841 | return (0); |
4842 | |
4843 | free_mxm: |
4844 | bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&rx->rx_rq_mem),
4845 | 0, MCX_DMA_LEN(&rx->rx_rq_mem), BUS_DMASYNC_POSTWRITE);
4846 | mcx_dmamem_free(sc, &mxm); |
4847 | free_rq: |
4848 | mcx_dmamem_free(sc, &rx->rx_rq_mem); |
4849 | return (-1); |
4850 | } |
4851 | |
4852 | static int |
4853 | mcx_ready_rq(struct mcx_softc *sc, struct mcx_rx *rx) |
4854 | { |
4855 | struct mcx_cmdq_entry *cqe; |
4856 | struct mcx_dmamem mxm; |
4857 | struct mcx_cmd_modify_rq_in *in; |
4858 | struct mcx_cmd_modify_rq_mb_in *mbin; |
4859 | struct mcx_cmd_modify_rq_out *out; |
4860 | int error; |
4861 | int token; |
4862 | |
4863 | cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4864 | token = mcx_cmdq_token(sc); |
4865 | mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), |
4866 | sizeof(*out), token); |
4867 | |
4868 | in = mcx_cmdq_in(cqe); |
4869 | in->cmd_opcode = htobe16(MCX_CMD_MODIFY_RQ);
4870 | in->cmd_op_mod = htobe16(0);
4871 | in->cmd_rq_state = htobe32((MCX_QUEUE_STATE_RST << 28) | rx->rx_rqn);
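/* (the queue's current state goes in the top four bits of cmd_rq_state, the rqn in the low 28) */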
4872 | |
4873 | if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, |
4874 | &cqe->cq_input_ptr, token) != 0) { |
4875 | printf("%s: unable to allocate modify rq mailbox\n", |
4876 | DEVNAME(sc));
4877 | return (-1); |
4878 | } |
4879 | mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)); |
4880 | mbin->cmd_rq_ctx.rq_flags = htobe32(
4881 | MCX_QUEUE_STATE_RDY << MCX_RQ_CTX_STATE_SHIFT);
4882 | |
4883 | mcx_cmdq_mboxes_sign(&mxm, 1); |
4884 | mcx_cmdq_post(sc, cqe, 0); |
4885 | error = mcx_cmdq_poll(sc, cqe, 1000); |
4886 | if (error != 0) { |
4887 | printf("%s: modify rq timeout\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
4888 | goto free; |
4889 | } |
4890 | if (mcx_cmdq_verify(cqe) != 0) { |
4891 | printf("%s: modify rq command corrupt\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
4892 | goto free; |
4893 | } |
4894 | |
4895 | out = mcx_cmdq_out(cqe); |
4896 | if (out->cmd_status != MCX_CQ_STATUS_OK) {
4897 | printf("%s: modify rq failed (%x, %x)\n", DEVNAME(sc),
4898 | out->cmd_status, betoh32(out->cmd_syndrome));
4899 | error = -1; |
4900 | goto free; |
4901 | } |
4902 | |
4903 | free: |
4904 | mcx_dmamem_free(sc, &mxm); |
4905 | return (error); |
4906 | } |
4907 | |
4908 | static int |
4909 | mcx_destroy_rq(struct mcx_softc *sc, struct mcx_rx *rx) |
4910 | { |
4911 | struct mcx_cmdq_entry *cqe; |
4912 | struct mcx_cmd_destroy_rq_in *in; |
4913 | struct mcx_cmd_destroy_rq_out *out; |
4914 | int error; |
4915 | int token; |
4916 | |
4917 | cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4918 | token = mcx_cmdq_token(sc); |
4919 | mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token); |
4920 | |
4921 | in = mcx_cmdq_in(cqe); |
4922 | in->cmd_opcode = htobe16(MCX_CMD_DESTROY_RQ);
4923 | in->cmd_op_mod = htobe16(0);
4924 | in->cmd_rqn = htobe32(rx->rx_rqn);
4925 | |
4926 | mcx_cmdq_post(sc, cqe, 0); |
4927 | error = mcx_cmdq_poll(sc, cqe, 1000); |
4928 | if (error != 0) { |
4929 | printf("%s: destroy rq timeout\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
4930 | return error; |
4931 | } |
4932 | if (mcx_cmdq_verify(cqe) != 0) { |
4933 | printf("%s: destroy rq command corrupt\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
4934 | return error; |
4935 | } |
4936 | |
4937 | out = mcx_cmdq_out(cqe); |
4938 | if (out->cmd_status != MCX_CQ_STATUS_OK) {
4939 | printf("%s: destroy rq failed (%x, %x)\n", DEVNAME(sc),
4940 | out->cmd_status, betoh32(out->cmd_syndrome));
4941 | return -1; |
4942 | } |
4943 | |
4944 | bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
4945 | rx->rx_doorbell, sizeof(uint32_t), BUS_DMASYNC_POSTWRITE);
4946 | 
4947 | bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&rx->rx_rq_mem),
4948 | 0, MCX_DMA_LEN(&rx->rx_rq_mem), BUS_DMASYNC_POSTWRITE);
4949 | mcx_dmamem_free(sc, &rx->rx_rq_mem); |
4950 | |
4951 | rx->rx_rqn = 0; |
4952 | return 0; |
4953 | } |
4954 | |
4955 | static int |
4956 | mcx_create_tir_direct(struct mcx_softc *sc, struct mcx_rx *rx, int *tirn) |
4957 | { |
4958 | struct mcx_cmdq_entry *cqe; |
4959 | struct mcx_dmamem mxm; |
4960 | struct mcx_cmd_create_tir_in *in; |
4961 | struct mcx_cmd_create_tir_mb_in *mbin; |
4962 | struct mcx_cmd_create_tir_out *out; |
4963 | int error; |
4964 | int token; |
4965 | |
4966 | cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4967 | token = mcx_cmdq_token(sc); |
4968 | mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), |
4969 | sizeof(*out), token); |
4970 | |
4971 | in = mcx_cmdq_in(cqe); |
4972 | in->cmd_opcode = htobe16(MCX_CMD_CREATE_TIR)(__uint16_t)(__builtin_constant_p(0x900) ? (__uint16_t)(((__uint16_t )(0x900) & 0xffU) << 8 | ((__uint16_t)(0x900) & 0xff00U) >> 8) : __swap16md(0x900)); |
4973 | in->cmd_op_mod = htobe16(0)(__uint16_t)(__builtin_constant_p(0) ? (__uint16_t)(((__uint16_t )(0) & 0xffU) << 8 | ((__uint16_t)(0) & 0xff00U ) >> 8) : __swap16md(0)); |
4974 | |
4975 | if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, |
4976 | &cqe->cq_input_ptr, token) != 0) { |
4977 | printf("%s: unable to allocate create tir mailbox\n", |
4978 | DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
4979 | return (-1); |
4980 | } |
4981 | mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)); |
4982 | /* leave disp_type = 0, so packets get sent to the inline rqn */ |
4983 | mbin->cmd_inline_rqn = htobe32(rx->rx_rqn)(__uint32_t)(__builtin_constant_p(rx->rx_rqn) ? (__uint32_t )(((__uint32_t)(rx->rx_rqn) & 0xff) << 24 | ((__uint32_t )(rx->rx_rqn) & 0xff00) << 8 | ((__uint32_t)(rx-> rx_rqn) & 0xff0000) >> 8 | ((__uint32_t)(rx->rx_rqn ) & 0xff000000) >> 24) : __swap32md(rx->rx_rqn)); |
4984 | mbin->cmd_tdomain = htobe32(sc->sc_tdomain)(__uint32_t)(__builtin_constant_p(sc->sc_tdomain) ? (__uint32_t )(((__uint32_t)(sc->sc_tdomain) & 0xff) << 24 | ( (__uint32_t)(sc->sc_tdomain) & 0xff00) << 8 | (( __uint32_t)(sc->sc_tdomain) & 0xff0000) >> 8 | ( (__uint32_t)(sc->sc_tdomain) & 0xff000000) >> 24 ) : __swap32md(sc->sc_tdomain)); |
4985 | |
4986 | mcx_cmdq_post(sc, cqe, 0); |
4987 | error = mcx_cmdq_poll(sc, cqe, 1000); |
4988 | if (error != 0) { |
4989 | printf("%s: create tir timeout\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
4990 | goto free; |
4991 | } |
4992 | if (mcx_cmdq_verify(cqe) != 0) { |
4993 | printf("%s: create tir command corrupt\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
4994 | goto free; |
4995 | } |
4996 | |
4997 | out = mcx_cmdq_out(cqe); |
4998 | if (out->cmd_status != MCX_CQ_STATUS_OK(0x00 << 1)) { |
4999 | printf("%s: create tir failed (%x, %x)\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), |
5000 | out->cmd_status, betoh32(out->cmd_syndrome)(__uint32_t)(__builtin_constant_p(out->cmd_syndrome) ? (__uint32_t )(((__uint32_t)(out->cmd_syndrome) & 0xff) << 24 | ((__uint32_t)(out->cmd_syndrome) & 0xff00) << 8 | ((__uint32_t)(out->cmd_syndrome) & 0xff0000) >> 8 | ((__uint32_t)(out->cmd_syndrome) & 0xff000000) >> 24) : __swap32md(out->cmd_syndrome))); |
5001 | error = -1; |
5002 | goto free; |
5003 | } |
5004 | |
5005 | *tirn = mcx_get_id(out->cmd_tirn); |
5006 | free: |
5007 | mcx_dmamem_free(sc, &mxm); |
5008 | return (error); |
5009 | } |
5010 | |
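/*
 * Create a TIR that dispatches packets indirectly through an RQ table,
 * picking the destination RQ with a Toeplitz hash over the fields named
 * in hash_sel. The new TIR number is returned via *tirn.
 */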
5011 | static int |
5012 | mcx_create_tir_indirect(struct mcx_softc *sc, int rqtn, uint32_t hash_sel, |
5013 | int *tirn) |
5014 | { |
5015 | struct mcx_cmdq_entry *cqe; |
5016 | struct mcx_dmamem mxm; |
5017 | struct mcx_cmd_create_tir_in *in; |
5018 | struct mcx_cmd_create_tir_mb_in *mbin; |
5019 | struct mcx_cmd_create_tir_out *out; |
5020 | int error; |
5021 | int token; |
5022 | |
5023 | cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5024 | token = mcx_cmdq_token(sc);
5025 | mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
5026 | sizeof(*out), token);
5027 | 
5028 | in = mcx_cmdq_in(cqe);
5029 | in->cmd_opcode = htobe16(MCX_CMD_CREATE_TIR);
5030 | in->cmd_op_mod = htobe16(0);
5031 | 
5032 | if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
5033 | &cqe->cq_input_ptr, token) != 0) {
5034 | printf("%s: unable to allocate create tir mailbox\n",
5035 | DEVNAME(sc));
5036 | return (-1);
5037 | }
5038 | mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5039 | mbin->cmd_disp_type = htobe32(MCX_TIR_CTX_DISP_TYPE_INDIRECT
5040 | << MCX_TIR_CTX_DISP_TYPE_SHIFT);
5041 | mbin->cmd_indir_table = htobe32(rqtn);
5042 | mbin->cmd_tdomain = htobe32(sc->sc_tdomain |
5043 | MCX_TIR_CTX_HASH_TOEPLITZ << MCX_TIR_CTX_HASH_SHIFT);
5044 | mbin->cmd_rx_hash_sel_outer = htobe32(hash_sel);
5045 | stoeplitz_to_key(&mbin->cmd_rx_hash_key,
5046 | sizeof(mbin->cmd_rx_hash_key));
5047 | 
5048 | mcx_cmdq_post(sc, cqe, 0);
5049 | error = mcx_cmdq_poll(sc, cqe, 1000);
5050 | if (error != 0) {
5051 | printf("%s: create tir timeout\n", DEVNAME(sc));
5052 | goto free;
5053 | }
5054 | if (mcx_cmdq_verify(cqe) != 0) {
5055 | printf("%s: create tir command corrupt\n", DEVNAME(sc));
5056 | goto free;
5057 | }
5058 | 
5059 | out = mcx_cmdq_out(cqe);
5060 | if (out->cmd_status != MCX_CQ_STATUS_OK) {
5061 | printf("%s: create tir failed (%x, %x)\n", DEVNAME(sc),
5062 | out->cmd_status, betoh32(out->cmd_syndrome));
5063 | error = -1; |
5064 | goto free; |
5065 | } |
5066 | |
5067 | *tirn = mcx_get_id(out->cmd_tirn); |
5068 | free: |
5069 | mcx_dmamem_free(sc, &mxm); |
5070 | return (error); |
5071 | } |
5072 | |
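/* Tear down a TIR with the DESTROY_TIR command. */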
5073 | static int |
5074 | mcx_destroy_tir(struct mcx_softc *sc, int tirn) |
5075 | { |
5076 | struct mcx_cmdq_entry *cqe; |
5077 | struct mcx_cmd_destroy_tir_in *in; |
5078 | struct mcx_cmd_destroy_tir_out *out; |
5079 | int error; |
5080 | int token; |
5081 | |
5082 | cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5083 | token = mcx_cmdq_token(sc);
5084 | mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token);
5085 | 
5086 | in = mcx_cmdq_in(cqe);
5087 | in->cmd_opcode = htobe16(MCX_CMD_DESTROY_TIR);
5088 | in->cmd_op_mod = htobe16(0);
5089 | in->cmd_tirn = htobe32(tirn);
5090 | 
5091 | mcx_cmdq_post(sc, cqe, 0);
5092 | error = mcx_cmdq_poll(sc, cqe, 1000);
5093 | if (error != 0) {
5094 | printf("%s: destroy tir timeout\n", DEVNAME(sc));
5095 | return error;
5096 | }
5097 | if (mcx_cmdq_verify(cqe) != 0) {
5098 | printf("%s: destroy tir command corrupt\n", DEVNAME(sc));
5099 | return error;
5100 | }
5101 | 
5102 | out = mcx_cmdq_out(cqe);
5103 | if (out->cmd_status != MCX_CQ_STATUS_OK) {
5104 | printf("%s: destroy tir failed (%x, %x)\n", DEVNAME(sc),
5105 | out->cmd_status, betoh32(out->cmd_syndrome));
5106 | return -1; |
5107 | } |
5108 | |
5109 | return (0); |
5110 | } |
5111 | |
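/*
 * Create a send queue attached to the given completion queue and UAR:
 * allocate the work queue memory, fill in the SQ context in a command
 * mailbox, and post CREATE_SQ. On success the SQ number and UAR are
 * recorded in the tx ring.
 */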
5112 | static int |
5113 | mcx_create_sq(struct mcx_softc *sc, struct mcx_tx *tx, int uar, int db, |
5114 | int cqn) |
5115 | { |
5116 | struct mcx_cmdq_entry *cqe; |
5117 | struct mcx_dmamem mxm; |
5118 | struct mcx_cmd_create_sq_in *in; |
5119 | struct mcx_sq_ctx *mbin; |
5120 | struct mcx_cmd_create_sq_out *out; |
5121 | int error; |
5122 | uint64_t *pas; |
5123 | int insize, npages, paslen, token; |
5124 | |
5125 | tx->tx_doorbell = MCX_WQ_DOORBELL_BASE +
5126 | (db * MCX_WQ_DOORBELL_STRIDE) + 4;
5127 | 
5128 | npages = howmany((1 << MCX_LOG_SQ_SIZE) * sizeof(struct mcx_sq_entry),
5129 | MCX_PAGE_SIZE);
5130 | paslen = npages * sizeof(*pas);
5131 | insize = sizeof(struct mcx_sq_ctx) + paslen;
5132 | 
5133 | if (mcx_dmamem_alloc(sc, &tx->tx_sq_mem, npages * MCX_PAGE_SIZE,
5134 | MCX_PAGE_SIZE) != 0) {
5135 | printf("%s: unable to allocate send queue memory\n",
5136 | DEVNAME(sc));
5137 | return (-1);
5138 | }
5139 | 
5140 | cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5141 | token = mcx_cmdq_token(sc);
5142 | mcx_cmdq_init(sc, cqe, sizeof(*in) + insize + paslen, sizeof(*out),
5143 | token);
5144 | 
5145 | in = mcx_cmdq_in(cqe);
5146 | in->cmd_opcode = htobe16(MCX_CMD_CREATE_SQ);
5147 | in->cmd_op_mod = htobe16(0);
5148 | 
5149 | if (mcx_cmdq_mboxes_alloc(sc, &mxm,
5150 | howmany(insize, MCX_CMDQ_MAILBOX_DATASIZE),
5151 | &cqe->cq_input_ptr, token) != 0) {
5152 | printf("%s: unable to allocate create sq mailboxen\n",
5153 | DEVNAME(sc));
5154 | goto free_sq;
5155 | }
5156 | mbin = (struct mcx_sq_ctx *)
5157 | (((char *)mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0))) + 0x10);
5158 | mbin->sq_flags = htobe32(MCX_SQ_CTX_RLKEY |
5159 | (1 << MCX_SQ_CTX_MIN_WQE_INLINE_SHIFT));
5160 | mbin->sq_cqn = htobe32(cqn);
5161 | mbin->sq_tis_lst_sz = htobe32(1 << MCX_SQ_CTX_TIS_LST_SZ_SHIFT);
5162 | mbin->sq_tis_num = htobe32(sc->sc_tis);
5163 | mbin->sq_wq.wq_type = MCX_WQ_CTX_TYPE_CYCLIC;
5164 | mbin->sq_wq.wq_pd = htobe32(sc->sc_pd);
5165 | mbin->sq_wq.wq_uar_page = htobe32(uar);
5166 | mbin->sq_wq.wq_doorbell = htobe64(MCX_DMA_DVA(&sc->sc_doorbell_mem) +
5167 | tx->tx_doorbell);
5168 | mbin->sq_wq.wq_log_stride = htobe16(MCX_LOG_SQ_ENTRY_SIZE);
5169 | mbin->sq_wq.wq_log_size = MCX_LOG_SQ_SIZE;
5170 | 
5171 | bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&tx->tx_sq_mem),
5172 | 0, MCX_DMA_LEN(&tx->tx_sq_mem), BUS_DMASYNC_PREWRITE);
5173 | 
5174 | /* physical addresses follow the mailbox in data */
5175 | mcx_cmdq_mboxes_pas(&mxm, sizeof(*mbin) + 0x10,
5176 | npages, &tx->tx_sq_mem);
5177 | mcx_cmdq_post(sc, cqe, 0);
5178 | 
5179 | error = mcx_cmdq_poll(sc, cqe, 1000);
5180 | if (error != 0) {
5181 | printf("%s: create sq timeout\n", DEVNAME(sc));
5182 | goto free_mxm;
5183 | }
5184 | if (mcx_cmdq_verify(cqe) != 0) {
5185 | printf("%s: create sq command corrupt\n", DEVNAME(sc));
5186 | goto free_mxm;
5187 | }
5188 | 
5189 | out = mcx_cmdq_out(cqe);
5190 | if (out->cmd_status != MCX_CQ_STATUS_OK) {
5191 | printf("%s: create sq failed (%x, %x)\n", DEVNAME(sc),
5192 | out->cmd_status, betoh32(out->cmd_syndrome));
5193 | goto free_mxm;
5194 | }
5195 | 
5196 | tx->tx_uar = uar;
5197 | tx->tx_sqn = mcx_get_id(out->cmd_sqn);
5198 | 
5199 | mcx_dmamem_free(sc, &mxm);
5200 | 
5201 | bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
5202 | tx->tx_doorbell, sizeof(uint32_t), BUS_DMASYNC_PREWRITE);
5203 | 
5204 | return (0);
5205 | 
5206 | free_mxm:
5207 | bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&tx->tx_sq_mem),
5208 | 0, MCX_DMA_LEN(&tx->tx_sq_mem), BUS_DMASYNC_POSTWRITE);
5209 | mcx_dmamem_free(sc, &mxm); |
5210 | free_sq: |
5211 | mcx_dmamem_free(sc, &tx->tx_sq_mem); |
5212 | return (-1); |
5213 | } |
5214 | |
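/* Tear down a send queue and release its doorbell and work queue memory. */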
5215 | static int |
5216 | mcx_destroy_sq(struct mcx_softc *sc, struct mcx_tx *tx) |
5217 | { |
5218 | struct mcx_cmdq_entry *cqe; |
5219 | struct mcx_cmd_destroy_sq_in *in; |
5220 | struct mcx_cmd_destroy_sq_out *out; |
5221 | int error; |
5222 | int token; |
5223 | |
5224 | cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5225 | token = mcx_cmdq_token(sc);
5226 | mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token);
5227 | 
5228 | in = mcx_cmdq_in(cqe);
5229 | in->cmd_opcode = htobe16(MCX_CMD_DESTROY_SQ);
5230 | in->cmd_op_mod = htobe16(0);
5231 | in->cmd_sqn = htobe32(tx->tx_sqn);
5232 | 
5233 | mcx_cmdq_post(sc, cqe, 0);
5234 | error = mcx_cmdq_poll(sc, cqe, 1000);
5235 | if (error != 0) {
5236 | printf("%s: destroy sq timeout\n", DEVNAME(sc));
5237 | return error;
5238 | }
5239 | if (mcx_cmdq_verify(cqe) != 0) {
5240 | printf("%s: destroy sq command corrupt\n", DEVNAME(sc));
5241 | return error;
5242 | }
5243 | 
5244 | out = mcx_cmdq_out(cqe);
5245 | if (out->cmd_status != MCX_CQ_STATUS_OK) {
5246 | printf("%s: destroy sq failed (%x, %x)\n", DEVNAME(sc),
5247 | out->cmd_status, betoh32(out->cmd_syndrome));
5248 | return -1;
5249 | }
5250 | 
5251 | bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
5252 | tx->tx_doorbell, sizeof(uint32_t), BUS_DMASYNC_POSTWRITE);
5253 | 
5254 | bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&tx->tx_sq_mem),
5255 | 0, MCX_DMA_LEN(&tx->tx_sq_mem), BUS_DMASYNC_POSTWRITE);
5256 | mcx_dmamem_free(sc, &tx->tx_sq_mem); |
5257 | |
5258 | tx->tx_sqn = 0; |
5259 | return 0; |
5260 | } |
5261 | |
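/* Move a send queue from reset to ready with the MODIFY_SQ command. */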
5262 | static int |
5263 | mcx_ready_sq(struct mcx_softc *sc, struct mcx_tx *tx) |
5264 | { |
5265 | struct mcx_cmdq_entry *cqe; |
5266 | struct mcx_dmamem mxm; |
5267 | struct mcx_cmd_modify_sq_in *in; |
5268 | struct mcx_cmd_modify_sq_mb_in *mbin; |
5269 | struct mcx_cmd_modify_sq_out *out; |
5270 | int error; |
5271 | int token; |
5272 | |
5273 | cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5274 | token = mcx_cmdq_token(sc);
5275 | mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
5276 | sizeof(*out), token);
5277 | 
5278 | in = mcx_cmdq_in(cqe);
5279 | in->cmd_opcode = htobe16(MCX_CMD_MODIFY_SQ);
5280 | in->cmd_op_mod = htobe16(0);
5281 | in->cmd_sq_state = htobe32((MCX_QUEUE_STATE_RST << 28) | tx->tx_sqn);
5282 | 
5283 | if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
5284 | &cqe->cq_input_ptr, token) != 0) {
5285 | printf("%s: unable to allocate modify sq mailbox\n",
5286 | DEVNAME(sc));
5287 | return (-1);
5288 | }
5289 | mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5290 | mbin->cmd_sq_ctx.sq_flags = htobe32(
5291 | MCX_QUEUE_STATE_RDY << MCX_SQ_CTX_STATE_SHIFT);
5292 | 
5293 | mcx_cmdq_mboxes_sign(&mxm, 1);
5294 | mcx_cmdq_post(sc, cqe, 0);
5295 | error = mcx_cmdq_poll(sc, cqe, 1000);
5296 | if (error != 0) {
5297 | printf("%s: modify sq timeout\n", DEVNAME(sc));
5298 | goto free;
5299 | }
5300 | if (mcx_cmdq_verify(cqe) != 0) {
5301 | printf("%s: modify sq command corrupt\n", DEVNAME(sc));
5302 | goto free;
5303 | }
5304 | 
5305 | out = mcx_cmdq_out(cqe);
5306 | if (out->cmd_status != MCX_CQ_STATUS_OK) {
5307 | printf("%s: modify sq failed (%x, %x)\n", DEVNAME(sc),
5308 | out->cmd_status, betoh32(out->cmd_syndrome));
5309 | error = -1; |
5310 | goto free; |
5311 | } |
5312 | |
5313 | free: |
5314 | mcx_dmamem_free(sc, &mxm); |
5315 | return (error); |
5316 | } |
5317 | |
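/*
 * Create a TIS (transport interface send) in the transport domain;
 * the new TIS number is returned via *tis.
 */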
5318 | static int |
5319 | mcx_create_tis(struct mcx_softc *sc, int *tis) |
5320 | { |
5321 | struct mcx_cmdq_entry *cqe; |
5322 | struct mcx_dmamem mxm; |
5323 | struct mcx_cmd_create_tis_in *in; |
5324 | struct mcx_cmd_create_tis_mb_in *mbin; |
5325 | struct mcx_cmd_create_tis_out *out; |
5326 | int error; |
5327 | int token; |
5328 | |
5329 | cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5330 | token = mcx_cmdq_token(sc);
5331 | mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
5332 | sizeof(*out), token);
5333 | 
5334 | in = mcx_cmdq_in(cqe);
5335 | in->cmd_opcode = htobe16(MCX_CMD_CREATE_TIS);
5336 | in->cmd_op_mod = htobe16(0);
5337 | 
5338 | if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
5339 | &cqe->cq_input_ptr, token) != 0) {
5340 | printf("%s: unable to allocate create tis mailbox\n",
5341 | DEVNAME(sc));
5342 | return (-1);
5343 | }
5344 | mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5345 | mbin->cmd_tdomain = htobe32(sc->sc_tdomain);
5346 | 
5347 | mcx_cmdq_mboxes_sign(&mxm, 1);
5348 | mcx_cmdq_post(sc, cqe, 0);
5349 | error = mcx_cmdq_poll(sc, cqe, 1000);
5350 | if (error != 0) {
5351 | printf("%s: create tis timeout\n", DEVNAME(sc));
5352 | goto free;
5353 | }
5354 | if (mcx_cmdq_verify(cqe) != 0) {
5355 | printf("%s: create tis command corrupt\n", DEVNAME(sc));
5356 | goto free;
5357 | }
5358 | 
5359 | out = mcx_cmdq_out(cqe);
5360 | if (out->cmd_status != MCX_CQ_STATUS_OK) {
5361 | printf("%s: create tis failed (%x, %x)\n", DEVNAME(sc),
5362 | out->cmd_status, betoh32(out->cmd_syndrome));
5363 | error = -1; |
5364 | goto free; |
5365 | } |
5366 | |
5367 | *tis = mcx_get_id(out->cmd_tisn); |
5368 | free: |
5369 | mcx_dmamem_free(sc, &mxm); |
5370 | return (error); |
5371 | } |
5372 | |
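/* Tear down a TIS with the DESTROY_TIS command. */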
5373 | static int |
5374 | mcx_destroy_tis(struct mcx_softc *sc, int tis) |
5375 | { |
5376 | struct mcx_cmdq_entry *cqe; |
5377 | struct mcx_cmd_destroy_tis_in *in; |
5378 | struct mcx_cmd_destroy_tis_out *out; |
5379 | int error; |
5380 | int token; |
5381 | |
5382 | cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5383 | token = mcx_cmdq_token(sc);
5384 | mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token);
5385 | 
5386 | in = mcx_cmdq_in(cqe);
5387 | in->cmd_opcode = htobe16(MCX_CMD_DESTROY_TIS);
5388 | in->cmd_op_mod = htobe16(0);
5389 | in->cmd_tisn = htobe32(tis);
5390 | 
5391 | mcx_cmdq_post(sc, cqe, 0);
5392 | error = mcx_cmdq_poll(sc, cqe, 1000);
5393 | if (error != 0) {
5394 | printf("%s: destroy tis timeout\n", DEVNAME(sc));
5395 | return error;
5396 | }
5397 | if (mcx_cmdq_verify(cqe) != 0) {
5398 | printf("%s: destroy tis command corrupt\n", DEVNAME(sc));
5399 | return error;
5400 | }
5401 | 
5402 | out = mcx_cmdq_out(cqe);
5403 | if (out->cmd_status != MCX_CQ_STATUS_OK) {
5404 | printf("%s: destroy tis failed (%x, %x)\n", DEVNAME(sc),
5405 | out->cmd_status, betoh32(out->cmd_syndrome));
5406 | return -1; |
5407 | } |
5408 | |
5409 | return 0; |
5410 | } |
5411 | |
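/*
 * Create an RQ table from the 'size' receive queue numbers in rqns, for
 * use by an indirect TIR; the new RQT number is returned via *rqt.
 */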
5412 | static int |
5413 | mcx_create_rqt(struct mcx_softc *sc, int size, int *rqns, int *rqt) |
5414 | { |
5415 | struct mcx_cmdq_entry *cqe; |
5416 | struct mcx_dmamem mxm; |
5417 | struct mcx_cmd_create_rqt_in *in; |
5418 | struct mcx_cmd_create_rqt_mb_in *mbin; |
5419 | struct mcx_cmd_create_rqt_out *out; |
5420 | struct mcx_rqt_ctx *rqt_ctx; |
5421 | int *rqtn; |
5422 | int error; |
5423 | int token; |
5424 | int i; |
5425 | |
5426 | cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5427 | token = mcx_cmdq_token(sc);
5428 | mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin) +
5429 | (size * sizeof(int)), sizeof(*out), token);
5430 | 
5431 | in = mcx_cmdq_in(cqe);
5432 | in->cmd_opcode = htobe16(MCX_CMD_CREATE_RQT);
5433 | in->cmd_op_mod = htobe16(0);
5434 | 
5435 | if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
5436 | &cqe->cq_input_ptr, token) != 0) {
5437 | printf("%s: unable to allocate create rqt mailbox\n",
5438 | DEVNAME(sc));
5439 | return (-1);
5440 | }
5441 | mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5442 | rqt_ctx = &mbin->cmd_rqt;
5443 | rqt_ctx->cmd_rqt_max_size = htobe16(sc->sc_max_rqt_size);
5444 | rqt_ctx->cmd_rqt_actual_size = htobe16(size);
5445 | 
5446 | /* rqt list follows the rqt context */
5447 | rqtn = (int *)(rqt_ctx + 1);
5448 | for (i = 0; i < size; i++) {
5449 | rqtn[i] = htobe32(rqns[i]);
5450 | }
5451 | 
5452 | mcx_cmdq_mboxes_sign(&mxm, 1);
5453 | mcx_cmdq_post(sc, cqe, 0);
5454 | error = mcx_cmdq_poll(sc, cqe, 1000);
5455 | if (error != 0) {
5456 | printf("%s: create rqt timeout\n", DEVNAME(sc));
5457 | goto free;
5458 | }
5459 | if (mcx_cmdq_verify(cqe) != 0) {
5460 | printf("%s: create rqt command corrupt\n", DEVNAME(sc));
5461 | goto free;
5462 | }
5463 | 
5464 | out = mcx_cmdq_out(cqe);
5465 | if (out->cmd_status != MCX_CQ_STATUS_OK) {
5466 | printf("%s: create rqt failed (%x, %x)\n", DEVNAME(sc),
5467 | out->cmd_status, betoh32(out->cmd_syndrome));
5468 | error = -1; |
5469 | goto free; |
5470 | } |
5471 | |
5472 | *rqt = mcx_get_id(out->cmd_rqtn); |
5473 | return (0); |
5474 | free: |
5475 | mcx_dmamem_free(sc, &mxm); |
5476 | return (error); |
5477 | } |
5478 | |
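/* Tear down an RQ table with the DESTROY_RQT command. */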
5479 | static int |
5480 | mcx_destroy_rqt(struct mcx_softc *sc, int rqt) |
5481 | { |
5482 | struct mcx_cmdq_entry *cqe; |
5483 | struct mcx_cmd_destroy_rqt_in *in; |
5484 | struct mcx_cmd_destroy_rqt_out *out; |
5485 | int error; |
5486 | int token; |
5487 | |
5488 | cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5489 | token = mcx_cmdq_token(sc);
5490 | mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token);
5491 | 
5492 | in = mcx_cmdq_in(cqe);
5493 | in->cmd_opcode = htobe16(MCX_CMD_DESTROY_RQT);
5494 | in->cmd_op_mod = htobe16(0);
5495 | in->cmd_rqtn = htobe32(rqt);
5496 | 
5497 | mcx_cmdq_post(sc, cqe, 0);
5498 | error = mcx_cmdq_poll(sc, cqe, 1000);
5499 | if (error != 0) {
5500 | printf("%s: destroy rqt timeout\n", DEVNAME(sc));
5501 | return error;
5502 | }
5503 | if (mcx_cmdq_verify(cqe) != 0) {
5504 | printf("%s: destroy rqt command corrupt\n", DEVNAME(sc));
5505 | return error;
5506 | }
5507 | 
5508 | out = mcx_cmdq_out(cqe);
5509 | if (out->cmd_status != MCX_CQ_STATUS_OK) {
5510 | printf("%s: destroy rqt failed (%x, %x)\n", DEVNAME(sc),
5511 | out->cmd_status, betoh32(out->cmd_syndrome));
5512 | return -1; |
5513 | } |
5514 | |
5515 | return 0; |
5516 | } |
5517 | |
5518 | #if 0 |
5519 | static int |
5520 | mcx_alloc_flow_counter(struct mcx_softc *sc, int i) |
5521 | { |
5522 | struct mcx_cmdq_entry *cqe; |
5523 | struct mcx_cmd_alloc_flow_counter_in *in; |
5524 | struct mcx_cmd_alloc_flow_counter_out *out; |
5525 | int error; |
5526 | |
5527 | cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5528 | mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
5529 | 
5530 | in = mcx_cmdq_in(cqe);
5531 | in->cmd_opcode = htobe16(MCX_CMD_ALLOC_FLOW_COUNTER);
5532 | in->cmd_op_mod = htobe16(0);
5533 | 
5534 | mcx_cmdq_post(sc, cqe, 0);
5535 | 
5536 | error = mcx_cmdq_poll(sc, cqe, 1000);
5537 | if (error != 0) {
5538 | printf("%s: alloc flow counter timeout\n", DEVNAME(sc));
5539 | return (-1);
5540 | }
5541 | if (mcx_cmdq_verify(cqe) != 0) {
5542 | printf("%s: alloc flow counter command corrupt\n", DEVNAME(sc));
5543 | return (-1);
5544 | }
5545 | 
5546 | out = (struct mcx_cmd_alloc_flow_counter_out *)cqe->cq_output_data;
5547 | if (out->cmd_status != MCX_CQ_STATUS_OK) {
5548 | printf("%s: alloc flow counter failed (%x)\n", DEVNAME(sc),
5549 | out->cmd_status);
5550 | return (-1);
5551 | }
5552 | 
5553 | sc->sc_flow_counter_id[i] = betoh16(out->cmd_flow_counter_id);
5554 | printf("flow counter id %d = %d\n", i, sc->sc_flow_counter_id[i]); |
5555 | |