/*
 * Static-analyzer listing context: dev/pci/if_mcx.c.
 * The analyzer reported at line 6936, column 4:
 * "Value stored to 'cqp' is never read".
 */
/*	$OpenBSD: if_mcx.c,v 1.103 2022/01/09 05:42:54 jsg Exp $ */

/*
 * Copyright (c) 2017 David Gwynne <dlg@openbsd.org>
 * Copyright (c) 2019 Jonathan Matthew <jmatthew@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

20 | #include "bpfilter.h" |
21 | #include "vlan.h" |
22 | #include "kstat.h" |
23 | |
24 | #include <sys/param.h> |
25 | #include <sys/systm.h> |
26 | #include <sys/sockio.h> |
27 | #include <sys/mbuf.h> |
28 | #include <sys/kernel.h> |
29 | #include <sys/socket.h> |
30 | #include <sys/device.h> |
31 | #include <sys/pool.h> |
32 | #include <sys/queue.h> |
33 | #include <sys/timeout.h> |
34 | #include <sys/task.h> |
35 | #include <sys/atomic.h> |
36 | #include <sys/timetc.h> |
37 | #include <sys/intrmap.h> |
38 | |
39 | #include <machine/bus.h> |
40 | #include <machine/intr.h> |
41 | |
42 | #include <net/if.h> |
43 | #include <net/if_dl.h> |
44 | #include <net/if_media.h> |
45 | #include <net/toeplitz.h> |
46 | |
47 | #if NBPFILTER1 > 0 |
48 | #include <net/bpf.h> |
49 | #endif |
50 | |
51 | #if NKSTAT0 > 0 |
52 | #include <sys/kstat.h> |
53 | #endif |
54 | |
55 | #include <netinet/in.h> |
56 | #include <netinet/if_ether.h> |
57 | |
58 | #include <dev/pci/pcireg.h> |
59 | #include <dev/pci/pcivar.h> |
60 | #include <dev/pci/pcidevs.h> |
61 | |
#define BUS_DMASYNC_PRERW	(BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)
#define BUS_DMASYNC_POSTRW	(BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)

#define MCX_HCA_BAR	PCI_MAPREG_START /* BAR 0 */

/* firmware version registers in BAR 0 */
#define MCX_FW_VER			0x0000
#define  MCX_FW_VER_MAJOR(_v)		((_v) & 0xffff)
#define  MCX_FW_VER_MINOR(_v)		((_v) >> 16)
#define MCX_CMDIF_FW_SUBVER		0x0004
#define  MCX_FW_VER_SUBMINOR(_v)	((_v) & 0xffff)
#define  MCX_CMDIF(_v)			((_v) >> 16)

#define MCX_ISSI			1 /* as per the PRM */
#define MCX_CMD_IF_SUPPORTED		5

#define MCX_HARDMTU			9500

#define MCX_PAGE_SHIFT			12
#define MCX_PAGE_SIZE			(1 << MCX_PAGE_SHIFT)

/* queue sizes (log2 of the number of entries) */
#define MCX_LOG_EQ_SIZE			7
#define MCX_LOG_CQ_SIZE			12
#define MCX_LOG_RQ_SIZE			10
#define MCX_LOG_SQ_SIZE			11

#define MCX_MAX_QUEUES			16

/* completion event moderation - about 10khz, or 90% of the cq */
#define MCX_CQ_MOD_PERIOD		50
#define MCX_CQ_MOD_COUNTER		\
	(((1 << (MCX_LOG_CQ_SIZE - 1)) * 9) / 10)

#define MCX_LOG_SQ_ENTRY_SIZE		6
#define MCX_SQ_ENTRY_MAX_SLOTS		4
#define MCX_SQ_SEGS_PER_SLOT		\
	(sizeof(struct mcx_sq_entry) / sizeof(struct mcx_sq_entry_seg))
#define MCX_SQ_MAX_SEGMENTS		\
	1 + ((MCX_SQ_ENTRY_MAX_SLOTS-1) * MCX_SQ_SEGS_PER_SLOT)

#define MCX_LOG_FLOW_TABLE_SIZE		5
#define MCX_NUM_STATIC_FLOWS		4 /* promisc, allmulti, ucast, bcast */
#define MCX_NUM_MCAST_FLOWS		\
	((1 << MCX_LOG_FLOW_TABLE_SIZE) - MCX_NUM_STATIC_FLOWS)

107 | #define MCX_SQ_INLINE_SIZE18 18 |
108 | CTASSERT(ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN == MCX_SQ_INLINE_SIZE)extern char _ctassert[(((6 * 2) + 2) + 4 == 18) ? 1 : -1 ] __attribute__ ((__unused__)); |
109 | |
110 | /* doorbell offsets */ |
111 | #define MCX_DOORBELL_AREA_SIZE(1 << 12) MCX_PAGE_SIZE(1 << 12) |
112 | |
113 | #define MCX_CQ_DOORBELL_BASE0 0 |
114 | #define MCX_CQ_DOORBELL_STRIDE64 64 |
115 | |
116 | #define MCX_WQ_DOORBELL_BASE(1 << 12)/2 MCX_PAGE_SIZE(1 << 12)/2 |
117 | #define MCX_WQ_DOORBELL_STRIDE64 64 |
118 | /* make sure the doorbells fit */ |
119 | CTASSERT(MCX_MAX_QUEUES * MCX_CQ_DOORBELL_STRIDE < MCX_WQ_DOORBELL_BASE)extern char _ctassert[(16 * 64 < (1 << 12)/2) ? 1 : - 1 ] __attribute__((__unused__)); |
120 | CTASSERT(MCX_MAX_QUEUES * MCX_WQ_DOORBELL_STRIDE <extern char _ctassert[(16 * 64 < (1 << 12) - (1 << 12)/2) ? 1 : -1 ] __attribute__((__unused__)) |
121 | MCX_DOORBELL_AREA_SIZE - MCX_WQ_DOORBELL_BASE)extern char _ctassert[(16 * 64 < (1 << 12) - (1 << 12)/2) ? 1 : -1 ] __attribute__((__unused__)); |
122 | |
123 | #define MCX_WQ_DOORBELL_MASK0xffff 0xffff |
124 | |
/* uar registers */
#define MCX_UAR_CQ_DOORBELL		0x20
#define MCX_UAR_EQ_DOORBELL_ARM		0x40
#define MCX_UAR_EQ_DOORBELL		0x48
#define MCX_UAR_BF			0x800

/* command queue init segment registers (BAR 0) */
#define MCX_CMDQ_ADDR_HI		0x0010
#define MCX_CMDQ_ADDR_LO		0x0014
#define MCX_CMDQ_ADDR_NMASK		0xfff
#define MCX_CMDQ_LOG_SIZE(_v)		((_v) >> 4 & 0xf)
#define MCX_CMDQ_LOG_STRIDE(_v)		((_v) >> 0 & 0xf)
#define MCX_CMDQ_INTERFACE_MASK		(0x3 << 8)
#define MCX_CMDQ_INTERFACE_FULL_DRIVER	(0x0 << 8)
#define MCX_CMDQ_INTERFACE_DISABLED	(0x1 << 8)

#define MCX_CMDQ_DOORBELL		0x0018

/* NOTE(review): (1 << 31) overflows signed int; upstream uses this form —
 * consider (1U << 31) if these are ever evaluated at run time. */
#define MCX_STATE			0x01fc
#define MCX_STATE_MASK			(1 << 31)
#define MCX_STATE_INITIALIZING		(1 << 31)
#define MCX_STATE_READY			(0 << 31)
#define MCX_STATE_INTERFACE_MASK	(0x3 << 24)
#define MCX_STATE_INTERFACE_FULL_DRIVER	(0x0 << 24)
#define MCX_STATE_INTERFACE_DISABLED	(0x1 << 24)

#define MCX_INTERNAL_TIMER		0x1000
#define MCX_INTERNAL_TIMER_H		0x1000
#define MCX_INTERNAL_TIMER_L		0x1004

#define MCX_CLEAR_INT			0x100c

#define MCX_REG_OP_WRITE		0
#define MCX_REG_OP_READ			1

/* access register ids (ACCESS_REG command) */
#define MCX_REG_PMLP			0x5002
#define MCX_REG_PMTU			0x5003
#define MCX_REG_PTYS			0x5004
#define MCX_REG_PAOS			0x5006
#define MCX_REG_PFCC			0x5007
#define MCX_REG_PPCNT			0x5008
#define MCX_REG_MTCAP			0x9009 /* mgmt temp capabilities */
#define MCX_REG_MTMP			0x900a /* mgmt temp */
#define MCX_REG_MCIA			0x9014
#define MCX_REG_MCAM			0x907f

/* PTYS ethernet protocol capability bit numbers */
#define MCX_ETHER_CAP_SGMII		0
#define MCX_ETHER_CAP_1000_KX		1
#define MCX_ETHER_CAP_10G_CX4		2
#define MCX_ETHER_CAP_10G_KX4		3
#define MCX_ETHER_CAP_10G_KR		4
#define MCX_ETHER_CAP_40G_CR4		6
#define MCX_ETHER_CAP_40G_KR4		7
#define MCX_ETHER_CAP_10G_CR		12
#define MCX_ETHER_CAP_10G_SR		13
#define MCX_ETHER_CAP_10G_LR		14
#define MCX_ETHER_CAP_40G_SR4		15
#define MCX_ETHER_CAP_40G_LR4		16
#define MCX_ETHER_CAP_50G_SR2		18
#define MCX_ETHER_CAP_100G_CR4		20
#define MCX_ETHER_CAP_100G_SR4		21
#define MCX_ETHER_CAP_100G_KR4		22
#define MCX_ETHER_CAP_25G_CR		27
#define MCX_ETHER_CAP_25G_KR		28
#define MCX_ETHER_CAP_25G_SR		29
#define MCX_ETHER_CAP_50G_CR2		30
#define MCX_ETHER_CAP_50G_KR2		31

#define MCX_MAX_CQE			32

/* firmware command opcodes */
#define MCX_CMD_QUERY_HCA_CAP		0x100
#define MCX_CMD_QUERY_ADAPTER		0x101
#define MCX_CMD_INIT_HCA		0x102
#define MCX_CMD_TEARDOWN_HCA		0x103
#define MCX_CMD_ENABLE_HCA		0x104
#define MCX_CMD_DISABLE_HCA		0x105
#define MCX_CMD_QUERY_PAGES		0x107
#define MCX_CMD_MANAGE_PAGES		0x108
#define MCX_CMD_SET_HCA_CAP		0x109
#define MCX_CMD_QUERY_ISSI		0x10a
#define MCX_CMD_SET_ISSI		0x10b
#define MCX_CMD_SET_DRIVER_VERSION	0x10d
#define MCX_CMD_QUERY_SPECIAL_CONTEXTS	0x203
#define MCX_CMD_CREATE_EQ		0x301
#define MCX_CMD_DESTROY_EQ		0x302
#define MCX_CMD_QUERY_EQ		0x303
#define MCX_CMD_CREATE_CQ		0x400
#define MCX_CMD_DESTROY_CQ		0x401
#define MCX_CMD_QUERY_CQ		0x402
#define MCX_CMD_QUERY_NIC_VPORT_CONTEXT	0x754
#define MCX_CMD_MODIFY_NIC_VPORT_CONTEXT \
					0x755
#define MCX_CMD_QUERY_VPORT_COUNTERS	0x770
#define MCX_CMD_ALLOC_PD		0x800
#define MCX_CMD_ALLOC_UAR		0x802
#define MCX_CMD_ACCESS_REG		0x805
#define MCX_CMD_ALLOC_TRANSPORT_DOMAIN	0x816
#define MCX_CMD_CREATE_TIR		0x900
#define MCX_CMD_DESTROY_TIR		0x902
#define MCX_CMD_CREATE_SQ		0x904
#define MCX_CMD_MODIFY_SQ		0x905
#define MCX_CMD_DESTROY_SQ		0x906
#define MCX_CMD_QUERY_SQ		0x907
#define MCX_CMD_CREATE_RQ		0x908
#define MCX_CMD_MODIFY_RQ		0x909
#define MCX_CMD_DESTROY_RQ		0x90a
#define MCX_CMD_QUERY_RQ		0x90b
#define MCX_CMD_CREATE_TIS		0x912
#define MCX_CMD_DESTROY_TIS		0x914
#define MCX_CMD_CREATE_RQT		0x916
#define MCX_CMD_DESTROY_RQT		0x918
#define MCX_CMD_SET_FLOW_TABLE_ROOT	0x92f
#define MCX_CMD_CREATE_FLOW_TABLE	0x930
#define MCX_CMD_DESTROY_FLOW_TABLE	0x931
#define MCX_CMD_QUERY_FLOW_TABLE	0x932
#define MCX_CMD_CREATE_FLOW_GROUP	0x933
#define MCX_CMD_DESTROY_FLOW_GROUP	0x934
#define MCX_CMD_QUERY_FLOW_GROUP	0x935
#define MCX_CMD_SET_FLOW_TABLE_ENTRY	0x936
#define MCX_CMD_QUERY_FLOW_TABLE_ENTRY	0x937
#define MCX_CMD_DELETE_FLOW_TABLE_ENTRY	0x938
#define MCX_CMD_ALLOC_FLOW_COUNTER	0x939
#define MCX_CMD_QUERY_FLOW_COUNTER	0x93b

#define MCX_QUEUE_STATE_RST		0
#define MCX_QUEUE_STATE_RDY		1
#define MCX_QUEUE_STATE_ERR		3

#define MCX_FLOW_TABLE_TYPE_RX		0
#define MCX_FLOW_TABLE_TYPE_TX		1

#define MCX_CMDQ_INLINE_DATASIZE	16

257 | struct mcx_cmdq_entry { |
258 | uint8_t cq_type; |
259 | #define MCX_CMDQ_TYPE_PCIE0x7 0x7 |
260 | uint8_t cq_reserved0[3]; |
261 | |
262 | uint32_t cq_input_length; |
263 | uint64_t cq_input_ptr; |
264 | uint8_t cq_input_data[MCX_CMDQ_INLINE_DATASIZE16]; |
265 | |
266 | uint8_t cq_output_data[MCX_CMDQ_INLINE_DATASIZE16]; |
267 | uint64_t cq_output_ptr; |
268 | uint32_t cq_output_length; |
269 | |
270 | uint8_t cq_token; |
271 | uint8_t cq_signature; |
272 | uint8_t cq_reserved1[1]; |
273 | uint8_t cq_status; |
274 | #define MCX_CQ_STATUS_SHIFT1 1 |
275 | #define MCX_CQ_STATUS_MASK(0x7f << 1) (0x7f << MCX_CQ_STATUS_SHIFT1) |
276 | #define MCX_CQ_STATUS_OK(0x00 << 1) (0x00 << MCX_CQ_STATUS_SHIFT1) |
277 | #define MCX_CQ_STATUS_INT_ERR(0x01 << 1) (0x01 << MCX_CQ_STATUS_SHIFT1) |
278 | #define MCX_CQ_STATUS_BAD_OPCODE(0x02 << 1) (0x02 << MCX_CQ_STATUS_SHIFT1) |
279 | #define MCX_CQ_STATUS_BAD_PARAM(0x03 << 1) (0x03 << MCX_CQ_STATUS_SHIFT1) |
280 | #define MCX_CQ_STATUS_BAD_SYS_STATE(0x04 << 1) (0x04 << MCX_CQ_STATUS_SHIFT1) |
281 | #define MCX_CQ_STATUS_BAD_RESOURCE(0x05 << 1) (0x05 << MCX_CQ_STATUS_SHIFT1) |
282 | #define MCX_CQ_STATUS_RESOURCE_BUSY(0x06 << 1) (0x06 << MCX_CQ_STATUS_SHIFT1) |
283 | #define MCX_CQ_STATUS_EXCEED_LIM(0x08 << 1) (0x08 << MCX_CQ_STATUS_SHIFT1) |
284 | #define MCX_CQ_STATUS_BAD_RES_STATE(0x09 << 1) (0x09 << MCX_CQ_STATUS_SHIFT1) |
285 | #define MCX_CQ_STATUS_BAD_INDEX(0x0a << 1) (0x0a << MCX_CQ_STATUS_SHIFT1) |
286 | #define MCX_CQ_STATUS_NO_RESOURCES(0x0f << 1) (0x0f << MCX_CQ_STATUS_SHIFT1) |
287 | #define MCX_CQ_STATUS_BAD_INPUT_LEN(0x50 << 1) (0x50 << MCX_CQ_STATUS_SHIFT1) |
288 | #define MCX_CQ_STATUS_BAD_OUTPUT_LEN(0x51 << 1) (0x51 << MCX_CQ_STATUS_SHIFT1) |
289 | #define MCX_CQ_STATUS_BAD_RESOURCE_STATE(0x10 << 1) \ |
290 | (0x10 << MCX_CQ_STATUS_SHIFT1) |
291 | #define MCX_CQ_STATUS_BAD_SIZE(0x40 << 1) (0x40 << MCX_CQ_STATUS_SHIFT1) |
292 | #define MCX_CQ_STATUS_OWN_MASK0x1 0x1 |
293 | #define MCX_CQ_STATUS_OWN_SW0x0 0x0 |
294 | #define MCX_CQ_STATUS_OWN_HW0x1 0x1 |
295 | } __packed__attribute__((__packed__)) __aligned(8)__attribute__((__aligned__(8))); |
296 | |
297 | #define MCX_CMDQ_MAILBOX_DATASIZE512 512 |
298 | |
299 | struct mcx_cmdq_mailbox { |
300 | uint8_t mb_data[MCX_CMDQ_MAILBOX_DATASIZE512]; |
301 | uint8_t mb_reserved0[48]; |
302 | uint64_t mb_next_ptr; |
303 | uint32_t mb_block_number; |
304 | uint8_t mb_reserved1[1]; |
305 | uint8_t mb_token; |
306 | uint8_t mb_ctrl_signature; |
307 | uint8_t mb_signature; |
308 | } __packed__attribute__((__packed__)) __aligned(8)__attribute__((__aligned__(8))); |
309 | |
310 | #define MCX_CMDQ_MAILBOX_ALIGN(1 << 10) (1 << 10) |
311 | #define MCX_CMDQ_MAILBOX_SIZE((((sizeof(struct mcx_cmdq_mailbox))+(((1 << 10))-1))/( (1 << 10)))*((1 << 10))) roundup(sizeof(struct mcx_cmdq_mailbox), \((((sizeof(struct mcx_cmdq_mailbox))+(((1 << 10))-1))/( (1 << 10)))*((1 << 10))) |
312 | MCX_CMDQ_MAILBOX_ALIGN)((((sizeof(struct mcx_cmdq_mailbox))+(((1 << 10))-1))/( (1 << 10)))*((1 << 10))) |
/*
 * command mailbox structures
 */

317 | struct mcx_cmd_enable_hca_in { |
318 | uint16_t cmd_opcode; |
319 | uint8_t cmd_reserved0[4]; |
320 | uint16_t cmd_op_mod; |
321 | uint8_t cmd_reserved1[2]; |
322 | uint16_t cmd_function_id; |
323 | uint8_t cmd_reserved2[4]; |
324 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
325 | |
326 | struct mcx_cmd_enable_hca_out { |
327 | uint8_t cmd_status; |
328 | uint8_t cmd_reserved0[3]; |
329 | uint32_t cmd_syndrome; |
330 | uint8_t cmd_reserved1[4]; |
331 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
332 | |
333 | struct mcx_cmd_init_hca_in { |
334 | uint16_t cmd_opcode; |
335 | uint8_t cmd_reserved0[4]; |
336 | uint16_t cmd_op_mod; |
337 | uint8_t cmd_reserved1[8]; |
338 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
339 | |
340 | struct mcx_cmd_init_hca_out { |
341 | uint8_t cmd_status; |
342 | uint8_t cmd_reserved0[3]; |
343 | uint32_t cmd_syndrome; |
344 | uint8_t cmd_reserved1[8]; |
345 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
346 | |
347 | struct mcx_cmd_teardown_hca_in { |
348 | uint16_t cmd_opcode; |
349 | uint8_t cmd_reserved0[4]; |
350 | uint16_t cmd_op_mod; |
351 | uint8_t cmd_reserved1[2]; |
352 | #define MCX_CMD_TEARDOWN_HCA_GRACEFUL0x0 0x0 |
353 | #define MCX_CMD_TEARDOWN_HCA_PANIC0x1 0x1 |
354 | uint16_t cmd_profile; |
355 | uint8_t cmd_reserved2[4]; |
356 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
357 | |
358 | struct mcx_cmd_teardown_hca_out { |
359 | uint8_t cmd_status; |
360 | uint8_t cmd_reserved0[3]; |
361 | uint32_t cmd_syndrome; |
362 | uint8_t cmd_reserved1[8]; |
363 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
364 | |
365 | struct mcx_cmd_access_reg_in { |
366 | uint16_t cmd_opcode; |
367 | uint8_t cmd_reserved0[4]; |
368 | uint16_t cmd_op_mod; |
369 | uint8_t cmd_reserved1[2]; |
370 | uint16_t cmd_register_id; |
371 | uint32_t cmd_argument; |
372 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
373 | |
374 | struct mcx_cmd_access_reg_out { |
375 | uint8_t cmd_status; |
376 | uint8_t cmd_reserved0[3]; |
377 | uint32_t cmd_syndrome; |
378 | uint8_t cmd_reserved1[8]; |
379 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
380 | |
381 | struct mcx_reg_pmtu { |
382 | uint8_t rp_reserved1; |
383 | uint8_t rp_local_port; |
384 | uint8_t rp_reserved2[2]; |
385 | uint16_t rp_max_mtu; |
386 | uint8_t rp_reserved3[2]; |
387 | uint16_t rp_admin_mtu; |
388 | uint8_t rp_reserved4[2]; |
389 | uint16_t rp_oper_mtu; |
390 | uint8_t rp_reserved5[2]; |
391 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
392 | |
393 | struct mcx_reg_ptys { |
394 | uint8_t rp_reserved1; |
395 | uint8_t rp_local_port; |
396 | uint8_t rp_reserved2; |
397 | uint8_t rp_proto_mask; |
398 | #define MCX_REG_PTYS_PROTO_MASK_ETH(1 << 2) (1 << 2) |
399 | uint8_t rp_reserved3[8]; |
400 | uint32_t rp_eth_proto_cap; |
401 | uint8_t rp_reserved4[8]; |
402 | uint32_t rp_eth_proto_admin; |
403 | uint8_t rp_reserved5[8]; |
404 | uint32_t rp_eth_proto_oper; |
405 | uint8_t rp_reserved6[24]; |
406 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
407 | |
408 | struct mcx_reg_paos { |
409 | uint8_t rp_reserved1; |
410 | uint8_t rp_local_port; |
411 | uint8_t rp_admin_status; |
412 | #define MCX_REG_PAOS_ADMIN_STATUS_UP1 1 |
413 | #define MCX_REG_PAOS_ADMIN_STATUS_DOWN2 2 |
414 | #define MCX_REG_PAOS_ADMIN_STATUS_UP_ONCE3 3 |
415 | #define MCX_REG_PAOS_ADMIN_STATUS_DISABLED4 4 |
416 | uint8_t rp_oper_status; |
417 | #define MCX_REG_PAOS_OPER_STATUS_UP1 1 |
418 | #define MCX_REG_PAOS_OPER_STATUS_DOWN2 2 |
419 | #define MCX_REG_PAOS_OPER_STATUS_FAILED4 4 |
420 | uint8_t rp_admin_state_update; |
421 | #define MCX_REG_PAOS_ADMIN_STATE_UPDATE_EN(1 << 7) (1 << 7) |
422 | uint8_t rp_reserved2[11]; |
423 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
424 | |
425 | struct mcx_reg_pfcc { |
426 | uint8_t rp_reserved1; |
427 | uint8_t rp_local_port; |
428 | uint8_t rp_reserved2[3]; |
429 | uint8_t rp_prio_mask_tx; |
430 | uint8_t rp_reserved3; |
431 | uint8_t rp_prio_mask_rx; |
432 | uint8_t rp_pptx_aptx; |
433 | uint8_t rp_pfctx; |
434 | uint8_t rp_fctx_dis; |
435 | uint8_t rp_reserved4; |
436 | uint8_t rp_pprx_aprx; |
437 | uint8_t rp_pfcrx; |
438 | uint8_t rp_reserved5[2]; |
439 | uint16_t rp_dev_stall_min; |
440 | uint16_t rp_dev_stall_crit; |
441 | uint8_t rp_reserved6[12]; |
442 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
443 | |
444 | #define MCX_PMLP_MODULE_NUM_MASK0xff 0xff |
445 | struct mcx_reg_pmlp { |
446 | uint8_t rp_rxtx; |
447 | uint8_t rp_local_port; |
448 | uint8_t rp_reserved0; |
449 | uint8_t rp_width; |
450 | uint32_t rp_lane0_mapping; |
451 | uint32_t rp_lane1_mapping; |
452 | uint32_t rp_lane2_mapping; |
453 | uint32_t rp_lane3_mapping; |
454 | uint8_t rp_reserved1[44]; |
455 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
456 | |
457 | struct mcx_reg_ppcnt { |
458 | uint8_t ppcnt_swid; |
459 | uint8_t ppcnt_local_port; |
460 | uint8_t ppcnt_pnat; |
461 | uint8_t ppcnt_grp; |
462 | #define MCX_REG_PPCNT_GRP_IEEE80230x00 0x00 |
463 | #define MCX_REG_PPCNT_GRP_RFC28630x01 0x01 |
464 | #define MCX_REG_PPCNT_GRP_RFC28190x02 0x02 |
465 | #define MCX_REG_PPCNT_GRP_RFC36350x03 0x03 |
466 | #define MCX_REG_PPCNT_GRP_PER_PRIO0x10 0x10 |
467 | #define MCX_REG_PPCNT_GRP_PER_TC0x11 0x11 |
468 | #define MCX_REG_PPCNT_GRP_PER_RX_BUFFER0x11 0x11 |
469 | |
470 | uint8_t ppcnt_clr; |
471 | uint8_t ppcnt_reserved1[2]; |
472 | uint8_t ppcnt_prio_tc; |
473 | #define MCX_REG_PPCNT_CLR(1 << 7) (1 << 7) |
474 | |
475 | uint8_t ppcnt_counter_set[248]; |
476 | } __packed__attribute__((__packed__)) __aligned(8)__attribute__((__aligned__(8))); |
477 | CTASSERT(sizeof(struct mcx_reg_ppcnt) == 256)extern char _ctassert[(sizeof(struct mcx_reg_ppcnt) == 256) ? 1 : -1 ] __attribute__((__unused__)); |
478 | CTASSERT((offsetof(struct mcx_reg_ppcnt, ppcnt_counter_set) %extern char _ctassert[((__builtin_offsetof(struct mcx_reg_ppcnt , ppcnt_counter_set) % sizeof(uint64_t)) == 0) ? 1 : -1 ] __attribute__ ((__unused__)) |
479 | sizeof(uint64_t)) == 0)extern char _ctassert[((__builtin_offsetof(struct mcx_reg_ppcnt , ppcnt_counter_set) % sizeof(uint64_t)) == 0) ? 1 : -1 ] __attribute__ ((__unused__)); |
480 | |
/* PPCNT IEEE 802.3 counter group, in register layout order (8 bytes each) */
enum mcx_ppcnt_ieee8023 {
	frames_transmitted_ok,
	frames_received_ok,
	frame_check_sequence_errors,
	alignment_errors,
	octets_transmitted_ok,
	octets_received_ok,
	multicast_frames_xmitted_ok,
	broadcast_frames_xmitted_ok,
	multicast_frames_received_ok,
	broadcast_frames_received_ok,
	in_range_length_errors,
	out_of_range_length_field,
	frame_too_long_errors,
	symbol_error_during_carrier,
	mac_control_frames_transmitted,
	mac_control_frames_received,
	unsupported_opcodes_received,
	pause_mac_ctrl_frames_received,
	pause_mac_ctrl_frames_transmitted,

	mcx_ppcnt_ieee8023_count
};
504 | CTASSERT(mcx_ppcnt_ieee8023_count * sizeof(uint64_t) == 0x98)extern char _ctassert[(mcx_ppcnt_ieee8023_count * sizeof(uint64_t ) == 0x98) ? 1 : -1 ] __attribute__((__unused__)); |
505 | |
/* PPCNT RFC 2863 counter group, in register layout order (8 bytes each) */
enum mcx_ppcnt_rfc2863 {
	in_octets,
	in_ucast_pkts,
	in_discards,
	in_errors,
	in_unknown_protos,
	out_octets,
	out_ucast_pkts,
	out_discards,
	out_errors,
	in_multicast_pkts,
	in_broadcast_pkts,
	out_multicast_pkts,
	out_broadcast_pkts,

	mcx_ppcnt_rfc2863_count
};
523 | CTASSERT(mcx_ppcnt_rfc2863_count * sizeof(uint64_t) == 0x68)extern char _ctassert[(mcx_ppcnt_rfc2863_count * sizeof(uint64_t ) == 0x68) ? 1 : -1 ] __attribute__((__unused__)); |
524 | |
/* PPCNT RFC 2819 (RMON) counter group, in register layout order */
enum mcx_ppcnt_rfc2819 {
	drop_events,
	octets,
	pkts,
	broadcast_pkts,
	multicast_pkts,
	crc_align_errors,
	undersize_pkts,
	oversize_pkts,
	fragments,
	jabbers,
	collisions,
	pkts64octets,
	pkts65to127octets,
	pkts128to255octets,
	pkts256to511octets,
	pkts512to1023octets,
	pkts1024to1518octets,
	pkts1519to2047octets,
	pkts2048to4095octets,
	pkts4096to8191octets,
	pkts8192to10239octets,

	mcx_ppcnt_rfc2819_count
};
550 | CTASSERT((mcx_ppcnt_rfc2819_count * sizeof(uint64_t)) == 0xa8)extern char _ctassert[((mcx_ppcnt_rfc2819_count * sizeof(uint64_t )) == 0xa8) ? 1 : -1 ] __attribute__((__unused__)); |
551 | |
/* PPCNT RFC 3635 (EtherLike-MIB) counter group, in register layout order */
enum mcx_ppcnt_rfc3635 {
	dot3stats_alignment_errors,
	dot3stats_fcs_errors,
	dot3stats_single_collision_frames,
	dot3stats_multiple_collision_frames,
	dot3stats_sqe_test_errors,
	dot3stats_deferred_transmissions,
	dot3stats_late_collisions,
	dot3stats_excessive_collisions,
	dot3stats_internal_mac_transmit_errors,
	dot3stats_carrier_sense_errors,
	dot3stats_frame_too_longs,
	dot3stats_internal_mac_receive_errors,
	dot3stats_symbol_errors,
	dot3control_in_unknown_opcodes,
	dot3in_pause_frames,
	dot3out_pause_frames,

	mcx_ppcnt_rfc3635_count
};
572 | CTASSERT((mcx_ppcnt_rfc3635_count * sizeof(uint64_t)) == 0x80)extern char _ctassert[((mcx_ppcnt_rfc3635_count * sizeof(uint64_t )) == 0x80) ? 1 : -1 ] __attribute__((__unused__)); |
573 | |
574 | struct mcx_reg_mcam { |
575 | uint8_t _reserved1[1]; |
576 | uint8_t mcam_feature_group; |
577 | uint8_t _reserved2[1]; |
578 | uint8_t mcam_access_reg_group; |
579 | uint8_t _reserved3[4]; |
580 | uint8_t mcam_access_reg_cap_mask[16]; |
581 | uint8_t _reserved4[16]; |
582 | uint8_t mcam_feature_cap_mask[16]; |
583 | uint8_t _reserved5[16]; |
584 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
585 | |
/*
 * Test bit b in a big-endian bitfield byte array.
 * NOTE(review): this masks with (b % 8) rather than (1 << (b % 8)), which
 * only coincidentally works for some bit numbers; matches upstream 1.103 —
 * verify against later revisions before relying on it for new bits.
 */
#define MCX_BITFIELD_BIT(bf, b)	(bf[(sizeof bf - 1) - (b / 8)] & (b % 8))

#define MCX_MCAM_FEATURE_CAP_SENSOR_MAP	6

590 | struct mcx_reg_mtcap { |
591 | uint8_t _reserved1[3]; |
592 | uint8_t mtcap_sensor_count; |
593 | uint8_t _reserved2[4]; |
594 | |
595 | uint64_t mtcap_sensor_map; |
596 | }; |
597 | |
598 | struct mcx_reg_mtmp { |
599 | uint8_t _reserved1[2]; |
600 | uint16_t mtmp_sensor_index; |
601 | |
602 | uint8_t _reserved2[2]; |
603 | uint16_t mtmp_temperature; |
604 | |
605 | uint16_t mtmp_mte_mtr; |
606 | #define MCX_REG_MTMP_MTE(1 << 15) (1 << 15) |
607 | #define MCX_REG_MTMP_MTR(1 << 14) (1 << 14) |
608 | uint16_t mtmp_max_temperature; |
609 | |
610 | uint16_t mtmp_tee; |
611 | #define MCX_REG_MTMP_TEE_NOPE(0 << 14) (0 << 14) |
612 | #define MCX_REG_MTMP_TEE_GENERATE(1 << 14) (1 << 14) |
613 | #define MCX_REG_MTMP_TEE_GENERATE_ONE(2 << 14) (2 << 14) |
614 | uint16_t mtmp_temperature_threshold_hi; |
615 | |
616 | uint8_t _reserved3[2]; |
617 | uint16_t mtmp_temperature_threshold_lo; |
618 | |
619 | uint8_t _reserved4[4]; |
620 | |
621 | uint8_t mtmp_sensor_name[8]; |
622 | }; |
623 | CTASSERT(sizeof(struct mcx_reg_mtmp) == 0x20)extern char _ctassert[(sizeof(struct mcx_reg_mtmp) == 0x20) ? 1 : -1 ] __attribute__((__unused__)); |
624 | CTASSERT(offsetof(struct mcx_reg_mtmp, mtmp_sensor_name) == 0x18)extern char _ctassert[(__builtin_offsetof(struct mcx_reg_mtmp , mtmp_sensor_name) == 0x18) ? 1 : -1 ] __attribute__((__unused__ )); |
625 | |
626 | #define MCX_MCIA_EEPROM_BYTES32 32 |
627 | struct mcx_reg_mcia { |
628 | uint8_t rm_l; |
629 | uint8_t rm_module; |
630 | uint8_t rm_reserved0; |
631 | uint8_t rm_status; |
632 | uint8_t rm_i2c_addr; |
633 | uint8_t rm_page_num; |
634 | uint16_t rm_dev_addr; |
635 | uint16_t rm_reserved1; |
636 | uint16_t rm_size; |
637 | uint32_t rm_reserved2; |
638 | uint8_t rm_data[48]; |
639 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
640 | |
641 | struct mcx_cmd_query_issi_in { |
642 | uint16_t cmd_opcode; |
643 | uint8_t cmd_reserved0[4]; |
644 | uint16_t cmd_op_mod; |
645 | uint8_t cmd_reserved1[8]; |
646 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
647 | |
648 | struct mcx_cmd_query_issi_il_out { |
649 | uint8_t cmd_status; |
650 | uint8_t cmd_reserved0[3]; |
651 | uint32_t cmd_syndrome; |
652 | uint8_t cmd_reserved1[2]; |
653 | uint16_t cmd_current_issi; |
654 | uint8_t cmd_reserved2[4]; |
655 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
656 | |
657 | CTASSERT(sizeof(struct mcx_cmd_query_issi_il_out) == MCX_CMDQ_INLINE_DATASIZE)extern char _ctassert[(sizeof(struct mcx_cmd_query_issi_il_out ) == 16) ? 1 : -1 ] __attribute__((__unused__)); |
658 | |
659 | struct mcx_cmd_query_issi_mb_out { |
660 | uint8_t cmd_reserved2[16]; |
661 | uint8_t cmd_supported_issi[80]; /* very big endian */ |
662 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
663 | |
664 | CTASSERT(sizeof(struct mcx_cmd_query_issi_mb_out) <= MCX_CMDQ_MAILBOX_DATASIZE)extern char _ctassert[(sizeof(struct mcx_cmd_query_issi_mb_out ) <= 512) ? 1 : -1 ] __attribute__((__unused__)); |
665 | |
666 | struct mcx_cmd_set_issi_in { |
667 | uint16_t cmd_opcode; |
668 | uint8_t cmd_reserved0[4]; |
669 | uint16_t cmd_op_mod; |
670 | uint8_t cmd_reserved1[2]; |
671 | uint16_t cmd_current_issi; |
672 | uint8_t cmd_reserved2[4]; |
673 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
674 | |
675 | CTASSERT(sizeof(struct mcx_cmd_set_issi_in) <= MCX_CMDQ_INLINE_DATASIZE)extern char _ctassert[(sizeof(struct mcx_cmd_set_issi_in) <= 16) ? 1 : -1 ] __attribute__((__unused__)); |
676 | |
677 | struct mcx_cmd_set_issi_out { |
678 | uint8_t cmd_status; |
679 | uint8_t cmd_reserved0[3]; |
680 | uint32_t cmd_syndrome; |
681 | uint8_t cmd_reserved1[8]; |
682 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
683 | |
684 | CTASSERT(sizeof(struct mcx_cmd_set_issi_out) <= MCX_CMDQ_INLINE_DATASIZE)extern char _ctassert[(sizeof(struct mcx_cmd_set_issi_out) <= 16) ? 1 : -1 ] __attribute__((__unused__)); |
685 | |
686 | struct mcx_cmd_query_pages_in { |
687 | uint16_t cmd_opcode; |
688 | uint8_t cmd_reserved0[4]; |
689 | uint16_t cmd_op_mod; |
690 | #define MCX_CMD_QUERY_PAGES_BOOT0x01 0x01 |
691 | #define MCX_CMD_QUERY_PAGES_INIT0x02 0x02 |
692 | #define MCX_CMD_QUERY_PAGES_REGULAR0x03 0x03 |
693 | uint8_t cmd_reserved1[8]; |
694 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
695 | |
696 | struct mcx_cmd_query_pages_out { |
697 | uint8_t cmd_status; |
698 | uint8_t cmd_reserved0[3]; |
699 | uint32_t cmd_syndrome; |
700 | uint8_t cmd_reserved1[2]; |
701 | uint16_t cmd_func_id; |
702 | int32_t cmd_num_pages; |
703 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
704 | |
705 | struct mcx_cmd_manage_pages_in { |
706 | uint16_t cmd_opcode; |
707 | uint8_t cmd_reserved0[4]; |
708 | uint16_t cmd_op_mod; |
709 | #define MCX_CMD_MANAGE_PAGES_ALLOC_FAIL0x00 \ |
710 | 0x00 |
711 | #define MCX_CMD_MANAGE_PAGES_ALLOC_SUCCESS0x01 \ |
712 | 0x01 |
713 | #define MCX_CMD_MANAGE_PAGES_HCA_RETURN_PAGES0x02 \ |
714 | 0x02 |
715 | uint8_t cmd_reserved1[2]; |
716 | uint16_t cmd_func_id; |
717 | uint32_t cmd_input_num_entries; |
718 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
719 | |
720 | CTASSERT(sizeof(struct mcx_cmd_manage_pages_in) == MCX_CMDQ_INLINE_DATASIZE)extern char _ctassert[(sizeof(struct mcx_cmd_manage_pages_in) == 16) ? 1 : -1 ] __attribute__((__unused__)); |
721 | |
722 | struct mcx_cmd_manage_pages_out { |
723 | uint8_t cmd_status; |
724 | uint8_t cmd_reserved0[3]; |
725 | uint32_t cmd_syndrome; |
726 | uint32_t cmd_output_num_entries; |
727 | uint8_t cmd_reserved1[4]; |
728 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
729 | |
730 | CTASSERT(sizeof(struct mcx_cmd_manage_pages_out) == MCX_CMDQ_INLINE_DATASIZE)extern char _ctassert[(sizeof(struct mcx_cmd_manage_pages_out ) == 16) ? 1 : -1 ] __attribute__((__unused__)); |
731 | |
732 | struct mcx_cmd_query_hca_cap_in { |
733 | uint16_t cmd_opcode; |
734 | uint8_t cmd_reserved0[4]; |
735 | uint16_t cmd_op_mod; |
736 | #define MCX_CMD_QUERY_HCA_CAP_MAX(0x0 << 0) (0x0 << 0) |
737 | #define MCX_CMD_QUERY_HCA_CAP_CURRENT(0x1 << 0) (0x1 << 0) |
738 | #define MCX_CMD_QUERY_HCA_CAP_DEVICE(0x0 << 1) (0x0 << 1) |
739 | #define MCX_CMD_QUERY_HCA_CAP_OFFLOAD(0x1 << 1) (0x1 << 1) |
740 | #define MCX_CMD_QUERY_HCA_CAP_FLOW(0x7 << 1) (0x7 << 1) |
741 | uint8_t cmd_reserved1[8]; |
742 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
743 | |
744 | struct mcx_cmd_query_hca_cap_out { |
745 | uint8_t cmd_status; |
746 | uint8_t cmd_reserved0[3]; |
747 | uint32_t cmd_syndrome; |
748 | uint8_t cmd_reserved1[8]; |
749 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
750 | |
/* capability output is 0x1000 bytes, spread over 8 mailboxes */
#define MCX_HCA_CAP_LEN			0x1000
#define MCX_HCA_CAP_NMAILBOXES		\
	(MCX_HCA_CAP_LEN / MCX_CMDQ_MAILBOX_DATASIZE)

755 | #if __GNUC_PREREQ__(4, 3)((4 > (4)) || (4 == (4) && 2 >= (3))) |
756 | #define __counter__756 __COUNTER__0 |
757 | #else |
758 | #define __counter__758 __LINE__758 |
759 | #endif |
760 | |
761 | #define __token(_tok, _num)_tok_num _tok##_num |
762 | #define _token(_tok, _num)_tok_num __token(_tok, _num)_tok_num |
763 | #define __reserved____reserved763 _token(__reserved, __counter__)__reserved763 |
764 | |
765 | struct mcx_cap_device { |
766 | uint8_t reserved0[16]; |
767 | |
768 | uint8_t log_max_srq_sz; |
769 | uint8_t log_max_qp_sz; |
770 | uint8_t __reserved____reserved770[1]; |
771 | uint8_t log_max_qp; /* 5 bits */ |
772 | #define MCX_CAP_DEVICE_LOG_MAX_QP0x1f 0x1f |
773 | |
774 | uint8_t __reserved____reserved774[1]; |
775 | uint8_t log_max_srq; /* 5 bits */ |
776 | #define MCX_CAP_DEVICE_LOG_MAX_SRQ0x1f 0x1f |
777 | uint8_t __reserved____reserved777[2]; |
778 | |
779 | uint8_t __reserved____reserved779[1]; |
780 | uint8_t log_max_cq_sz; |
781 | uint8_t __reserved____reserved781[1]; |
782 | uint8_t log_max_cq; /* 5 bits */ |
783 | #define MCX_CAP_DEVICE_LOG_MAX_CQ0x1f 0x1f |
784 | |
785 | uint8_t log_max_eq_sz; |
786 | uint8_t log_max_mkey; /* 6 bits */ |
787 | #define MCX_CAP_DEVICE_LOG_MAX_MKEY0x3f 0x3f |
788 | uint8_t __reserved____reserved788[1]; |
789 | uint8_t log_max_eq; /* 4 bits */ |
790 | #define MCX_CAP_DEVICE_LOG_MAX_EQ0x0f 0x0f |
791 | |
792 | uint8_t max_indirection; |
793 | uint8_t log_max_mrw_sz; /* 7 bits */ |
794 | #define MCX_CAP_DEVICE_LOG_MAX_MRW_SZ0x7f 0x7f |
795 | uint8_t teardown_log_max_msf_list_size; |
796 | #define MCX_CAP_DEVICE_FORCE_TEARDOWN0x80 0x80 |
797 | #define MCX_CAP_DEVICE_LOG_MAX_MSF_LIST_SIZE0x3f \ |
798 | 0x3f |
799 | uint8_t log_max_klm_list_size; /* 6 bits */ |
800 | #define MCX_CAP_DEVICE_LOG_MAX_KLM_LIST_SIZE0x3f \ |
801 | 0x3f |
802 | |
803 | uint8_t __reserved____reserved803[1]; |
804 | uint8_t log_max_ra_req_dc; /* 6 bits */ |
805 | #define MCX_CAP_DEVICE_LOG_MAX_REQ_DC0x3f 0x3f |
806 | uint8_t __reserved____reserved806[1]; |
807 | uint8_t log_max_ra_res_dc; /* 6 bits */ |
808 | #define MCX_CAP_DEVICE_LOG_MAX_RA_RES_DC0x3f \ |
809 | 0x3f |
810 | |
811 | uint8_t __reserved____reserved811[1]; |
812 | uint8_t log_max_ra_req_qp; /* 6 bits */ |
813 | #define MCX_CAP_DEVICE_LOG_MAX_RA_REQ_QP0x3f \ |
814 | 0x3f |
815 | uint8_t __reserved____reserved815[1]; |
816 | uint8_t log_max_ra_res_qp; /* 6 bits */ |
817 | #define MCX_CAP_DEVICE_LOG_MAX_RA_RES_QP0x3f \ |
818 | 0x3f |
819 | |
820 | uint8_t flags1; |
821 | #define MCX_CAP_DEVICE_END_PAD0x80 0x80 |
822 | #define MCX_CAP_DEVICE_CC_QUERY_ALLOWED0x40 0x40 |
823 | #define MCX_CAP_DEVICE_CC_MODIFY_ALLOWED0x20 \ |
824 | 0x20 |
825 | #define MCX_CAP_DEVICE_START_PAD0x10 0x10 |
826 | #define MCX_CAP_DEVICE_128BYTE_CACHELINE0x08 \ |
827 | 0x08 |
828 | uint8_t __reserved____reserved828[1]; |
829 | uint16_t gid_table_size; |
830 | |
831 | uint16_t flags2; |
832 | #define MCX_CAP_DEVICE_OUT_OF_SEQ_CNT0x8000 0x8000 |
833 | #define MCX_CAP_DEVICE_VPORT_COUNTERS0x4000 0x4000 |
834 | #define MCX_CAP_DEVICE_RETRANSMISSION_Q_COUNTERS0x2000 \ |
835 | 0x2000 |
836 | #define MCX_CAP_DEVICE_DEBUG0x1000 0x1000 |
837 | #define MCX_CAP_DEVICE_MODIFY_RQ_COUNTERS_SET_ID0x8000 \ |
838 | 0x8000 |
839 | #define MCX_CAP_DEVICE_RQ_DELAY_DROP0x4000 0x4000 |
840 | #define MCX_CAP_DEVICe_MAX_QP_CNT_MASK0x03ff 0x03ff |
841 | uint16_t pkey_table_size; |
842 | |
843 | uint8_t flags3; |
844 | #define MCX_CAP_DEVICE_VPORT_GROUP_MANAGER0x80 \ |
845 | 0x80 |
846 | #define MCX_CAP_DEVICE_VHCA_GROUP_MANAGER0x40 \ |
847 | 0x40 |
848 | #define MCX_CAP_DEVICE_IB_VIRTUAL0x20 0x20 |
849 | #define MCX_CAP_DEVICE_ETH_VIRTUAL0x10 0x10 |
850 | #define MCX_CAP_DEVICE_ETS0x04 0x04 |
851 | #define MCX_CAP_DEVICE_NIC_FLOW_TABLE0x02 0x02 |
852 | #define MCX_CAP_DEVICE_ESWITCH_FLOW_TABLE0x01 \ |
853 | 0x01 |
854 | uint8_t local_ca_ack_delay; /* 5 bits */ |
855 | #define MCX_CAP_DEVICE_LOCAL_CA_ACK_DELAY0x1f \ |
856 | 0x1f |
857 | #define MCX_CAP_DEVICE_MCAM_REG0x40 0x40 |
858 | uint8_t port_type; |
859 | #define MCX_CAP_DEVICE_PORT_MODULE_EVENT0x80 \ |
860 | 0x80 |
861 | #define MCX_CAP_DEVICE_PORT_TYPE0x03 0x03 |
862 | #define MCX_CAP_DEVICE_PORT_TYPE_ETH0x01 0x01 |
863 | uint8_t num_ports; |
864 | |
865 | uint8_t snapshot_log_max_msg; |
866 | #define MCX_CAP_DEVICE_SNAPSHOT0x80 0x80 |
867 | #define MCX_CAP_DEVICE_LOG_MAX_MSG0x1f 0x1f |
868 | uint8_t max_tc; /* 4 bits */ |
869 | #define MCX_CAP_DEVICE_MAX_TC0x0f 0x0f |
870 | uint8_t flags4; |
871 | #define MCX_CAP_DEVICE_TEMP_WARN_EVENT0x80 0x80 |
872 | #define MCX_CAP_DEVICE_DCBX0x40 0x40 |
873 | #define MCX_CAP_DEVICE_ROL_S0x02 0x02 |
874 | #define MCX_CAP_DEVICE_ROL_G0x01 0x01 |
875 | uint8_t wol; |
876 | #define MCX_CAP_DEVICE_WOL_S0x40 0x40 |
877 | #define MCX_CAP_DEVICE_WOL_G0x20 0x20 |
878 | #define MCX_CAP_DEVICE_WOL_A0x10 0x10 |
879 | #define MCX_CAP_DEVICE_WOL_B0x08 0x08 |
880 | #define MCX_CAP_DEVICE_WOL_M0x04 0x04 |
881 | #define MCX_CAP_DEVICE_WOL_U0x02 0x02 |
882 | #define MCX_CAP_DEVICE_WOL_P0x01 0x01 |
883 | |
884 | uint16_t stat_rate_support; |
885 | uint8_t __reserved____reserved885[1]; |
886 | uint8_t cqe_version; /* 4 bits */ |
887 | #define MCX_CAP_DEVICE_CQE_VERSION0x0f 0x0f |
888 | |
889 | uint32_t flags5; |
890 | #define MCX_CAP_DEVICE_COMPACT_ADDRESS_VECTOR0x80000000 \ |
891 | 0x80000000 |
892 | #define MCX_CAP_DEVICE_STRIDING_RQ0x40000000 0x40000000 |
893 | #define MCX_CAP_DEVICE_IPOIP_ENHANCED_OFFLOADS0x10000000 \ |
894 | 0x10000000 |
895 | #define MCX_CAP_DEVICE_IPOIP_IPOIP_OFFLOADS0x08000000 \ |
896 | 0x08000000 |
897 | #define MCX_CAP_DEVICE_DC_CONNECT_CP0x00040000 0x00040000 |
898 | #define MCX_CAP_DEVICE_DC_CNAK_DRACE0x00020000 0x00020000 |
899 | #define MCX_CAP_DEVICE_DRAIN_SIGERR0x00010000 0x00010000 |
900 | #define MCX_CAP_DEVICE_DRAIN_SIGERR0x00010000 0x00010000 |
901 | #define MCX_CAP_DEVICE_CMDIF_CHECKSUM0x0000c000 0x0000c000 |
902 | #define MCX_CAP_DEVICE_SIGERR_QCE0x00002000 0x00002000 |
903 | #define MCX_CAP_DEVICE_WQ_SIGNATURE0x00000800 0x00000800 |
904 | #define MCX_CAP_DEVICE_SCTR_DATA_CQE0x00000400 0x00000400 |
905 | #define MCX_CAP_DEVICE_SHO0x00000100 0x00000100 |
906 | #define MCX_CAP_DEVICE_TPH0x00000080 0x00000080 |
907 | #define MCX_CAP_DEVICE_RF0x00000040 0x00000040 |
908 | #define MCX_CAP_DEVICE_DCT0x00000020 0x00000020 |
909 | #define MCX_CAP_DEVICE_QOS0x00000010 0x00000010 |
910 | #define MCX_CAP_DEVICe_ETH_NET_OFFLOADS0x00000008 0x00000008 |
911 | #define MCX_CAP_DEVICE_ROCE0x00000004 0x00000004 |
912 | #define MCX_CAP_DEVICE_ATOMIC0x00000002 0x00000002 |
913 | |
914 | uint32_t flags6; |
915 | #define MCX_CAP_DEVICE_CQ_OI0x80000000 0x80000000 |
916 | #define MCX_CAP_DEVICE_CQ_RESIZE0x40000000 0x40000000 |
917 | #define MCX_CAP_DEVICE_CQ_MODERATION0x20000000 0x20000000 |
918 | #define MCX_CAP_DEVICE_CQ_PERIOD_MODE_MODIFY0x10000000 \ |
919 | 0x10000000 |
920 | #define MCX_CAP_DEVICE_CQ_INVALIDATE0x08000000 0x08000000 |
921 | #define MCX_CAP_DEVICE_RESERVED_AT_2550x04000000 0x04000000 |
922 | #define MCX_CAP_DEVICE_CQ_EQ_REMAP0x02000000 0x02000000 |
923 | #define MCX_CAP_DEVICE_PG0x01000000 0x01000000 |
924 | #define MCX_CAP_DEVICE_BLOCK_LB_MC0x00800000 0x00800000 |
925 | #define MCX_CAP_DEVICE_EXPONENTIAL_BACKOFF0x00400000 \ |
926 | 0x00400000 |
927 | #define MCX_CAP_DEVICE_SCQE_BREAK_MODERATION0x00200000 \ |
928 | 0x00200000 |
929 | #define MCX_CAP_DEVICE_CQ_PERIOD_START_FROM_CQE0x00100000 \ |
930 | 0x00100000 |
931 | #define MCX_CAP_DEVICE_CD0x00080000 0x00080000 |
932 | #define MCX_CAP_DEVICE_ATM0x00040000 0x00040000 |
933 | #define MCX_CAP_DEVICE_APM0x00020000 0x00020000 |
934 | #define MCX_CAP_DEVICE_IMAICL0x00010000 0x00010000 |
935 | #define MCX_CAP_DEVICE_QKV0x00000200 0x00000200 |
936 | #define MCX_CAP_DEVICE_PKV0x00000100 0x00000100 |
937 | #define MCX_CAP_DEVICE_SET_DETH_SQPN0x00000080 0x00000080 |
938 | #define MCX_CAP_DEVICE_XRC0x00000008 0x00000008 |
939 | #define MCX_CAP_DEVICE_UD0x00000004 0x00000004 |
940 | #define MCX_CAP_DEVICE_UC0x00000002 0x00000002 |
941 | #define MCX_CAP_DEVICE_RC0x00000001 0x00000001 |
942 | |
943 | uint8_t uar_flags; |
944 | #define MCX_CAP_DEVICE_UAR_4K0x80 0x80 |
945 | uint8_t uar_sz; /* 6 bits */ |
946 | #define MCX_CAP_DEVICE_UAR_SZ0x3f 0x3f |
947 | uint8_t __reserved____reserved947[1]; |
948 | uint8_t log_pg_sz; |
949 | |
950 | uint8_t flags7; |
951 | #define MCX_CAP_DEVICE_BF0x80 0x80 |
952 | #define MCX_CAP_DEVICE_DRIVER_VERSION0x40 0x40 |
953 | #define MCX_CAP_DEVICE_PAD_TX_ETH_PACKET0x20 \ |
954 | 0x20 |
955 | uint8_t log_bf_reg_size; /* 5 bits */ |
956 | #define MCX_CAP_DEVICE_LOG_BF_REG_SIZE0x1f 0x1f |
957 | uint8_t __reserved____reserved957[2]; |
958 | |
959 | uint16_t num_of_diagnostic_counters; |
960 | uint16_t max_wqe_sz_sq; |
961 | |
962 | uint8_t __reserved____reserved962[2]; |
963 | uint16_t max_wqe_sz_rq; |
964 | |
965 | uint8_t __reserved____reserved965[2]; |
966 | uint16_t max_wqe_sz_sq_dc; |
967 | |
968 | uint32_t max_qp_mcg; /* 25 bits */ |
969 | #define MCX_CAP_DEVICE_MAX_QP_MCG0x1ffffff 0x1ffffff |
970 | |
971 | uint8_t __reserved____reserved971[3]; |
972 | uint8_t log_max_mcq; |
973 | |
974 | uint8_t log_max_transport_domain; /* 5 bits */ |
975 | #define MCX_CAP_DEVICE_LOG_MAX_TRANSORT_DOMAIN0x1f \ |
976 | 0x1f |
977 | uint8_t log_max_pd; /* 5 bits */ |
978 | #define MCX_CAP_DEVICE_LOG_MAX_PD0x1f 0x1f |
979 | uint8_t __reserved____reserved979[1]; |
980 | uint8_t log_max_xrcd; /* 5 bits */ |
981 | #define MCX_CAP_DEVICE_LOG_MAX_XRCD0x1f 0x1f |
982 | |
983 | uint8_t __reserved____reserved983[2]; |
984 | uint16_t max_flow_counter; |
985 | |
986 | uint8_t log_max_rq; /* 5 bits */ |
987 | #define MCX_CAP_DEVICE_LOG_MAX_RQ0x1f 0x1f |
988 | uint8_t log_max_sq; /* 5 bits */ |
989 | #define MCX_CAP_DEVICE_LOG_MAX_SQ0x1f 0x1f |
990 | uint8_t log_max_tir; /* 5 bits */ |
991 | #define MCX_CAP_DEVICE_LOG_MAX_TIR0x1f 0x1f |
992 | uint8_t log_max_tis; /* 5 bits */ |
993 | #define MCX_CAP_DEVICE_LOG_MAX_TIS0x1f 0x1f |
994 | |
995 | uint8_t flags8; |
996 | #define MCX_CAP_DEVICE_BASIC_CYCLIC_RCV_WQE0x80 \ |
997 | 0x80 |
998 | #define MCX_CAP_DEVICE_LOG_MAX_RMP0x1f 0x1f |
999 | uint8_t log_max_rqt; /* 5 bits */ |
1000 | #define MCX_CAP_DEVICE_LOG_MAX_RQT0x1f 0x1f |
1001 | uint8_t log_max_rqt_size; /* 5 bits */ |
1002 | #define MCX_CAP_DEVICE_LOG_MAX_RQT_SIZE0x1f 0x1f |
1003 | uint8_t log_max_tis_per_sq; /* 5 bits */ |
1004 | #define MCX_CAP_DEVICE_LOG_MAX_TIS_PER_SQ0x1f \ |
1005 | 0x1f |
1006 | |
1007 | uint8_t flags9; |
1008 | #define MXC_CAP_DEVICE_EXT_STRIDE_NUM_RANGES0x80 \ |
1009 | 0x80 |
1010 | #define MXC_CAP_DEVICE_LOG_MAX_STRIDE_SZ_RQ0x1f \ |
1011 | 0x1f |
1012 | uint8_t log_min_stride_sz_rq; /* 5 bits */ |
1013 | #define MXC_CAP_DEVICE_LOG_MIN_STRIDE_SZ_RQ0x1f \ |
1014 | 0x1f |
1015 | uint8_t log_max_stride_sz_sq; /* 5 bits */ |
1016 | #define MXC_CAP_DEVICE_LOG_MAX_STRIDE_SZ_SQ0x1f \ |
1017 | 0x1f |
1018 | uint8_t log_min_stride_sz_sq; /* 5 bits */ |
1019 | #define MXC_CAP_DEVICE_LOG_MIN_STRIDE_SZ_SQ0x1f \ |
1020 | 0x1f |
1021 | |
1022 | uint8_t log_max_hairpin_queues; |
1023 | #define MXC_CAP_DEVICE_HAIRPIN0x80 0x80 |
1024 | #define MXC_CAP_DEVICE_LOG_MAX_HAIRPIN_QUEUES0x1f \ |
1025 | 0x1f |
1026 | uint8_t log_min_hairpin_queues; |
1027 | #define MXC_CAP_DEVICE_LOG_MIN_HAIRPIN_QUEUES0x1f \ |
1028 | 0x1f |
1029 | uint8_t log_max_hairpin_num_packets; |
1030 | #define MXC_CAP_DEVICE_LOG_MAX_HAIRPIN_NUM_PACKETS0x1f \ |
1031 | 0x1f |
1032 | uint8_t log_max_mq_sz; |
1033 | #define MXC_CAP_DEVICE_LOG_MAX_WQ_SZ0x1f \ |
1034 | 0x1f |
1035 | |
1036 | uint8_t log_min_hairpin_wq_data_sz; |
1037 | #define MXC_CAP_DEVICE_NIC_VPORT_CHANGE_EVENT0x80 \ |
1038 | 0x80 |
1039 | #define MXC_CAP_DEVICE_DISABLE_LOCAL_LB_UC0x40 \ |
1040 | 0x40 |
1041 | #define MXC_CAP_DEVICE_DISABLE_LOCAL_LB_MC0x20 \ |
1042 | 0x20 |
1043 | #define MCX_CAP_DEVICE_LOG_MIN_HAIRPIN_WQ_DATA_SZ0x1f \ |
1044 | 0x1f |
1045 | uint8_t log_max_vlan_list; |
1046 | #define MXC_CAP_DEVICE_SYSTEM_IMAGE_GUID_MODIFIABLE0x80 \ |
1047 | 0x80 |
1048 | #define MXC_CAP_DEVICE_LOG_MAX_VLAN_LIST0x1f \ |
1049 | 0x1f |
1050 | uint8_t log_max_current_mc_list; |
1051 | #define MXC_CAP_DEVICE_LOG_MAX_CURRENT_MC_LIST0x1f \ |
1052 | 0x1f |
1053 | uint8_t log_max_current_uc_list; |
1054 | #define MXC_CAP_DEVICE_LOG_MAX_CURRENT_UC_LIST0x1f \ |
1055 | 0x1f |
1056 | |
1057 | uint8_t __reserved____reserved1057[4]; |
1058 | |
1059 | uint32_t create_qp_start_hint; /* 24 bits */ |
1060 | |
1061 | uint8_t log_max_uctx; /* 5 bits */ |
1062 | #define MXC_CAP_DEVICE_LOG_MAX_UCTX0x1f 0x1f |
1063 | uint8_t log_max_umem; /* 5 bits */ |
1064 | #define MXC_CAP_DEVICE_LOG_MAX_UMEM0x1f 0x1f |
1065 | uint16_t max_num_eqs; |
1066 | |
1067 | uint8_t log_max_l2_table; /* 5 bits */ |
1068 | #define MXC_CAP_DEVICE_LOG_MAX_L2_TABLE0x1f 0x1f |
1069 | uint8_t __reserved____reserved1069[1]; |
1070 | uint16_t log_uar_page_sz; |
1071 | |
1072 | uint8_t __reserved____reserved1072[8]; |
1073 | |
1074 | uint32_t device_frequency_mhz; |
1075 | uint32_t device_frequency_khz; |
1076 | } __packed__attribute__((__packed__)) __aligned(8)__attribute__((__aligned__(8))); |
1077 | |
1078 | CTASSERT(offsetof(struct mcx_cap_device, max_indirection) == 0x20)extern char _ctassert[(__builtin_offsetof(struct mcx_cap_device , max_indirection) == 0x20) ? 1 : -1 ] __attribute__((__unused__ )); |
1079 | CTASSERT(offsetof(struct mcx_cap_device, flags1) == 0x2c)extern char _ctassert[(__builtin_offsetof(struct mcx_cap_device , flags1) == 0x2c) ? 1 : -1 ] __attribute__((__unused__)); |
1080 | CTASSERT(offsetof(struct mcx_cap_device, flags2) == 0x30)extern char _ctassert[(__builtin_offsetof(struct mcx_cap_device , flags2) == 0x30) ? 1 : -1 ] __attribute__((__unused__)); |
1081 | CTASSERT(offsetof(struct mcx_cap_device, snapshot_log_max_msg) == 0x38)extern char _ctassert[(__builtin_offsetof(struct mcx_cap_device , snapshot_log_max_msg) == 0x38) ? 1 : -1 ] __attribute__((__unused__ )); |
1082 | CTASSERT(offsetof(struct mcx_cap_device, flags5) == 0x40)extern char _ctassert[(__builtin_offsetof(struct mcx_cap_device , flags5) == 0x40) ? 1 : -1 ] __attribute__((__unused__)); |
1083 | CTASSERT(offsetof(struct mcx_cap_device, flags7) == 0x4c)extern char _ctassert[(__builtin_offsetof(struct mcx_cap_device , flags7) == 0x4c) ? 1 : -1 ] __attribute__((__unused__)); |
1084 | CTASSERT(offsetof(struct mcx_cap_device, device_frequency_mhz) == 0x98)extern char _ctassert[(__builtin_offsetof(struct mcx_cap_device , device_frequency_mhz) == 0x98) ? 1 : -1 ] __attribute__((__unused__ )); |
1085 | CTASSERT(offsetof(struct mcx_cap_device, device_frequency_khz) == 0x9c)extern char _ctassert[(__builtin_offsetof(struct mcx_cap_device , device_frequency_khz) == 0x9c) ? 1 : -1 ] __attribute__((__unused__ )); |
1086 | CTASSERT(sizeof(struct mcx_cap_device) <= MCX_CMDQ_MAILBOX_DATASIZE)extern char _ctassert[(sizeof(struct mcx_cap_device) <= 512 ) ? 1 : -1 ] __attribute__((__unused__)); |
1087 | |
1088 | struct mcx_cmd_set_driver_version_in { |
1089 | uint16_t cmd_opcode; |
1090 | uint8_t cmd_reserved0[4]; |
1091 | uint16_t cmd_op_mod; |
1092 | uint8_t cmd_reserved1[8]; |
1093 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1094 | |
1095 | struct mcx_cmd_set_driver_version_out { |
1096 | uint8_t cmd_status; |
1097 | uint8_t cmd_reserved0[3]; |
1098 | uint32_t cmd_syndrome; |
1099 | uint8_t cmd_reserved1[8]; |
1100 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1101 | |
1102 | struct mcx_cmd_set_driver_version { |
1103 | uint8_t cmd_driver_version[64]; |
1104 | } __packed__attribute__((__packed__)) __aligned(8)__attribute__((__aligned__(8))); |
1105 | |
1106 | struct mcx_cmd_modify_nic_vport_context_in { |
1107 | uint16_t cmd_opcode; |
1108 | uint8_t cmd_reserved0[4]; |
1109 | uint16_t cmd_op_mod; |
1110 | uint8_t cmd_reserved1[4]; |
1111 | uint32_t cmd_field_select; |
1112 | #define MCX_CMD_MODIFY_NIC_VPORT_CONTEXT_FIELD_ADDR0x04 0x04 |
1113 | #define MCX_CMD_MODIFY_NIC_VPORT_CONTEXT_FIELD_PROMISC0x10 0x10 |
1114 | #define MCX_CMD_MODIFY_NIC_VPORT_CONTEXT_FIELD_MTU0x40 0x40 |
1115 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1116 | |
1117 | struct mcx_cmd_modify_nic_vport_context_out { |
1118 | uint8_t cmd_status; |
1119 | uint8_t cmd_reserved0[3]; |
1120 | uint32_t cmd_syndrome; |
1121 | uint8_t cmd_reserved1[8]; |
1122 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1123 | |
1124 | struct mcx_cmd_query_nic_vport_context_in { |
1125 | uint16_t cmd_opcode; |
1126 | uint8_t cmd_reserved0[4]; |
1127 | uint16_t cmd_op_mod; |
1128 | uint8_t cmd_reserved1[4]; |
1129 | uint8_t cmd_allowed_list_type; |
1130 | uint8_t cmd_reserved2[3]; |
1131 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1132 | |
1133 | struct mcx_cmd_query_nic_vport_context_out { |
1134 | uint8_t cmd_status; |
1135 | uint8_t cmd_reserved0[3]; |
1136 | uint32_t cmd_syndrome; |
1137 | uint8_t cmd_reserved1[8]; |
1138 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1139 | |
1140 | struct mcx_nic_vport_ctx { |
1141 | uint32_t vp_min_wqe_inline_mode; |
1142 | uint8_t vp_reserved0[32]; |
1143 | uint32_t vp_mtu; |
1144 | uint8_t vp_reserved1[200]; |
1145 | uint16_t vp_flags; |
1146 | #define MCX_NIC_VPORT_CTX_LIST_UC_MAC(0) (0) |
1147 | #define MCX_NIC_VPORT_CTX_LIST_MC_MAC(1 << 24) (1 << 24) |
1148 | #define MCX_NIC_VPORT_CTX_LIST_VLAN(2 << 24) (2 << 24) |
1149 | #define MCX_NIC_VPORT_CTX_PROMISC_ALL(1 << 13) (1 << 13) |
1150 | #define MCX_NIC_VPORT_CTX_PROMISC_MCAST(1 << 14) (1 << 14) |
1151 | #define MCX_NIC_VPORT_CTX_PROMISC_UCAST(1 << 15) (1 << 15) |
1152 | uint16_t vp_allowed_list_size; |
1153 | uint64_t vp_perm_addr; |
1154 | uint8_t vp_reserved2[4]; |
1155 | /* allowed list follows */ |
1156 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1157 | |
1158 | struct mcx_counter { |
1159 | uint64_t packets; |
1160 | uint64_t octets; |
1161 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1162 | |
1163 | struct mcx_nic_vport_counters { |
1164 | struct mcx_counter rx_err; |
1165 | struct mcx_counter tx_err; |
1166 | uint8_t reserved0[64]; /* 0x30 */ |
1167 | struct mcx_counter rx_bcast; |
1168 | struct mcx_counter tx_bcast; |
1169 | struct mcx_counter rx_ucast; |
1170 | struct mcx_counter tx_ucast; |
1171 | struct mcx_counter rx_mcast; |
1172 | struct mcx_counter tx_mcast; |
1173 | uint8_t reserved1[0x210 - 0xd0]; |
1174 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1175 | |
1176 | struct mcx_cmd_query_vport_counters_in { |
1177 | uint16_t cmd_opcode; |
1178 | uint8_t cmd_reserved0[4]; |
1179 | uint16_t cmd_op_mod; |
1180 | uint8_t cmd_reserved1[8]; |
1181 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1182 | |
1183 | struct mcx_cmd_query_vport_counters_mb_in { |
1184 | uint8_t cmd_reserved0[8]; |
1185 | uint8_t cmd_clear; |
1186 | uint8_t cmd_reserved1[7]; |
1187 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1188 | |
1189 | struct mcx_cmd_query_vport_counters_out { |
1190 | uint8_t cmd_status; |
1191 | uint8_t cmd_reserved0[3]; |
1192 | uint32_t cmd_syndrome; |
1193 | uint8_t cmd_reserved1[8]; |
1194 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1195 | |
1196 | struct mcx_cmd_query_flow_counter_in { |
1197 | uint16_t cmd_opcode; |
1198 | uint8_t cmd_reserved0[4]; |
1199 | uint16_t cmd_op_mod; |
1200 | uint8_t cmd_reserved1[8]; |
1201 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1202 | |
1203 | struct mcx_cmd_query_flow_counter_mb_in { |
1204 | uint8_t cmd_reserved0[8]; |
1205 | uint8_t cmd_clear; |
1206 | uint8_t cmd_reserved1[5]; |
1207 | uint16_t cmd_flow_counter_id; |
1208 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1209 | |
1210 | struct mcx_cmd_query_flow_counter_out { |
1211 | uint8_t cmd_status; |
1212 | uint8_t cmd_reserved0[3]; |
1213 | uint32_t cmd_syndrome; |
1214 | uint8_t cmd_reserved1[8]; |
1215 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1216 | |
1217 | struct mcx_cmd_alloc_uar_in { |
1218 | uint16_t cmd_opcode; |
1219 | uint8_t cmd_reserved0[4]; |
1220 | uint16_t cmd_op_mod; |
1221 | uint8_t cmd_reserved1[8]; |
1222 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1223 | |
1224 | struct mcx_cmd_alloc_uar_out { |
1225 | uint8_t cmd_status; |
1226 | uint8_t cmd_reserved0[3]; |
1227 | uint32_t cmd_syndrome; |
1228 | uint32_t cmd_uar; |
1229 | uint8_t cmd_reserved1[4]; |
1230 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1231 | |
1232 | struct mcx_cmd_query_special_ctx_in { |
1233 | uint16_t cmd_opcode; |
1234 | uint8_t cmd_reserved0[4]; |
1235 | uint16_t cmd_op_mod; |
1236 | uint8_t cmd_reserved1[8]; |
1237 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1238 | |
1239 | struct mcx_cmd_query_special_ctx_out { |
1240 | uint8_t cmd_status; |
1241 | uint8_t cmd_reserved0[3]; |
1242 | uint32_t cmd_syndrome; |
1243 | uint8_t cmd_reserved1[4]; |
1244 | uint32_t cmd_resd_lkey; |
1245 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1246 | |
1247 | struct mcx_eq_ctx { |
1248 | uint32_t eq_status; |
1249 | #define MCX_EQ_CTX_STATE_SHIFT8 8 |
1250 | #define MCX_EQ_CTX_STATE_MASK(0xf << 8) (0xf << MCX_EQ_CTX_STATE_SHIFT8) |
1251 | #define MCX_EQ_CTX_STATE_ARMED0x9 0x9 |
1252 | #define MCX_EQ_CTX_STATE_FIRED0xa 0xa |
1253 | #define MCX_EQ_CTX_OI_SHIFT17 17 |
1254 | #define MCX_EQ_CTX_OI(1 << 17) (1 << MCX_EQ_CTX_OI_SHIFT17) |
1255 | #define MCX_EQ_CTX_EC_SHIFT18 18 |
1256 | #define MCX_EQ_CTX_EC(1 << 18) (1 << MCX_EQ_CTX_EC_SHIFT18) |
1257 | #define MCX_EQ_CTX_STATUS_SHIFT28 28 |
1258 | #define MCX_EQ_CTX_STATUS_MASK(0xf << 28) (0xf << MCX_EQ_CTX_STATUS_SHIFT28) |
1259 | #define MCX_EQ_CTX_STATUS_OK0x0 0x0 |
1260 | #define MCX_EQ_CTX_STATUS_EQ_WRITE_FAILURE0xa 0xa |
1261 | uint32_t eq_reserved1; |
1262 | uint32_t eq_page_offset; |
1263 | #define MCX_EQ_CTX_PAGE_OFFSET_SHIFT5 5 |
1264 | uint32_t eq_uar_size; |
1265 | #define MCX_EQ_CTX_UAR_PAGE_MASK0xffffff 0xffffff |
1266 | #define MCX_EQ_CTX_LOG_EQ_SIZE_SHIFT24 24 |
1267 | uint32_t eq_reserved2; |
1268 | uint8_t eq_reserved3[3]; |
1269 | uint8_t eq_intr; |
1270 | uint32_t eq_log_page_size; |
1271 | #define MCX_EQ_CTX_LOG_PAGE_SIZE_SHIFT24 24 |
1272 | uint32_t eq_reserved4[3]; |
1273 | uint32_t eq_consumer_counter; |
1274 | uint32_t eq_producer_counter; |
1275 | #define MCX_EQ_CTX_COUNTER_MASK0xffffff 0xffffff |
1276 | uint32_t eq_reserved5[4]; |
1277 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1278 | |
1279 | CTASSERT(sizeof(struct mcx_eq_ctx) == 64)extern char _ctassert[(sizeof(struct mcx_eq_ctx) == 64) ? 1 : -1 ] __attribute__((__unused__)); |
1280 | |
1281 | struct mcx_cmd_create_eq_in { |
1282 | uint16_t cmd_opcode; |
1283 | uint8_t cmd_reserved0[4]; |
1284 | uint16_t cmd_op_mod; |
1285 | uint8_t cmd_reserved1[8]; |
1286 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1287 | |
1288 | struct mcx_cmd_create_eq_mb_in { |
1289 | struct mcx_eq_ctx cmd_eq_ctx; |
1290 | uint8_t cmd_reserved0[8]; |
1291 | uint64_t cmd_event_bitmask; |
1292 | #define MCX_EVENT_TYPE_COMPLETION0x00 0x00 |
1293 | #define MCX_EVENT_TYPE_CQ_ERROR0x04 0x04 |
1294 | #define MCX_EVENT_TYPE_INTERNAL_ERROR0x08 0x08 |
1295 | #define MCX_EVENT_TYPE_PORT_CHANGE0x09 0x09 |
1296 | #define MCX_EVENT_TYPE_CMD_COMPLETION0x0a 0x0a |
1297 | #define MCX_EVENT_TYPE_PAGE_REQUEST0x0b 0x0b |
1298 | #define MCX_EVENT_TYPE_LAST_WQE0x13 0x13 |
1299 | uint8_t cmd_reserved1[176]; |
1300 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1301 | |
1302 | struct mcx_cmd_create_eq_out { |
1303 | uint8_t cmd_status; |
1304 | uint8_t cmd_reserved0[3]; |
1305 | uint32_t cmd_syndrome; |
1306 | uint32_t cmd_eqn; |
1307 | uint8_t cmd_reserved1[4]; |
1308 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1309 | |
1310 | struct mcx_cmd_query_eq_in { |
1311 | uint16_t cmd_opcode; |
1312 | uint8_t cmd_reserved0[4]; |
1313 | uint16_t cmd_op_mod; |
1314 | uint32_t cmd_eqn; |
1315 | uint8_t cmd_reserved1[4]; |
1316 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1317 | |
1318 | struct mcx_cmd_query_eq_out { |
1319 | uint8_t cmd_status; |
1320 | uint8_t cmd_reserved0[3]; |
1321 | uint32_t cmd_syndrome; |
1322 | uint8_t cmd_reserved1[8]; |
1323 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1324 | |
1325 | struct mcx_eq_entry { |
1326 | uint8_t eq_reserved1; |
1327 | uint8_t eq_event_type; |
1328 | uint8_t eq_reserved2; |
1329 | uint8_t eq_event_sub_type; |
1330 | |
1331 | uint8_t eq_reserved3[28]; |
1332 | uint32_t eq_event_data[7]; |
1333 | uint8_t eq_reserved4[2]; |
1334 | uint8_t eq_signature; |
1335 | uint8_t eq_owner; |
1336 | #define MCX_EQ_ENTRY_OWNER_INIT1 1 |
1337 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1338 | |
1339 | CTASSERT(sizeof(struct mcx_eq_entry) == 64)extern char _ctassert[(sizeof(struct mcx_eq_entry) == 64) ? 1 : -1 ] __attribute__((__unused__)); |
1340 | |
1341 | struct mcx_cmd_alloc_pd_in { |
1342 | uint16_t cmd_opcode; |
1343 | uint8_t cmd_reserved0[4]; |
1344 | uint16_t cmd_op_mod; |
1345 | uint8_t cmd_reserved1[8]; |
1346 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1347 | |
1348 | struct mcx_cmd_alloc_pd_out { |
1349 | uint8_t cmd_status; |
1350 | uint8_t cmd_reserved0[3]; |
1351 | uint32_t cmd_syndrome; |
1352 | uint32_t cmd_pd; |
1353 | uint8_t cmd_reserved1[4]; |
1354 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1355 | |
1356 | struct mcx_cmd_alloc_td_in { |
1357 | uint16_t cmd_opcode; |
1358 | uint8_t cmd_reserved0[4]; |
1359 | uint16_t cmd_op_mod; |
1360 | uint8_t cmd_reserved1[8]; |
1361 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1362 | |
1363 | struct mcx_cmd_alloc_td_out { |
1364 | uint8_t cmd_status; |
1365 | uint8_t cmd_reserved0[3]; |
1366 | uint32_t cmd_syndrome; |
1367 | uint32_t cmd_tdomain; |
1368 | uint8_t cmd_reserved1[4]; |
1369 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1370 | |
1371 | struct mcx_cmd_create_tir_in { |
1372 | uint16_t cmd_opcode; |
1373 | uint8_t cmd_reserved0[4]; |
1374 | uint16_t cmd_op_mod; |
1375 | uint8_t cmd_reserved1[8]; |
1376 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1377 | |
1378 | struct mcx_cmd_create_tir_mb_in { |
1379 | uint8_t cmd_reserved0[20]; |
1380 | uint32_t cmd_disp_type; |
1381 | #define MCX_TIR_CTX_DISP_TYPE_DIRECT0 0 |
1382 | #define MCX_TIR_CTX_DISP_TYPE_INDIRECT1 1 |
1383 | #define MCX_TIR_CTX_DISP_TYPE_SHIFT28 28 |
1384 | uint8_t cmd_reserved1[8]; |
1385 | uint32_t cmd_lro; |
1386 | uint8_t cmd_reserved2[8]; |
1387 | uint32_t cmd_inline_rqn; |
1388 | uint32_t cmd_indir_table; |
1389 | uint32_t cmd_tdomain; |
1390 | #define MCX_TIR_CTX_HASH_TOEPLITZ2 2 |
1391 | #define MCX_TIR_CTX_HASH_SHIFT28 28 |
1392 | uint8_t cmd_rx_hash_key[40]; |
1393 | uint32_t cmd_rx_hash_sel_outer; |
1394 | #define MCX_TIR_CTX_HASH_SEL_SRC_IP(1 << 0) (1 << 0) |
1395 | #define MCX_TIR_CTX_HASH_SEL_DST_IP(1 << 1) (1 << 1) |
1396 | #define MCX_TIR_CTX_HASH_SEL_SPORT(1 << 2) (1 << 2) |
1397 | #define MCX_TIR_CTX_HASH_SEL_DPORT(1 << 3) (1 << 3) |
1398 | #define MCX_TIR_CTX_HASH_SEL_IPV4(0 << 31) (0 << 31) |
1399 | #define MCX_TIR_CTX_HASH_SEL_IPV6(1 << 31) (1 << 31) |
1400 | #define MCX_TIR_CTX_HASH_SEL_TCP(0 << 30) (0 << 30) |
1401 | #define MCX_TIR_CTX_HASH_SEL_UDP(1 << 30) (1 << 30) |
1402 | uint32_t cmd_rx_hash_sel_inner; |
1403 | uint8_t cmd_reserved3[152]; |
1404 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1405 | |
1406 | struct mcx_cmd_create_tir_out { |
1407 | uint8_t cmd_status; |
1408 | uint8_t cmd_reserved0[3]; |
1409 | uint32_t cmd_syndrome; |
1410 | uint32_t cmd_tirn; |
1411 | uint8_t cmd_reserved1[4]; |
1412 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1413 | |
1414 | struct mcx_cmd_destroy_tir_in { |
1415 | uint16_t cmd_opcode; |
1416 | uint8_t cmd_reserved0[4]; |
1417 | uint16_t cmd_op_mod; |
1418 | uint32_t cmd_tirn; |
1419 | uint8_t cmd_reserved1[4]; |
1420 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1421 | |
1422 | struct mcx_cmd_destroy_tir_out { |
1423 | uint8_t cmd_status; |
1424 | uint8_t cmd_reserved0[3]; |
1425 | uint32_t cmd_syndrome; |
1426 | uint8_t cmd_reserved1[8]; |
1427 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1428 | |
1429 | struct mcx_cmd_create_tis_in { |
1430 | uint16_t cmd_opcode; |
1431 | uint8_t cmd_reserved0[4]; |
1432 | uint16_t cmd_op_mod; |
1433 | uint8_t cmd_reserved1[8]; |
1434 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1435 | |
1436 | struct mcx_cmd_create_tis_mb_in { |
1437 | uint8_t cmd_reserved[16]; |
1438 | uint32_t cmd_prio; |
1439 | uint8_t cmd_reserved1[32]; |
1440 | uint32_t cmd_tdomain; |
1441 | uint8_t cmd_reserved2[120]; |
1442 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1443 | |
1444 | struct mcx_cmd_create_tis_out { |
1445 | uint8_t cmd_status; |
1446 | uint8_t cmd_reserved0[3]; |
1447 | uint32_t cmd_syndrome; |
1448 | uint32_t cmd_tisn; |
1449 | uint8_t cmd_reserved1[4]; |
1450 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1451 | |
1452 | struct mcx_cmd_destroy_tis_in { |
1453 | uint16_t cmd_opcode; |
1454 | uint8_t cmd_reserved0[4]; |
1455 | uint16_t cmd_op_mod; |
1456 | uint32_t cmd_tisn; |
1457 | uint8_t cmd_reserved1[4]; |
1458 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1459 | |
1460 | struct mcx_cmd_destroy_tis_out { |
1461 | uint8_t cmd_status; |
1462 | uint8_t cmd_reserved0[3]; |
1463 | uint32_t cmd_syndrome; |
1464 | uint8_t cmd_reserved1[8]; |
1465 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1466 | |
1467 | struct mcx_cmd_create_rqt_in { |
1468 | uint16_t cmd_opcode; |
1469 | uint8_t cmd_reserved0[4]; |
1470 | uint16_t cmd_op_mod; |
1471 | uint8_t cmd_reserved1[8]; |
1472 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1473 | |
1474 | struct mcx_rqt_ctx { |
1475 | uint8_t cmd_reserved0[20]; |
1476 | uint16_t cmd_reserved1; |
1477 | uint16_t cmd_rqt_max_size; |
1478 | uint16_t cmd_reserved2; |
1479 | uint16_t cmd_rqt_actual_size; |
1480 | uint8_t cmd_reserved3[212]; |
1481 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1482 | |
1483 | struct mcx_cmd_create_rqt_mb_in { |
1484 | uint8_t cmd_reserved0[16]; |
1485 | struct mcx_rqt_ctx cmd_rqt; |
1486 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1487 | |
1488 | struct mcx_cmd_create_rqt_out { |
1489 | uint8_t cmd_status; |
1490 | uint8_t cmd_reserved0[3]; |
1491 | uint32_t cmd_syndrome; |
1492 | uint32_t cmd_rqtn; |
1493 | uint8_t cmd_reserved1[4]; |
1494 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1495 | |
1496 | struct mcx_cmd_destroy_rqt_in { |
1497 | uint16_t cmd_opcode; |
1498 | uint8_t cmd_reserved0[4]; |
1499 | uint16_t cmd_op_mod; |
1500 | uint32_t cmd_rqtn; |
1501 | uint8_t cmd_reserved1[4]; |
1502 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1503 | |
1504 | struct mcx_cmd_destroy_rqt_out { |
1505 | uint8_t cmd_status; |
1506 | uint8_t cmd_reserved0[3]; |
1507 | uint32_t cmd_syndrome; |
1508 | uint8_t cmd_reserved1[8]; |
1509 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1510 | |
/*
 * Completion queue context, 64 bytes, passed to CREATE_CQ and returned
 * by QUERY_CQ.  Status/state values live in sub-fields of the 32-bit
 * words, extracted with the shift/mask macros below.
 */
struct mcx_cq_ctx {
	uint32_t		cq_status;
#define MCX_CQ_CTX_STATUS_SHIFT		28
#define MCX_CQ_CTX_STATUS_MASK		(0xf << MCX_CQ_CTX_STATUS_SHIFT)
#define MCX_CQ_CTX_STATUS_OK		0x0
#define MCX_CQ_CTX_STATUS_OVERFLOW	0x9
#define MCX_CQ_CTX_STATUS_WRITE_FAIL	0xa
#define MCX_CQ_CTX_STATE_SHIFT		8
#define MCX_CQ_CTX_STATE_MASK		(0xf << MCX_CQ_CTX_STATE_SHIFT)
#define MCX_CQ_CTX_STATE_SOLICITED	0x6
#define MCX_CQ_CTX_STATE_ARMED		0x9
#define MCX_CQ_CTX_STATE_FIRED		0xa
	uint32_t		cq_reserved1;
	uint32_t		cq_page_offset;
	uint32_t		cq_uar_size;	/* UAR page and log2 CQ size */
#define MCX_CQ_CTX_UAR_PAGE_MASK	0xffffff
#define MCX_CQ_CTX_LOG_CQ_SIZE_SHIFT	24
	uint32_t		cq_period_max_count;	/* moderation params */
#define MCX_CQ_CTX_PERIOD_SHIFT		16
	uint32_t		cq_eqn;		/* event queue to notify */
	uint32_t		cq_log_page_size;
#define MCX_CQ_CTX_LOG_PAGE_SIZE_SHIFT	24
	uint32_t		cq_reserved2;
	uint32_t		cq_last_notified;
	uint32_t		cq_last_solicit;
	uint32_t		cq_consumer_counter;
	uint32_t		cq_producer_counter;
	uint8_t			cq_reserved3[8];
	uint64_t		cq_doorbell;	/* DMA address of doorbell record */
} __attribute__((__packed__)) __attribute__((__aligned__(4)));
1541 | |
1542 | CTASSERT(sizeof(struct mcx_cq_ctx) == 64)extern char _ctassert[(sizeof(struct mcx_cq_ctx) == 64) ? 1 : -1 ] __attribute__((__unused__)); |
1543 | |
1544 | struct mcx_cmd_create_cq_in { |
1545 | uint16_t cmd_opcode; |
1546 | uint8_t cmd_reserved0[4]; |
1547 | uint16_t cmd_op_mod; |
1548 | uint8_t cmd_reserved1[8]; |
1549 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1550 | |
1551 | struct mcx_cmd_create_cq_mb_in { |
1552 | struct mcx_cq_ctx cmd_cq_ctx; |
1553 | uint8_t cmd_reserved1[192]; |
1554 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1555 | |
1556 | struct mcx_cmd_create_cq_out { |
1557 | uint8_t cmd_status; |
1558 | uint8_t cmd_reserved0[3]; |
1559 | uint32_t cmd_syndrome; |
1560 | uint32_t cmd_cqn; |
1561 | uint8_t cmd_reserved1[4]; |
1562 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1563 | |
1564 | struct mcx_cmd_destroy_cq_in { |
1565 | uint16_t cmd_opcode; |
1566 | uint8_t cmd_reserved0[4]; |
1567 | uint16_t cmd_op_mod; |
1568 | uint32_t cmd_cqn; |
1569 | uint8_t cmd_reserved1[4]; |
1570 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1571 | |
1572 | struct mcx_cmd_destroy_cq_out { |
1573 | uint8_t cmd_status; |
1574 | uint8_t cmd_reserved0[3]; |
1575 | uint32_t cmd_syndrome; |
1576 | uint8_t cmd_reserved1[8]; |
1577 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1578 | |
1579 | struct mcx_cmd_query_cq_in { |
1580 | uint16_t cmd_opcode; |
1581 | uint8_t cmd_reserved0[4]; |
1582 | uint16_t cmd_op_mod; |
1583 | uint32_t cmd_cqn; |
1584 | uint8_t cmd_reserved1[4]; |
1585 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1586 | |
1587 | struct mcx_cmd_query_cq_out { |
1588 | uint8_t cmd_status; |
1589 | uint8_t cmd_reserved0[3]; |
1590 | uint32_t cmd_syndrome; |
1591 | uint8_t cmd_reserved1[8]; |
1592 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1593 | |
/*
 * Completion queue entry, 64 bytes, written by the device.
 * NOTE(review): the reserved members were generated by a __reserved__
 * line-number macro in the original source; the expanded names are kept
 * here so each padding field stays unique — confirm against upstream.
 */
struct mcx_cq_entry {
	uint32_t		__reserved1595;
	uint32_t		cq_lro;
	uint32_t		cq_lro_ack_seq_num;
	uint32_t		cq_rx_hash;	/* RSS hash of received packet */
	uint8_t			cq_rx_hash_type;
	uint8_t			cq_ml_path;
	uint16_t		__reserved1601;
	uint32_t		cq_checksum;
	uint32_t		__reserved1603;
	uint32_t		cq_flags;	/* csum-ok bits and VLAN tag */
#define MCX_CQ_ENTRY_FLAGS_L4_OK		(1 << 26)
#define MCX_CQ_ENTRY_FLAGS_L3_OK		(1 << 25)
#define MCX_CQ_ENTRY_FLAGS_L2_OK		(1 << 24)
#define MCX_CQ_ENTRY_FLAGS_CV			(1 << 16)
#define MCX_CQ_ENTRY_FLAGS_VLAN_MASK		(0xffff)

	uint32_t		cq_lro_srqn;
	uint32_t		__reserved1612[2];
	uint32_t		cq_byte_cnt;	/* received packet length */
	uint64_t		cq_timestamp;	/* device clock at completion */
	uint8_t			cq_rx_drops;
	uint8_t			cq_flow_tag[3];
	uint16_t		cq_wqe_count;
	uint8_t			cq_signature;
	uint8_t			cq_opcode_owner;	/* opcode, format, owner bit */
#define MCX_CQ_ENTRY_FLAG_OWNER			(1 << 0)
#define MCX_CQ_ENTRY_FLAG_SE			(1 << 1)
#define MCX_CQ_ENTRY_FORMAT_SHIFT		2
#define MCX_CQ_ENTRY_OPCODE_SHIFT		4

#define MCX_CQ_ENTRY_FORMAT_NO_INLINE		0
#define MCX_CQ_ENTRY_FORMAT_INLINE_32		1
#define MCX_CQ_ENTRY_FORMAT_INLINE_64		2
#define MCX_CQ_ENTRY_FORMAT_COMPRESSED		3

#define MCX_CQ_ENTRY_OPCODE_REQ			0
#define MCX_CQ_ENTRY_OPCODE_SEND		2
#define MCX_CQ_ENTRY_OPCODE_REQ_ERR		13
#define MCX_CQ_ENTRY_OPCODE_SEND_ERR		14
#define MCX_CQ_ENTRY_OPCODE_INVALID		15

} __attribute__((__packed__)) __attribute__((__aligned__(4)));
1637 | |
1638 | CTASSERT(sizeof(struct mcx_cq_entry) == 64)extern char _ctassert[(sizeof(struct mcx_cq_entry) == 64) ? 1 : -1 ] __attribute__((__unused__)); |
1639 | |
/* CQ doorbell record: consumer index update and arm request */
struct mcx_cq_doorbell {
	uint32_t		db_update_ci;
	uint32_t		db_arm_ci;
#define MCX_CQ_DOORBELL_ARM_CMD_SN_SHIFT	28
#define MCX_CQ_DOORBELL_ARM_CMD			(1 << 24)
#define MCX_CQ_DOORBELL_ARM_CI_MASK		(0xffffff)
} __attribute__((__packed__)) __attribute__((__aligned__(8)));
1647 | |
/* work queue context, 0xc0 bytes, embedded in SQ and RQ contexts */
struct mcx_wq_ctx {
	uint8_t			wq_type;
#define MCX_WQ_CTX_TYPE_CYCLIC		(1 << 4)
#define MCX_WQ_CTX_TYPE_SIGNATURE	(1 << 3)
	uint8_t			wq_reserved0[5];
	uint16_t		wq_lwm;
	uint32_t		wq_pd;		/* protection domain */
	uint32_t		wq_uar_page;
	uint64_t		wq_doorbell;	/* DMA address of doorbell record */
	uint32_t		wq_hw_counter;
	uint32_t		wq_sw_counter;
	uint16_t		wq_log_stride;	/* log2 entry stride */
	uint8_t			wq_log_page_sz;
	uint8_t			wq_log_size;	/* log2 number of entries */
	uint8_t			wq_reserved1[156];
} __attribute__((__packed__)) __attribute__((__aligned__(4)));
1664 | |
1665 | CTASSERT(sizeof(struct mcx_wq_ctx) == 0xC0)extern char _ctassert[(sizeof(struct mcx_wq_ctx) == 0xC0) ? 1 : -1 ] __attribute__((__unused__)); |
1666 | |
1667 | struct mcx_sq_ctx { |
1668 | uint32_t sq_flags; |
1669 | #define MCX_SQ_CTX_RLKEY(1 << 31) (1 << 31) |
1670 | #define MCX_SQ_CTX_FRE_SHIFT(1 << 29) (1 << 29) |
1671 | #define MCX_SQ_CTX_FLUSH_IN_ERROR(1 << 28) (1 << 28) |
1672 | #define MCX_SQ_CTX_MIN_WQE_INLINE_SHIFT24 24 |
1673 | #define MCX_SQ_CTX_STATE_SHIFT20 20 |
1674 | #define MCX_SQ_CTX_STATE_MASK(0xf << 20) (0xf << 20) |
1675 | #define MCX_SQ_CTX_STATE_RST0 0 |
1676 | #define MCX_SQ_CTX_STATE_RDY1 1 |
1677 | #define MCX_SQ_CTX_STATE_ERR3 3 |
1678 | uint32_t sq_user_index; |
1679 | uint32_t sq_cqn; |
1680 | uint32_t sq_reserved1[5]; |
1681 | uint32_t sq_tis_lst_sz; |
1682 | #define MCX_SQ_CTX_TIS_LST_SZ_SHIFT16 16 |
1683 | uint32_t sq_reserved2[2]; |
1684 | uint32_t sq_tis_num; |
1685 | struct mcx_wq_ctx sq_wq; |
1686 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1687 | |
/* scatter/gather element inside a send queue entry */
struct mcx_sq_entry_seg {
	uint32_t		sqs_byte_count;
	uint32_t		sqs_lkey;
	uint64_t		sqs_addr;	/* DMA address of the data */
} __attribute__((__packed__)) __attribute__((__aligned__(4)));

/*
 * Send queue entry, 64 bytes: control segment, ethernet segment with
 * inline headers, and the first data segment.
 */
struct mcx_sq_entry {
	/* control segment */
	uint32_t		sqe_opcode_index;
#define MCX_SQE_WQE_INDEX_SHIFT			8
#define MCX_SQE_WQE_OPCODE_NOP			0x00
#define MCX_SQE_WQE_OPCODE_SEND			0x0a
	uint32_t		sqe_ds_sq_num;
#define MCX_SQE_SQ_NUM_SHIFT			8
	uint32_t		sqe_signature;
#define MCX_SQE_SIGNATURE_SHIFT			24
#define MCX_SQE_SOLICITED_EVENT			0x02
#define MCX_SQE_CE_CQE_ON_ERR			0x00
#define MCX_SQE_CE_CQE_FIRST_ERR		0x04
#define MCX_SQE_CE_CQE_ALWAYS			0x08
#define MCX_SQE_CE_CQE_SOLICIT			0x0C
#define MCX_SQE_FM_NO_FENCE			0x00
#define MCX_SQE_FM_SMALL_FENCE			0x40
	uint32_t		sqe_mkey;

	/* ethernet segment */
	uint32_t		sqe_reserved1;
	uint32_t		sqe_mss_csum;	/* checksum offload requests */
#define MCX_SQE_L4_CSUM				(1 << 31)
#define MCX_SQE_L3_CSUM				(1 << 30)
	uint32_t		sqe_reserved2;
	uint16_t		sqe_inline_header_size;
	uint16_t		sqe_inline_headers[9];

	/* data segment */
	struct mcx_sq_entry_seg sqe_segs[1];
} __attribute__((__packed__)) __attribute__((__aligned__(64)));
1725 | |
1726 | CTASSERT(sizeof(struct mcx_sq_entry) == 64)extern char _ctassert[(sizeof(struct mcx_sq_entry) == 64) ? 1 : -1 ] __attribute__((__unused__)); |
1727 | |
1728 | struct mcx_cmd_create_sq_in { |
1729 | uint16_t cmd_opcode; |
1730 | uint8_t cmd_reserved0[4]; |
1731 | uint16_t cmd_op_mod; |
1732 | uint8_t cmd_reserved1[8]; |
1733 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1734 | |
1735 | struct mcx_cmd_create_sq_out { |
1736 | uint8_t cmd_status; |
1737 | uint8_t cmd_reserved0[3]; |
1738 | uint32_t cmd_syndrome; |
1739 | uint32_t cmd_sqn; |
1740 | uint8_t cmd_reserved1[4]; |
1741 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1742 | |
1743 | struct mcx_cmd_modify_sq_in { |
1744 | uint16_t cmd_opcode; |
1745 | uint8_t cmd_reserved0[4]; |
1746 | uint16_t cmd_op_mod; |
1747 | uint32_t cmd_sq_state; |
1748 | uint8_t cmd_reserved1[4]; |
1749 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1750 | |
1751 | struct mcx_cmd_modify_sq_mb_in { |
1752 | uint32_t cmd_modify_hi; |
1753 | uint32_t cmd_modify_lo; |
1754 | uint8_t cmd_reserved0[8]; |
1755 | struct mcx_sq_ctx cmd_sq_ctx; |
1756 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1757 | |
1758 | struct mcx_cmd_modify_sq_out { |
1759 | uint8_t cmd_status; |
1760 | uint8_t cmd_reserved0[3]; |
1761 | uint32_t cmd_syndrome; |
1762 | uint8_t cmd_reserved1[8]; |
1763 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1764 | |
1765 | struct mcx_cmd_destroy_sq_in { |
1766 | uint16_t cmd_opcode; |
1767 | uint8_t cmd_reserved0[4]; |
1768 | uint16_t cmd_op_mod; |
1769 | uint32_t cmd_sqn; |
1770 | uint8_t cmd_reserved1[4]; |
1771 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1772 | |
1773 | struct mcx_cmd_destroy_sq_out { |
1774 | uint8_t cmd_status; |
1775 | uint8_t cmd_reserved0[3]; |
1776 | uint32_t cmd_syndrome; |
1777 | uint8_t cmd_reserved1[8]; |
1778 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1779 | |
1780 | |
1781 | struct mcx_rq_ctx { |
1782 | uint32_t rq_flags; |
1783 | #define MCX_RQ_CTX_RLKEY(1 << 31) (1 << 31) |
1784 | #define MCX_RQ_CTX_VLAN_STRIP_DIS(1 << 28) (1 << 28) |
1785 | #define MCX_RQ_CTX_MEM_RQ_TYPE_SHIFT24 24 |
1786 | #define MCX_RQ_CTX_STATE_SHIFT20 20 |
1787 | #define MCX_RQ_CTX_STATE_MASK(0xf << 20) (0xf << 20) |
1788 | #define MCX_RQ_CTX_STATE_RST0 0 |
1789 | #define MCX_RQ_CTX_STATE_RDY1 1 |
1790 | #define MCX_RQ_CTX_STATE_ERR3 3 |
1791 | #define MCX_RQ_CTX_FLUSH_IN_ERROR(1 << 18) (1 << 18) |
1792 | uint32_t rq_user_index; |
1793 | uint32_t rq_cqn; |
1794 | uint32_t rq_reserved1; |
1795 | uint32_t rq_rmpn; |
1796 | uint32_t rq_reserved2[7]; |
1797 | struct mcx_wq_ctx rq_wq; |
1798 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1799 | |
/* receive queue entry: one receive buffer descriptor */
struct mcx_rq_entry {
	uint32_t		rqe_byte_count;
	uint32_t		rqe_lkey;
	uint64_t		rqe_addr;	/* DMA address of the buffer */
} __attribute__((__packed__)) __attribute__((__aligned__(16)));
1805 | |
1806 | struct mcx_cmd_create_rq_in { |
1807 | uint16_t cmd_opcode; |
1808 | uint8_t cmd_reserved0[4]; |
1809 | uint16_t cmd_op_mod; |
1810 | uint8_t cmd_reserved1[8]; |
1811 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1812 | |
1813 | struct mcx_cmd_create_rq_out { |
1814 | uint8_t cmd_status; |
1815 | uint8_t cmd_reserved0[3]; |
1816 | uint32_t cmd_syndrome; |
1817 | uint32_t cmd_rqn; |
1818 | uint8_t cmd_reserved1[4]; |
1819 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1820 | |
1821 | struct mcx_cmd_modify_rq_in { |
1822 | uint16_t cmd_opcode; |
1823 | uint8_t cmd_reserved0[4]; |
1824 | uint16_t cmd_op_mod; |
1825 | uint32_t cmd_rq_state; |
1826 | uint8_t cmd_reserved1[4]; |
1827 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1828 | |
1829 | struct mcx_cmd_modify_rq_mb_in { |
1830 | uint32_t cmd_modify_hi; |
1831 | uint32_t cmd_modify_lo; |
1832 | uint8_t cmd_reserved0[8]; |
1833 | struct mcx_rq_ctx cmd_rq_ctx; |
1834 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1835 | |
1836 | struct mcx_cmd_modify_rq_out { |
1837 | uint8_t cmd_status; |
1838 | uint8_t cmd_reserved0[3]; |
1839 | uint32_t cmd_syndrome; |
1840 | uint8_t cmd_reserved1[8]; |
1841 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1842 | |
1843 | struct mcx_cmd_destroy_rq_in { |
1844 | uint16_t cmd_opcode; |
1845 | uint8_t cmd_reserved0[4]; |
1846 | uint16_t cmd_op_mod; |
1847 | uint32_t cmd_rqn; |
1848 | uint8_t cmd_reserved1[4]; |
1849 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1850 | |
1851 | struct mcx_cmd_destroy_rq_out { |
1852 | uint8_t cmd_status; |
1853 | uint8_t cmd_reserved0[3]; |
1854 | uint32_t cmd_syndrome; |
1855 | uint8_t cmd_reserved1[8]; |
1856 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1857 | |
/* CREATE/DESTROY flow table and SET_FLOW_TABLE_ROOT command layouts */
struct mcx_cmd_create_flow_table_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __attribute__((__packed__)) __attribute__((__aligned__(4)));

struct mcx_flow_table_ctx {
	uint8_t			ft_miss_action;
	uint8_t			ft_level;
	uint8_t			ft_reserved0;
	uint8_t			ft_log_size;	/* log2 number of entries */
	uint32_t		ft_table_miss_id;
	uint8_t			ft_reserved1[28];
} __attribute__((__packed__)) __attribute__((__aligned__(4)));

struct mcx_cmd_create_flow_table_mb_in {
	uint8_t			cmd_table_type;
	uint8_t			cmd_reserved0[7];
	struct mcx_flow_table_ctx cmd_ctx;
} __attribute__((__packed__)) __attribute__((__aligned__(4)));

struct mcx_cmd_create_flow_table_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_table_id;	/* id of the new flow table */
	uint8_t			cmd_reserved1[4];
} __attribute__((__packed__)) __attribute__((__aligned__(4)));

struct mcx_cmd_destroy_flow_table_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __attribute__((__packed__)) __attribute__((__aligned__(4)));

struct mcx_cmd_destroy_flow_table_mb_in {
	uint8_t			cmd_table_type;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_table_id;
	uint8_t			cmd_reserved1[40];
} __attribute__((__packed__)) __attribute__((__aligned__(4)));

struct mcx_cmd_destroy_flow_table_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __attribute__((__packed__)) __attribute__((__aligned__(4)));

struct mcx_cmd_set_flow_table_root_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __attribute__((__packed__)) __attribute__((__aligned__(4)));

struct mcx_cmd_set_flow_table_root_mb_in {
	uint8_t			cmd_table_type;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_table_id;
	uint8_t			cmd_reserved1[56];
} __attribute__((__packed__)) __attribute__((__aligned__(4)));

struct mcx_cmd_set_flow_table_root_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __attribute__((__packed__)) __attribute__((__aligned__(4)));
1929 | |
/*
 * Flow match parameters, 512 bytes: used both as the match criteria of
 * a flow group and as the match value of a flow table entry.
 */
struct mcx_flow_match {
	/* outer headers */
	uint8_t			mc_src_mac[6];
	uint16_t		mc_ethertype;
	uint8_t			mc_dest_mac[6];
	uint16_t		mc_first_vlan;
	uint8_t			mc_ip_proto;
	uint8_t			mc_ip_dscp_ecn;
	uint8_t			mc_vlan_flags;
#define MCX_FLOW_MATCH_IP_FRAG	(1 << 5)
	uint8_t			mc_tcp_flags;
	uint16_t		mc_tcp_sport;
	uint16_t		mc_tcp_dport;
	uint32_t		mc_reserved0;
	uint16_t		mc_udp_sport;
	uint16_t		mc_udp_dport;
	uint8_t			mc_src_ip[16];	/* v4 or v6 address */
	uint8_t			mc_dest_ip[16];

	/* misc parameters */
	uint8_t			mc_reserved1[8];
	uint16_t		mc_second_vlan;
	uint8_t			mc_reserved2[2];
	uint8_t			mc_second_vlan_flags;
	uint8_t			mc_reserved3[15];
	uint32_t		mc_outer_ipv6_flow_label;
	uint8_t			mc_reserved4[32];

	uint8_t			mc_reserved[384];
} __attribute__((__packed__)) __attribute__((__aligned__(4)));
1960 | |
1961 | CTASSERT(sizeof(struct mcx_flow_match) == 512)extern char _ctassert[(sizeof(struct mcx_flow_match) == 512) ? 1 : -1 ] __attribute__((__unused__)); |
1962 | |
1963 | struct mcx_cmd_create_flow_group_in { |
1964 | uint16_t cmd_opcode; |
1965 | uint8_t cmd_reserved0[4]; |
1966 | uint16_t cmd_op_mod; |
1967 | uint8_t cmd_reserved1[8]; |
1968 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1969 | |
1970 | struct mcx_cmd_create_flow_group_mb_in { |
1971 | uint8_t cmd_table_type; |
1972 | uint8_t cmd_reserved0[3]; |
1973 | uint32_t cmd_table_id; |
1974 | uint8_t cmd_reserved1[4]; |
1975 | uint32_t cmd_start_flow_index; |
1976 | uint8_t cmd_reserved2[4]; |
1977 | uint32_t cmd_end_flow_index; |
1978 | uint8_t cmd_reserved3[23]; |
1979 | uint8_t cmd_match_criteria_enable; |
1980 | #define MCX_CREATE_FLOW_GROUP_CRIT_OUTER(1 << 0) (1 << 0) |
1981 | #define MCX_CREATE_FLOW_GROUP_CRIT_MISC(1 << 1) (1 << 1) |
1982 | #define MCX_CREATE_FLOW_GROUP_CRIT_INNER(1 << 2) (1 << 2) |
1983 | struct mcx_flow_match cmd_match_criteria; |
1984 | uint8_t cmd_reserved4[448]; |
1985 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1986 | |
1987 | struct mcx_cmd_create_flow_group_out { |
1988 | uint8_t cmd_status; |
1989 | uint8_t cmd_reserved0[3]; |
1990 | uint32_t cmd_syndrome; |
1991 | uint32_t cmd_group_id; |
1992 | uint8_t cmd_reserved1[4]; |
1993 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
1994 | |
1995 | struct mcx_flow_ctx { |
1996 | uint8_t fc_reserved0[4]; |
1997 | uint32_t fc_group_id; |
1998 | uint32_t fc_flow_tag; |
1999 | uint32_t fc_action; |
2000 | #define MCX_FLOW_CONTEXT_ACTION_ALLOW(1 << 0) (1 << 0) |
2001 | #define MCX_FLOW_CONTEXT_ACTION_DROP(1 << 1) (1 << 1) |
2002 | #define MCX_FLOW_CONTEXT_ACTION_FORWARD(1 << 2) (1 << 2) |
2003 | #define MCX_FLOW_CONTEXT_ACTION_COUNT(1 << 3) (1 << 3) |
2004 | uint32_t fc_dest_list_size; |
2005 | uint32_t fc_counter_list_size; |
2006 | uint8_t fc_reserved1[40]; |
2007 | struct mcx_flow_match fc_match_value; |
2008 | uint8_t fc_reserved2[192]; |
2009 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
2010 | |
2011 | #define MCX_FLOW_CONTEXT_DEST_TYPE_TABLE(1 << 24) (1 << 24) |
2012 | #define MCX_FLOW_CONTEXT_DEST_TYPE_TIR(2 << 24) (2 << 24) |
2013 | |
/* DESTROY_FLOW_GROUP command layouts */
struct mcx_cmd_destroy_flow_group_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __attribute__((__packed__)) __attribute__((__aligned__(4)));

struct mcx_cmd_destroy_flow_group_mb_in {
	uint8_t			cmd_table_type;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_table_id;
	uint32_t		cmd_group_id;
	uint8_t			cmd_reserved1[36];
} __attribute__((__packed__)) __attribute__((__aligned__(4)));

struct mcx_cmd_destroy_flow_group_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __attribute__((__packed__)) __attribute__((__aligned__(4)));
2035 | |
2036 | struct mcx_cmd_set_flow_table_entry_in { |
2037 | uint16_t cmd_opcode; |
2038 | uint8_t cmd_reserved0[4]; |
2039 | uint16_t cmd_op_mod; |
2040 | uint8_t cmd_reserved1[8]; |
2041 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
2042 | |
2043 | struct mcx_cmd_set_flow_table_entry_mb_in { |
2044 | uint8_t cmd_table_type; |
2045 | uint8_t cmd_reserved0[3]; |
2046 | uint32_t cmd_table_id; |
2047 | uint32_t cmd_modify_enable_mask; |
2048 | uint8_t cmd_reserved1[4]; |
2049 | uint32_t cmd_flow_index; |
2050 | uint8_t cmd_reserved2[28]; |
2051 | struct mcx_flow_ctx cmd_flow_ctx; |
2052 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
2053 | |
2054 | struct mcx_cmd_set_flow_table_entry_out { |
2055 | uint8_t cmd_status; |
2056 | uint8_t cmd_reserved0[3]; |
2057 | uint32_t cmd_syndrome; |
2058 | uint8_t cmd_reserved1[8]; |
2059 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
2060 | |
2061 | struct mcx_cmd_query_flow_table_entry_in { |
2062 | uint16_t cmd_opcode; |
2063 | uint8_t cmd_reserved0[4]; |
2064 | uint16_t cmd_op_mod; |
2065 | uint8_t cmd_reserved1[8]; |
2066 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
2067 | |
2068 | struct mcx_cmd_query_flow_table_entry_mb_in { |
2069 | uint8_t cmd_table_type; |
2070 | uint8_t cmd_reserved0[3]; |
2071 | uint32_t cmd_table_id; |
2072 | uint8_t cmd_reserved1[8]; |
2073 | uint32_t cmd_flow_index; |
2074 | uint8_t cmd_reserved2[28]; |
2075 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
2076 | |
2077 | struct mcx_cmd_query_flow_table_entry_out { |
2078 | uint8_t cmd_status; |
2079 | uint8_t cmd_reserved0[3]; |
2080 | uint32_t cmd_syndrome; |
2081 | uint8_t cmd_reserved1[8]; |
2082 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
2083 | |
2084 | struct mcx_cmd_query_flow_table_entry_mb_out { |
2085 | uint8_t cmd_reserved0[48]; |
2086 | struct mcx_flow_ctx cmd_flow_ctx; |
2087 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
2088 | |
2089 | struct mcx_cmd_delete_flow_table_entry_in { |
2090 | uint16_t cmd_opcode; |
2091 | uint8_t cmd_reserved0[4]; |
2092 | uint16_t cmd_op_mod; |
2093 | uint8_t cmd_reserved1[8]; |
2094 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
2095 | |
2096 | struct mcx_cmd_delete_flow_table_entry_mb_in { |
2097 | uint8_t cmd_table_type; |
2098 | uint8_t cmd_reserved0[3]; |
2099 | uint32_t cmd_table_id; |
2100 | uint8_t cmd_reserved1[8]; |
2101 | uint32_t cmd_flow_index; |
2102 | uint8_t cmd_reserved2[28]; |
2103 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
2104 | |
2105 | struct mcx_cmd_delete_flow_table_entry_out { |
2106 | uint8_t cmd_status; |
2107 | uint8_t cmd_reserved0[3]; |
2108 | uint32_t cmd_syndrome; |
2109 | uint8_t cmd_reserved1[8]; |
2110 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
2111 | |
/* QUERY_FLOW_GROUP command layouts */
struct mcx_cmd_query_flow_group_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __attribute__((__packed__)) __attribute__((__aligned__(4)));

struct mcx_cmd_query_flow_group_mb_in {
	uint8_t			cmd_table_type;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_table_id;
	uint32_t		cmd_group_id;
	uint8_t			cmd_reserved1[36];
} __attribute__((__packed__)) __attribute__((__aligned__(4)));

struct mcx_cmd_query_flow_group_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __attribute__((__packed__)) __attribute__((__aligned__(4)));

struct mcx_cmd_query_flow_group_mb_out {
	uint8_t			cmd_reserved0[12];
	uint32_t		cmd_start_flow_index;
	uint8_t			cmd_reserved1[4];
	uint32_t		cmd_end_flow_index;
	uint8_t			cmd_reserved2[20];
	uint32_t		cmd_match_criteria_enable;
	uint8_t			cmd_match_criteria[512];	/* mcx_flow_match layout */
	uint8_t			cmd_reserved4[448];
} __attribute__((__packed__)) __attribute__((__aligned__(4)));
2144 | |
2145 | struct mcx_cmd_query_flow_table_in { |
2146 | uint16_t cmd_opcode; |
2147 | uint8_t cmd_reserved0[4]; |
2148 | uint16_t cmd_op_mod; |
2149 | uint8_t cmd_reserved1[8]; |
2150 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
2151 | |
2152 | struct mcx_cmd_query_flow_table_mb_in { |
2153 | uint8_t cmd_table_type; |
2154 | uint8_t cmd_reserved0[3]; |
2155 | uint32_t cmd_table_id; |
2156 | uint8_t cmd_reserved1[40]; |
2157 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
2158 | |
2159 | struct mcx_cmd_query_flow_table_out { |
2160 | uint8_t cmd_status; |
2161 | uint8_t cmd_reserved0[3]; |
2162 | uint32_t cmd_syndrome; |
2163 | uint8_t cmd_reserved1[8]; |
2164 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
2165 | |
2166 | struct mcx_cmd_query_flow_table_mb_out { |
2167 | uint8_t cmd_reserved0[4]; |
2168 | struct mcx_flow_table_ctx cmd_ctx; |
2169 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
2170 | |
2171 | struct mcx_cmd_alloc_flow_counter_in { |
2172 | uint16_t cmd_opcode; |
2173 | uint8_t cmd_reserved0[4]; |
2174 | uint16_t cmd_op_mod; |
2175 | uint8_t cmd_reserved1[8]; |
2176 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
2177 | |
2178 | struct mcx_cmd_query_rq_in { |
2179 | uint16_t cmd_opcode; |
2180 | uint8_t cmd_reserved0[4]; |
2181 | uint16_t cmd_op_mod; |
2182 | uint32_t cmd_rqn; |
2183 | uint8_t cmd_reserved1[4]; |
2184 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
2185 | |
2186 | struct mcx_cmd_query_rq_out { |
2187 | uint8_t cmd_status; |
2188 | uint8_t cmd_reserved0[3]; |
2189 | uint32_t cmd_syndrome; |
2190 | uint8_t cmd_reserved1[8]; |
2191 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
2192 | |
2193 | struct mcx_cmd_query_rq_mb_out { |
2194 | uint8_t cmd_reserved0[16]; |
2195 | struct mcx_rq_ctx cmd_ctx; |
2196 | }; |
2197 | |
2198 | struct mcx_cmd_query_sq_in { |
2199 | uint16_t cmd_opcode; |
2200 | uint8_t cmd_reserved0[4]; |
2201 | uint16_t cmd_op_mod; |
2202 | uint32_t cmd_sqn; |
2203 | uint8_t cmd_reserved1[4]; |
2204 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
2205 | |
2206 | struct mcx_cmd_query_sq_out { |
2207 | uint8_t cmd_status; |
2208 | uint8_t cmd_reserved0[3]; |
2209 | uint32_t cmd_syndrome; |
2210 | uint8_t cmd_reserved1[8]; |
2211 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
2212 | |
2213 | struct mcx_cmd_query_sq_mb_out { |
2214 | uint8_t cmd_reserved0[16]; |
2215 | struct mcx_sq_ctx cmd_ctx; |
2216 | }; |
2217 | |
2218 | struct mcx_cmd_alloc_flow_counter_out { |
2219 | uint8_t cmd_status; |
2220 | uint8_t cmd_reserved0[3]; |
2221 | uint32_t cmd_syndrome; |
2222 | uint8_t cmd_reserved1[2]; |
2223 | uint16_t cmd_flow_counter_id; |
2224 | uint8_t cmd_reserved2[4]; |
2225 | } __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4))); |
2226 | |
/* work queue doorbell record: receive and send counters */
struct mcx_wq_doorbell {
	uint32_t		db_recv_counter;
	uint32_t		db_send_counter;
} __attribute__((__packed__)) __attribute__((__aligned__(8)));
2231 | |
2232 | struct mcx_dmamem { |
2233 | bus_dmamap_t mxm_map; |
2234 | bus_dma_segment_t mxm_seg; |
2235 | int mxm_nsegs; |
2236 | size_t mxm_size; |
2237 | caddr_t mxm_kva; |
2238 | }; |
2239 | #define MCX_DMA_MAP(_mxm)((_mxm)->mxm_map) ((_mxm)->mxm_map) |
2240 | #define MCX_DMA_DVA(_mxm)((_mxm)->mxm_map->dm_segs[0].ds_addr) ((_mxm)->mxm_map->dm_segs[0].ds_addr) |
2241 | #define MCX_DMA_KVA(_mxm)((void *)(_mxm)->mxm_kva) ((void *)(_mxm)->mxm_kva) |
2242 | #define MCX_DMA_OFF(_mxm, _off)((void *)((_mxm)->mxm_kva + (_off))) ((void *)((_mxm)->mxm_kva + (_off))) |
2243 | #define MCX_DMA_LEN(_mxm)((_mxm)->mxm_size) ((_mxm)->mxm_size) |
2244 | |
2245 | struct mcx_hwmem { |
2246 | bus_dmamap_t mhm_map; |
2247 | bus_dma_segment_t *mhm_segs; |
2248 | unsigned int mhm_seg_count; |
2249 | unsigned int mhm_npages; |
2250 | }; |
2251 | |
2252 | struct mcx_slot { |
2253 | bus_dmamap_t ms_map; |
2254 | struct mbuf *ms_m; |
2255 | }; |
2256 | |
2257 | struct mcx_eq { |
2258 | int eq_n; |
2259 | uint32_t eq_cons; |
2260 | struct mcx_dmamem eq_mem; |
2261 | }; |
2262 | |
2263 | struct mcx_cq { |
2264 | int cq_n; |
2265 | struct mcx_dmamem cq_mem; |
2266 | bus_addr_t cq_doorbell; |
2267 | uint32_t cq_cons; |
2268 | uint32_t cq_count; |
2269 | }; |
2270 | |
/* device/kernel clock correlation for rx timestamping */
struct mcx_calibration {
	uint64_t		c_timestamp;	/* previous mcx chip time */
	uint64_t		c_uptime;	/* previous kernel nanouptime */
	uint64_t		c_tbase;	/* mcx chip time */
	uint64_t		c_ubase;	/* kernel nanouptime */
	uint64_t		c_ratio;
};

/* recalibration intervals, in seconds */
#define MCX_CALIBRATE_FIRST		2
#define MCX_CALIBRATE_NORMAL		32
2281 | |
2282 | struct mcx_rx { |
2283 | struct mcx_softc *rx_softc; |
2284 | struct ifiqueue *rx_ifiq; |
2285 | |
2286 | int rx_rqn; |
2287 | struct mcx_dmamem rx_rq_mem; |
2288 | struct mcx_slot *rx_slots; |
2289 | bus_addr_t rx_doorbell; |
2290 | |
2291 | uint32_t rx_prod; |
2292 | struct timeout rx_refill; |
2293 | struct if_rxring rx_rxr; |
2294 | } __aligned(64)__attribute__((__aligned__(64))); |
2295 | |
2296 | struct mcx_tx { |
2297 | struct mcx_softc *tx_softc; |
2298 | struct ifqueue *tx_ifq; |
2299 | |
2300 | int tx_uar; |
2301 | int tx_sqn; |
2302 | struct mcx_dmamem tx_sq_mem; |
2303 | struct mcx_slot *tx_slots; |
2304 | bus_addr_t tx_doorbell; |
2305 | int tx_bf_offset; |
2306 | |
2307 | uint32_t tx_cons; |
2308 | uint32_t tx_prod; |
2309 | } __aligned(64)__attribute__((__aligned__(64))); |
2310 | |
2311 | struct mcx_queues { |
2312 | char q_name[16]; |
2313 | void *q_ihc; |
2314 | struct mcx_softc *q_sc; |
2315 | int q_uar; |
2316 | int q_index; |
2317 | struct mcx_rx q_rx; |
2318 | struct mcx_tx q_tx; |
2319 | struct mcx_cq q_cq; |
2320 | struct mcx_eq q_eq; |
2321 | #if NKSTAT0 > 0 |
2322 | struct kstat *q_kstat; |
2323 | #endif |
2324 | }; |
2325 | |
/* driver-side record of a flow group: its id and entry range */
struct mcx_flow_group {
	int			g_id;
	int			g_table;
	int			g_start;	/* first entry index */
	int			g_size;		/* number of entries */
};

/* indices into the driver's array of flow groups */
#define MCX_FLOW_GROUP_PROMISC		0
#define MCX_FLOW_GROUP_ALLMULTI		1
#define MCX_FLOW_GROUP_MAC		2
#define MCX_FLOW_GROUP_RSS_L4		3
#define MCX_FLOW_GROUP_RSS_L3		4
#define MCX_FLOW_GROUP_RSS_NONE		5
#define MCX_NUM_FLOW_GROUPS		6
2340 | |
2341 | #define MCX_HASH_SEL_L3(1 << 0) | (1 << 1) MCX_TIR_CTX_HASH_SEL_SRC_IP(1 << 0) | \ |
2342 | MCX_TIR_CTX_HASH_SEL_DST_IP(1 << 1) |
2343 | #define MCX_HASH_SEL_L4(1 << 0) | (1 << 1) | (1 << 2) | (1 << 3) MCX_HASH_SEL_L3(1 << 0) | (1 << 1) | MCX_TIR_CTX_HASH_SEL_SPORT(1 << 2) | \ |
2344 | MCX_TIR_CTX_HASH_SEL_DPORT(1 << 3) |
2345 | |
2346 | #define MCX_RSS_HASH_SEL_V4_TCP(1 << 0) | (1 << 1) | (1 << 2) | (1 << 3) | (0 << 30) | (0 << 31) MCX_HASH_SEL_L4(1 << 0) | (1 << 1) | (1 << 2) | (1 << 3) | MCX_TIR_CTX_HASH_SEL_TCP(0 << 30) |\ |
2347 | MCX_TIR_CTX_HASH_SEL_IPV4(0 << 31) |
2348 | #define MCX_RSS_HASH_SEL_V6_TCP(1 << 0) | (1 << 1) | (1 << 2) | (1 << 3) | (0 << 30) | (1 << 31) MCX_HASH_SEL_L4(1 << 0) | (1 << 1) | (1 << 2) | (1 << 3) | MCX_TIR_CTX_HASH_SEL_TCP(0 << 30) | \ |
2349 | MCX_TIR_CTX_HASH_SEL_IPV6(1 << 31) |
2350 | #define MCX_RSS_HASH_SEL_V4_UDP(1 << 0) | (1 << 1) | (1 << 2) | (1 << 3) | (1 << 30) | (0 << 31) MCX_HASH_SEL_L4(1 << 0) | (1 << 1) | (1 << 2) | (1 << 3) | MCX_TIR_CTX_HASH_SEL_UDP(1 << 30) | \ |
2351 | MCX_TIR_CTX_HASH_SEL_IPV4(0 << 31) |
2352 | #define MCX_RSS_HASH_SEL_V6_UDP(1 << 0) | (1 << 1) | (1 << 2) | (1 << 3) | (1 << 30) | (1 << 31) MCX_HASH_SEL_L4(1 << 0) | (1 << 1) | (1 << 2) | (1 << 3) | MCX_TIR_CTX_HASH_SEL_UDP(1 << 30) | \ |
2353 | MCX_TIR_CTX_HASH_SEL_IPV6(1 << 31) |
2354 | #define MCX_RSS_HASH_SEL_V4(1 << 0) | (1 << 1) | (0 << 31) MCX_HASH_SEL_L3(1 << 0) | (1 << 1) | MCX_TIR_CTX_HASH_SEL_IPV4(0 << 31) |
2355 | #define MCX_RSS_HASH_SEL_V6(1 << 0) | (1 << 1) | (1 << 31) MCX_HASH_SEL_L3(1 << 0) | (1 << 1) | MCX_TIR_CTX_HASH_SEL_IPV6(1 << 31) |
2356 | |
2357 | /* |
2358 | * There are a few different pieces involved in configuring RSS. |
2359 | * A Receive Queue Table (RQT) is the indirection table that maps packets to |
2360 | * different rx queues based on a hash value. We only create one, because |
2361 | * we want to scatter any traffic we can apply RSS to across all our rx |
2362 | * queues. Anything else will only be delivered to the first rx queue, |
2363 | * which doesn't require an RQT. |
2364 | * |
2365 | * A Transport Interface Receive (TIR) delivers packets to either a single rx |
2366 | * queue or an RQT, and in the latter case, specifies the set of fields |
2367 | * hashed, the hash function, and the hash key. We need one of these for each |
2368 | * type of RSS traffic - v4 TCP, v6 TCP, v4 UDP, v6 UDP, other v4, other v6, |
2369 | * and one for non-RSS traffic. |
2370 | * |
2371 | * Flow tables hold flow table entries in sequence. The first entry that |
2372 | * matches a packet is applied, sending the packet to either another flow |
2373 | * table or a TIR. We use one flow table to select packets based on |
2374 | * destination MAC address, and a second to apply RSS. The entries in the |
2375 | * first table send matching packets to the second, and the entries in the |
2376 | * RSS table send packets to RSS TIRs if possible, or the non-RSS TIR. |
2377 | * |
2378 | * The flow table entry that delivers packets to an RSS TIR must include match |
2379 | * criteria that ensure packets delivered to the TIR include all the fields |
2380 | * that the TIR hashes on - so for a v4 TCP TIR, the flow table entry must |
2381 | * only accept v4 TCP packets. Accordingly, we need flow table entries for |
2382 | * each TIR. |
2383 | * |
2384 | * All of this is a lot more flexible than we need, and we can describe most |
2385 | * of the stuff we need with a simple array. |
2386 | * |
2387 | * An RSS config creates a TIR with hashing enabled on a set of fields, |
2388 | * pointing to either the first rx queue or the RQT containing all the rx |
2389 | * queues, and a flow table entry that matches on an ether type and |
2390 | * optionally an ip proto, that delivers packets to the TIR. |
2391 | */ |
/*
 * RSS rule table (see the block comment above): one TIR per entry,
 * each selecting the hash fields, flow group, and the ethertype/
 * ip_proto match that steers traffic to it.  The final all-zero
 * entry is the catch-all non-RSS TIR.  Order matters: more specific
 * matches come first.
 */
2392 | static struct mcx_rss_rule { |
2393 | int hash_sel; |
2394 | int flow_group; |
2395 | int ethertype; |
2396 | int ip_proto; |
2397 | } mcx_rss_config[] = { |
2398 | /* udp and tcp for v4/v6 */ |
2399 | { MCX_RSS_HASH_SEL_V4_TCP(1 << 0) | (1 << 1) | (1 << 2) | (1 << 3) | (0 << 30) | (0 << 31), MCX_FLOW_GROUP_RSS_L43, |
2400 | ETHERTYPE_IP0x0800, IPPROTO_TCP6 }, |
2401 | { MCX_RSS_HASH_SEL_V6_TCP(1 << 0) | (1 << 1) | (1 << 2) | (1 << 3) | (0 << 30) | (1 << 31), MCX_FLOW_GROUP_RSS_L43, |
2402 | ETHERTYPE_IPV60x86DD, IPPROTO_TCP6 }, |
2403 | { MCX_RSS_HASH_SEL_V4_UDP(1 << 0) | (1 << 1) | (1 << 2) | (1 << 3) | (1 << 30) | (0 << 31), MCX_FLOW_GROUP_RSS_L43, |
2404 | ETHERTYPE_IP0x0800, IPPROTO_UDP17 }, |
2405 | { MCX_RSS_HASH_SEL_V6_UDP(1 << 0) | (1 << 1) | (1 << 2) | (1 << 3) | (1 << 30) | (1 << 31), MCX_FLOW_GROUP_RSS_L43, |
2406 | ETHERTYPE_IPV60x86DD, IPPROTO_UDP17 }, |
2407 | |
2408 | /* other v4/v6 */ |
2409 | { MCX_RSS_HASH_SEL_V4(1 << 0) | (1 << 1) | (0 << 31), MCX_FLOW_GROUP_RSS_L34, |
2410 | ETHERTYPE_IP0x0800, 0 }, |
2411 | { MCX_RSS_HASH_SEL_V6(1 << 0) | (1 << 1) | (1 << 31), MCX_FLOW_GROUP_RSS_L34, |
2412 | ETHERTYPE_IPV60x86DD, 0 }, |
2413 | |
2414 | /* non v4/v6 */ |
2415 | { 0, MCX_FLOW_GROUP_RSS_NONE5, 0, 0 } |
2416 | }; |
2417 | |
/*
 * Per-device softc.  Groups (in order): autoconf/ifnet/media state,
 * PCI and bus-space handles, firmware command queue state, the three
 * hwmem page pools handed to firmware, HCA object ids (UAR, PD,
 * transport domain, TIS/TIR/RQT), doorbell memory, the admin and
 * queue event queues, flow table/group state, timer calibration, and
 * the per-vector queue array.
 */
2418 | struct mcx_softc { |
2419 | struct device sc_dev; |
2420 | struct arpcom sc_ac; |
2421 | struct ifmedia sc_media; |
2422 | uint64_t sc_media_status; |
2423 | uint64_t sc_media_active; |
2424 | |
2425 | pci_chipset_tag_t sc_pc; |
2426 | pci_intr_handle_t sc_ih; |
2427 | void *sc_ihc; |
2428 | pcitag_t sc_tag; |
2429 | |
2430 | bus_dma_tag_t sc_dmat; |
2431 | bus_space_tag_t sc_memt; |
2432 | bus_space_handle_t sc_memh; |
2433 | bus_size_t sc_mems; |
2434 | |
2435 | struct mcx_dmamem sc_cmdq_mem; |
2436 | unsigned int sc_cmdq_mask; |
2437 | unsigned int sc_cmdq_size; |
2438 | |
2439 | unsigned int sc_cmdq_token; |
2440 | |
2441 | struct mcx_hwmem sc_boot_pages; |
2442 | struct mcx_hwmem sc_init_pages; |
2443 | struct mcx_hwmem sc_regular_pages; |
2444 | |
2445 | int sc_uar; |
2446 | int sc_pd; |
2447 | int sc_tdomain; |
2448 | uint32_t sc_lkey; |
2449 | int sc_tis; |
2450 | int sc_tir[nitems(mcx_rss_config)(sizeof((mcx_rss_config)) / sizeof((mcx_rss_config)[0]))]; |
2451 | int sc_rqt; |
2452 | |
2453 | struct mcx_dmamem sc_doorbell_mem; |
2454 | |
2455 | struct mcx_eq sc_admin_eq; |
2456 | struct mcx_eq sc_queue_eq; |
2457 | |
2458 | int sc_hardmtu; |
2459 | int sc_rxbufsz; |
2460 | |
2461 | int sc_bf_size; |
2462 | int sc_max_rqt_size; |
2463 | |
2464 | struct task sc_port_change; |
2465 | |
2466 | int sc_mac_flow_table_id; |
2467 | int sc_rss_flow_table_id; |
2468 | struct mcx_flow_group sc_flow_group[MCX_NUM_FLOW_GROUPS6]; |
2469 | int sc_promisc_flow_enabled; |
2470 | int sc_allmulti_flow_enabled; |
2471 | int sc_mcast_flow_base; |
2472 | int sc_extra_mcast; |
2473 | uint8_t sc_mcast_flows[MCX_NUM_MCAST_FLOWS((1 << 5) - 4)][ETHER_ADDR_LEN6]; |
2474 | |
2475 | struct mcx_calibration sc_calibration[2]; |
2476 | unsigned int sc_calibration_gen; |
2477 | struct timeout sc_calibrate; |
2478 | uint32_t sc_mhz; |
2479 | uint32_t sc_khz; |
2480 | |
2481 | struct intrmap *sc_intrmap; |
2482 | struct mcx_queues *sc_queues; |
2483 | |
2484 | int sc_mcam_reg; |
2485 | |
2486 | #if NKSTAT0 > 0 |
2487 | struct kstat *sc_kstat_ieee8023; |
2488 | struct kstat *sc_kstat_rfc2863; |
2489 | struct kstat *sc_kstat_rfc2819; |
2490 | struct kstat *sc_kstat_rfc3635; |
2491 | unsigned int sc_kstat_mtmp_count; |
2492 | struct kstat **sc_kstat_mtmp; |
2493 | #endif |
2494 | |
2495 | struct timecounter sc_timecounter; |
2496 | }; |
2497 | #define DEVNAME(_sc)((_sc)->sc_dev.dv_xname) ((_sc)->sc_dev.dv_xname) |
2498 | |
2499 | static int mcx_match(struct device *, void *, void *); |
2500 | static void mcx_attach(struct device *, struct device *, void *); |
2501 | |
2502 | #if NKSTAT0 > 0 |
2503 | static void mcx_kstat_attach(struct mcx_softc *); |
2504 | #endif |
2505 | |
2506 | static void mcx_timecounter_attach(struct mcx_softc *); |
2507 | |
2508 | static int mcx_version(struct mcx_softc *); |
2509 | static int mcx_init_wait(struct mcx_softc *); |
2510 | static int mcx_enable_hca(struct mcx_softc *); |
2511 | static int mcx_teardown_hca(struct mcx_softc *, uint16_t); |
2512 | static int mcx_access_hca_reg(struct mcx_softc *, uint16_t, int, void *, |
2513 | int); |
2514 | static int mcx_issi(struct mcx_softc *); |
2515 | static int mcx_pages(struct mcx_softc *, struct mcx_hwmem *, uint16_t); |
2516 | static int mcx_hca_max_caps(struct mcx_softc *); |
2517 | static int mcx_hca_set_caps(struct mcx_softc *); |
2518 | static int mcx_init_hca(struct mcx_softc *); |
2519 | static int mcx_set_driver_version(struct mcx_softc *); |
2520 | static int mcx_iff(struct mcx_softc *); |
2521 | static int mcx_alloc_uar(struct mcx_softc *, int *); |
2522 | static int mcx_alloc_pd(struct mcx_softc *); |
2523 | static int mcx_alloc_tdomain(struct mcx_softc *); |
2524 | static int mcx_create_eq(struct mcx_softc *, struct mcx_eq *, int, |
2525 | uint64_t, int); |
2526 | static int mcx_query_nic_vport_context(struct mcx_softc *); |
2527 | static int mcx_query_special_contexts(struct mcx_softc *); |
2528 | static int mcx_set_port_mtu(struct mcx_softc *, int); |
2529 | static int mcx_create_cq(struct mcx_softc *, struct mcx_cq *, int, int, |
2530 | int); |
2531 | static int mcx_destroy_cq(struct mcx_softc *, struct mcx_cq *); |
2532 | static int mcx_create_sq(struct mcx_softc *, struct mcx_tx *, int, int, |
2533 | int); |
2534 | static int mcx_destroy_sq(struct mcx_softc *, struct mcx_tx *); |
2535 | static int mcx_ready_sq(struct mcx_softc *, struct mcx_tx *); |
2536 | static int mcx_create_rq(struct mcx_softc *, struct mcx_rx *, int, int); |
2537 | static int mcx_destroy_rq(struct mcx_softc *, struct mcx_rx *); |
2538 | static int mcx_ready_rq(struct mcx_softc *, struct mcx_rx *); |
2539 | static int mcx_create_tir_direct(struct mcx_softc *, struct mcx_rx *, |
2540 | int *); |
2541 | static int mcx_create_tir_indirect(struct mcx_softc *, int, uint32_t, |
2542 | int *); |
2543 | static int mcx_destroy_tir(struct mcx_softc *, int); |
2544 | static int mcx_create_tis(struct mcx_softc *, int *); |
2545 | static int mcx_destroy_tis(struct mcx_softc *, int); |
2546 | static int mcx_create_rqt(struct mcx_softc *, int, int *, int *); |
2547 | static int mcx_destroy_rqt(struct mcx_softc *, int); |
2548 | static int mcx_create_flow_table(struct mcx_softc *, int, int, int *); |
2549 | static int mcx_set_flow_table_root(struct mcx_softc *, int); |
2550 | static int mcx_destroy_flow_table(struct mcx_softc *, int); |
2551 | static int mcx_create_flow_group(struct mcx_softc *, int, int, int, |
2552 | int, int, struct mcx_flow_match *); |
2553 | static int mcx_destroy_flow_group(struct mcx_softc *, int); |
2554 | static int mcx_set_flow_table_entry_mac(struct mcx_softc *, int, int, |
2555 | uint8_t *, uint32_t); |
2556 | static int mcx_set_flow_table_entry_proto(struct mcx_softc *, int, int, |
2557 | int, int, uint32_t); |
2558 | static int mcx_delete_flow_table_entry(struct mcx_softc *, int, int); |
2559 | |
2560 | #if NKSTAT0 > 0 |
2561 | static int mcx_query_rq(struct mcx_softc *, struct mcx_rx *, struct mcx_rq_ctx *); |
2562 | static int mcx_query_sq(struct mcx_softc *, struct mcx_tx *, struct mcx_sq_ctx *); |
2563 | static int mcx_query_cq(struct mcx_softc *, struct mcx_cq *, struct mcx_cq_ctx *); |
2564 | static int mcx_query_eq(struct mcx_softc *, struct mcx_eq *, struct mcx_eq_ctx *); |
2565 | #endif |
2566 | |
2567 | #if 0 |
2568 | static int mcx_dump_flow_table(struct mcx_softc *, int); |
2569 | static int mcx_dump_flow_table_entry(struct mcx_softc *, int, int); |
2570 | static int mcx_dump_flow_group(struct mcx_softc *, int); |
2571 | #endif |
2572 | |
2573 | |
2574 | /* |
2575 | static void mcx_cmdq_dump(const struct mcx_cmdq_entry *); |
2576 | static void mcx_cmdq_mbox_dump(struct mcx_dmamem *, int); |
2577 | */ |
2578 | static void mcx_refill(void *); |
2579 | static int mcx_process_rx(struct mcx_softc *, struct mcx_rx *, |
2580 | struct mcx_cq_entry *, struct mbuf_list *, |
2581 | const struct mcx_calibration *); |
2582 | static int mcx_process_txeof(struct mcx_softc *, struct mcx_tx *, |
2583 | struct mcx_cq_entry *); |
2584 | static void mcx_process_cq(struct mcx_softc *, struct mcx_queues *, |
2585 | struct mcx_cq *); |
2586 | |
2587 | static void mcx_arm_cq(struct mcx_softc *, struct mcx_cq *, int); |
2588 | static void mcx_arm_eq(struct mcx_softc *, struct mcx_eq *, int); |
2589 | static int mcx_admin_intr(void *); |
2590 | static int mcx_cq_intr(void *); |
2591 | |
2592 | static int mcx_up(struct mcx_softc *); |
2593 | static void mcx_down(struct mcx_softc *); |
2594 | static int mcx_ioctl(struct ifnet *, u_long, caddr_t); |
2595 | static int mcx_rxrinfo(struct mcx_softc *, struct if_rxrinfo *); |
2596 | static void mcx_start(struct ifqueue *); |
2597 | static void mcx_watchdog(struct ifnet *); |
2598 | static void mcx_media_add_types(struct mcx_softc *); |
2599 | static void mcx_media_status(struct ifnet *, struct ifmediareq *); |
2600 | static int mcx_media_change(struct ifnet *); |
2601 | static int mcx_get_sffpage(struct ifnet *, struct if_sffpage *); |
2602 | static void mcx_port_change(void *); |
2603 | |
2604 | static void mcx_calibrate_first(struct mcx_softc *); |
2605 | static void mcx_calibrate(void *); |
2606 | |
2607 | static inline uint32_t |
2608 | mcx_rd(struct mcx_softc *, bus_size_t); |
2609 | static inline void |
2610 | mcx_wr(struct mcx_softc *, bus_size_t, uint32_t); |
2611 | static inline void |
2612 | mcx_bar(struct mcx_softc *, bus_size_t, bus_size_t, int); |
2613 | |
2614 | static uint64_t mcx_timer(struct mcx_softc *); |
2615 | |
2616 | static int mcx_dmamem_alloc(struct mcx_softc *, struct mcx_dmamem *, |
2617 | bus_size_t, u_int align); |
2618 | static void mcx_dmamem_zero(struct mcx_dmamem *); |
2619 | static void mcx_dmamem_free(struct mcx_softc *, struct mcx_dmamem *); |
2620 | |
2621 | static int mcx_hwmem_alloc(struct mcx_softc *, struct mcx_hwmem *, |
2622 | unsigned int); |
2623 | static void mcx_hwmem_free(struct mcx_softc *, struct mcx_hwmem *); |
2624 | |
/* autoconf driver glue: device name "mcx", class network interface */
2625 | struct cfdriver mcx_cd = { |
2626 | NULL((void *)0), |
2627 | "mcx", |
2628 | DV_IFNET, |
2629 | }; |
2630 | |
/* autoconf attach glue: softc size plus match/attach entry points */
2631 | struct cfattach mcx_ca = { |
2632 | sizeof(struct mcx_softc), |
2633 | mcx_match, |
2634 | mcx_attach, |
2635 | }; |
2636 | |
/*
 * PCI ids this driver attaches to: ConnectX-4/4Lx/5/6 families,
 * physical and virtual functions.
 */
2637 | static const struct pci_matchid mcx_devices[] = { |
2638 | { PCI_VENDOR_MELLANOX0x15b3, PCI_PRODUCT_MELLANOX_MT277000x1013 }, |
2639 | { PCI_VENDOR_MELLANOX0x15b3, PCI_PRODUCT_MELLANOX_MT27700VF0x1014 }, |
2640 | { PCI_VENDOR_MELLANOX0x15b3, PCI_PRODUCT_MELLANOX_MT277100x1015 }, |
2641 | { PCI_VENDOR_MELLANOX0x15b3, PCI_PRODUCT_MELLANOX_MT27710VF0x1016 }, |
2642 | { PCI_VENDOR_MELLANOX0x15b3, PCI_PRODUCT_MELLANOX_MT278000x1017 }, |
2643 | { PCI_VENDOR_MELLANOX0x15b3, PCI_PRODUCT_MELLANOX_MT27800VF0x1018 }, |
2644 | { PCI_VENDOR_MELLANOX0x15b3, PCI_PRODUCT_MELLANOX_MT288000x1019 }, |
2645 | { PCI_VENDOR_MELLANOX0x15b3, PCI_PRODUCT_MELLANOX_MT28800VF0x101a }, |
2646 | { PCI_VENDOR_MELLANOX0x15b3, PCI_PRODUCT_MELLANOX_MT289080x101b }, |
2647 | { PCI_VENDOR_MELLANOX0x15b3, PCI_PRODUCT_MELLANOX_MT28920x101d }, |
2648 | }; |
2649 | |
/* maps one firmware ethernet protocol capability bit to ifmedia + baudrate */
2650 | struct mcx_eth_proto_capability { |
2651 | uint64_t cap_media; |
2652 | uint64_t cap_baudrate; |
2653 | }; |
2654 | |
/*
 * Capability-bit-indexed table translating the port's eth_proto
 * capability bits into ifmedia types and link speeds.  Gaps are
 * zero-filled by the designated initializers; a zero cap_media
 * (e.g. 50G SR2) means "no ifmedia word for this mode yet".
 */
2655 | static const struct mcx_eth_proto_capability mcx_eth_cap_map[] = { |
2656 | [MCX_ETHER_CAP_SGMII0] = { IFM_1000_SGMII36, IF_Gbps(1)((((((1) * 1000ULL) * 1000ULL) * 1000ULL))) }, |
2657 | [MCX_ETHER_CAP_1000_KX1] = { IFM_1000_KX28, IF_Gbps(1)((((((1) * 1000ULL) * 1000ULL) * 1000ULL))) }, |
2658 | [MCX_ETHER_CAP_10G_CX42] = { IFM_10G_CX420, IF_Gbps(10)((((((10) * 1000ULL) * 1000ULL) * 1000ULL))) }, |
2659 | [MCX_ETHER_CAP_10G_KX43] = { IFM_10G_KX429, IF_Gbps(10)((((((10) * 1000ULL) * 1000ULL) * 1000ULL))) }, |
2660 | [MCX_ETHER_CAP_10G_KR4] = { IFM_10G_KR30, IF_Gbps(10)((((((10) * 1000ULL) * 1000ULL) * 1000ULL))) }, |
2661 | [MCX_ETHER_CAP_40G_CR46] = { IFM_40G_CR425, IF_Gbps(40)((((((40) * 1000ULL) * 1000ULL) * 1000ULL))) }, |
2662 | [MCX_ETHER_CAP_40G_KR47] = { IFM_40G_KR440, IF_Gbps(40)((((((40) * 1000ULL) * 1000ULL) * 1000ULL))) }, |
2663 | [MCX_ETHER_CAP_10G_CR12] = { IFM_10G_SFP_CU23, IF_Gbps(10)((((((10) * 1000ULL) * 1000ULL) * 1000ULL))) }, |
2664 | [MCX_ETHER_CAP_10G_SR13] = { IFM_10G_SR19, IF_Gbps(10)((((((10) * 1000ULL) * 1000ULL) * 1000ULL))) }, |
2665 | [MCX_ETHER_CAP_10G_LR14] = { IFM_10G_LR18, IF_Gbps(10)((((((10) * 1000ULL) * 1000ULL) * 1000ULL))) }, |
2666 | [MCX_ETHER_CAP_40G_SR415] = { IFM_40G_SR426, IF_Gbps(40)((((((40) * 1000ULL) * 1000ULL) * 1000ULL))) }, |
2667 | [MCX_ETHER_CAP_40G_LR416] = { IFM_40G_LR427, IF_Gbps(40)((((((40) * 1000ULL) * 1000ULL) * 1000ULL))) }, |
2668 | [MCX_ETHER_CAP_50G_SR218] = { 0 /*IFM_50G_SR2*/, IF_Gbps(50)((((((50) * 1000ULL) * 1000ULL) * 1000ULL))) }, |
2669 | [MCX_ETHER_CAP_100G_CR420] = { IFM_100G_CR442, IF_Gbps(100)((((((100) * 1000ULL) * 1000ULL) * 1000ULL))) }, |
2670 | [MCX_ETHER_CAP_100G_SR421] = { IFM_100G_SR443, IF_Gbps(100)((((((100) * 1000ULL) * 1000ULL) * 1000ULL))) }, |
2671 | [MCX_ETHER_CAP_100G_KR422] = { IFM_100G_KR444, IF_Gbps(100)((((((100) * 1000ULL) * 1000ULL) * 1000ULL))) }, |
2672 | [MCX_ETHER_CAP_25G_CR27] = { IFM_25G_CR47, IF_Gbps(25)((((((25) * 1000ULL) * 1000ULL) * 1000ULL))) }, |
2673 | [MCX_ETHER_CAP_25G_KR28] = { IFM_25G_KR48, IF_Gbps(25)((((((25) * 1000ULL) * 1000ULL) * 1000ULL))) }, |
2674 | [MCX_ETHER_CAP_25G_SR29] = { IFM_25G_SR49, IF_Gbps(25)((((((25) * 1000ULL) * 1000ULL) * 1000ULL))) }, |
2675 | [MCX_ETHER_CAP_50G_CR230] = { IFM_50G_CR250, IF_Gbps(50)((((((50) * 1000ULL) * 1000ULL) * 1000ULL))) }, |
2676 | [MCX_ETHER_CAP_50G_KR231] = { IFM_50G_KR251, IF_Gbps(50)((((((50) * 1000ULL) * 1000ULL) * 1000ULL))) }, |
2677 | }; |
2678 | |
/*
 * Extract an object id from a big-endian command reply word:
 * byte-swap to host order and keep the low 24 bits (HCA object
 * ids are 24-bit; the top byte carries other fields).
 */
2679 | static int |
2680 | mcx_get_id(uint32_t val) |
2681 | { |
2682 | return betoh32(val)(__uint32_t)(__builtin_constant_p(val) ? (__uint32_t)(((__uint32_t )(val) & 0xff) << 24 | ((__uint32_t)(val) & 0xff00 ) << 8 | ((__uint32_t)(val) & 0xff0000) >> 8 | ((__uint32_t)(val) & 0xff000000) >> 24) : __swap32md (val)) & 0x00ffffff; |
2683 | } |
2684 | |
/*
 * autoconf match: accept any PCI device listed in mcx_devices[].
 */
2685 | static int |
2686 | mcx_match(struct device *parent, void *match, void *aux) |
2687 | { |
2688 | return (pci_matchbyid(aux, mcx_devices, nitems(mcx_devices)(sizeof((mcx_devices)) / sizeof((mcx_devices)[0])))); |
2689 | } |
2690 | |
/*
 * autoconf attach: map the HCA BAR, validate the firmware command
 * interface, set up the command queue, walk the firmware bring-up
 * sequence (ENABLE_HCA, ISSI, page grants, INIT_HCA, ...), allocate
 * the admin EQ plus one EQ/interrupt per data queue, and attach the
 * network interface.  Failures unwind through the labels at the
 * bottom in reverse order of acquisition.
 */
2691 | void |
2692 | mcx_attach(struct device *parent, struct device *self, void *aux) |
2693 | { |
2694 | struct mcx_softc *sc = (struct mcx_softc *)self; |
2695 | struct ifnet *ifp = &sc->sc_ac.ac_if; |
2696 | struct pci_attach_args *pa = aux; |
2697 | pcireg_t memtype; |
2698 | uint32_t r; |
2699 | unsigned int cq_stride; |
2700 | unsigned int cq_size; |
2701 | const char *intrstr; |
2702 | int i, msix; |
2703 | |
2704 | sc->sc_pc = pa->pa_pc; |
2705 | sc->sc_tag = pa->pa_tag; |
2706 | sc->sc_dmat = pa->pa_dmat; |
2707 | |
2708 | /* Map the PCI memory space */ |
2709 | memtype = pci_mapreg_type(sc->sc_pc, sc->sc_tag, MCX_HCA_BAR0x10); |
2710 | if (pci_mapreg_map(pa, MCX_HCA_BAR0x10, memtype, |
2711 | BUS_SPACE_MAP_PREFETCHABLE0x0008, &sc->sc_memt, &sc->sc_memh, |
2712 | NULL((void *)0), &sc->sc_mems, 0)) { |
2713 | printf(": unable to map register memory\n"); |
2714 | return; |
2715 | } |
2716 | |
2717 | if (mcx_version(sc) != 0) { |
2718 | /* error printed by mcx_version */ |
2719 | goto unmap; |
2720 | } |
2721 | |
/* Read command queue geometry from the init segment and sanity-check
 * that the whole queue fits in the single page we allocate for it. */
2722 | r = mcx_rd(sc, MCX_CMDQ_ADDR_LO0x0014); |
2723 | cq_stride = 1 << MCX_CMDQ_LOG_STRIDE(r)((r) >> 0 & 0xf); /* size of the entries */ |
2724 | cq_size = 1 << MCX_CMDQ_LOG_SIZE(r)((r) >> 4 & 0xf); /* number of entries */ |
2725 | if (cq_size > MCX_MAX_CQE32) { |
2726 | printf(", command queue size overflow %u\n", cq_size); |
2727 | goto unmap; |
2728 | } |
2729 | if (cq_stride < sizeof(struct mcx_cmdq_entry)) { |
2730 | printf(", command queue entry size underflow %u\n", cq_stride); |
2731 | goto unmap; |
2732 | } |
2733 | if (cq_stride * cq_size > MCX_PAGE_SIZE(1 << 12)) { |
2734 | printf(", command queue page overflow\n"); |
2735 | goto unmap; |
2736 | } |
2737 | |
2738 | if (mcx_dmamem_alloc(sc, &sc->sc_doorbell_mem, MCX_DOORBELL_AREA_SIZE(1 << 12), |
2739 | MCX_PAGE_SIZE(1 << 12)) != 0) { |
2740 | printf(", unable to allocate doorbell memory\n"); |
2741 | goto unmap; |
2742 | } |
2743 | |
2744 | if (mcx_dmamem_alloc(sc, &sc->sc_cmdq_mem, MCX_PAGE_SIZE(1 << 12), |
2745 | MCX_PAGE_SIZE(1 << 12)) != 0) { |
2746 | printf(", unable to allocate command queue\n"); |
2747 | goto dbfree; |
2748 | } |
2749 | |
/* Tell the device where the command queue lives: high word first,
 * then low word (the low-word write activates the queue). */
2750 | mcx_wr(sc, MCX_CMDQ_ADDR_HI0x0010, MCX_DMA_DVA(&sc->sc_cmdq_mem)((&sc->sc_cmdq_mem)->mxm_map->dm_segs[0].ds_addr ) >> 32); |
2751 | mcx_bar(sc, MCX_CMDQ_ADDR_HI0x0010, sizeof(uint32_t), |
2752 | BUS_SPACE_BARRIER_WRITE0x02); |
2753 | mcx_wr(sc, MCX_CMDQ_ADDR_LO0x0014, MCX_DMA_DVA(&sc->sc_cmdq_mem)((&sc->sc_cmdq_mem)->mxm_map->dm_segs[0].ds_addr )); |
2754 | mcx_bar(sc, MCX_CMDQ_ADDR_LO0x0014, sizeof(uint32_t), |
2755 | BUS_SPACE_BARRIER_WRITE0x02); |
2756 | |
2757 | if (mcx_init_wait(sc) != 0) { |
2758 | printf(", timeout waiting for init\n"); |
2759 | goto cqfree; |
2760 | } |
2761 | |
2762 | sc->sc_cmdq_mask = cq_size - 1; |
2763 | sc->sc_cmdq_size = cq_stride; |
2764 | |
/* Firmware initialisation sequence; each helper prints its own
 * error message, so failures here just unwind. */
2765 | if (mcx_enable_hca(sc) != 0) { |
2766 | /* error printed by mcx_enable_hca */ |
2767 | goto cqfree; |
2768 | } |
2769 | |
2770 | if (mcx_issi(sc) != 0) { |
2771 | /* error printed by mcx_issi */ |
2772 | goto teardown; |
2773 | } |
2774 | |
2775 | if (mcx_pages(sc, &sc->sc_boot_pages, |
2776 | htobe16(MCX_CMD_QUERY_PAGES_BOOT)(__uint16_t)(__builtin_constant_p(0x01) ? (__uint16_t)(((__uint16_t )(0x01) & 0xffU) << 8 | ((__uint16_t)(0x01) & 0xff00U ) >> 8) : __swap16md(0x01))) != 0) { |
2777 | /* error printed by mcx_pages */ |
2778 | goto teardown; |
2779 | } |
2780 | |
2781 | if (mcx_hca_max_caps(sc) != 0) { |
2782 | /* error printed by mcx_hca_max_caps */ |
2783 | goto teardown; |
2784 | } |
2785 | |
2786 | if (mcx_hca_set_caps(sc) != 0) { |
2787 | /* error printed by mcx_hca_set_caps */ |
2788 | goto teardown; |
2789 | } |
2790 | |
2791 | if (mcx_pages(sc, &sc->sc_init_pages, |
2792 | htobe16(MCX_CMD_QUERY_PAGES_INIT)(__uint16_t)(__builtin_constant_p(0x02) ? (__uint16_t)(((__uint16_t )(0x02) & 0xffU) << 8 | ((__uint16_t)(0x02) & 0xff00U ) >> 8) : __swap16md(0x02))) != 0) { |
2793 | /* error printed by mcx_pages */ |
2794 | goto teardown; |
2795 | } |
2796 | |
2797 | if (mcx_init_hca(sc) != 0) { |
2798 | /* error printed by mcx_init_hca */ |
2799 | goto teardown; |
2800 | } |
2801 | |
2802 | if (mcx_pages(sc, &sc->sc_regular_pages, |
2803 | htobe16(MCX_CMD_QUERY_PAGES_REGULAR)(__uint16_t)(__builtin_constant_p(0x03) ? (__uint16_t)(((__uint16_t )(0x03) & 0xffU) << 8 | ((__uint16_t)(0x03) & 0xff00U ) >> 8) : __swap16md(0x03))) != 0) { |
2804 | /* error printed by mcx_pages */ |
2805 | goto teardown; |
2806 | } |
2807 | |
2808 | /* apparently not necessary? */ |
2809 | if (mcx_set_driver_version(sc) != 0) { |
2810 | /* error printed by mcx_set_driver_version */ |
2811 | goto teardown; |
2812 | } |
2813 | |
2814 | if (mcx_iff(sc) != 0) { /* modify nic vport context */ |
2815 | /* error printed by mcx_iff? */ |
2816 | goto teardown; |
2817 | } |
2818 | |
2819 | if (mcx_alloc_uar(sc, &sc->sc_uar) != 0) { |
2820 | /* error printed by mcx_alloc_uar */ |
2821 | goto teardown; |
2822 | } |
2823 | |
2824 | if (mcx_alloc_pd(sc) != 0) { |
2825 | /* error printed by mcx_alloc_pd */ |
2826 | goto teardown; |
2827 | } |
2828 | |
2829 | if (mcx_alloc_tdomain(sc) != 0) { |
2830 | /* error printed by mcx_alloc_tdomain */ |
2831 | goto teardown; |
2832 | } |
2833 | |
/* vector 0 is the admin interrupt; the data queues need at least
 * one more, hence the check for two. */
2834 | msix = pci_intr_msix_count(pa); |
2835 | if (msix < 2) { |
2836 | printf(": not enough msi-x vectors\n"); |
2837 | goto teardown; |
2838 | } |
2839 | |
2840 | /* |
2841 | * PRM makes no mention of msi interrupts, just legacy and msi-x. |
2842 | * mellanox support tells me legacy interrupts are not supported, |
2843 | * so we're stuck with just msi-x. |
2844 | */ |
2845 | if (pci_intr_map_msix(pa, 0, &sc->sc_ih) != 0) { |
2846 | printf(": unable to map interrupt\n"); |
2847 | goto teardown; |
2848 | } |
2849 | intrstr = pci_intr_string(sc->sc_pc, sc->sc_ih); |
2850 | sc->sc_ihc = pci_intr_establish(sc->sc_pc, sc->sc_ih, |
2851 | IPL_NET0x7 | IPL_MPSAFE0x100, mcx_admin_intr, sc, DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
2852 | if (sc->sc_ihc == NULL((void *)0)) { |
2853 | printf(": unable to establish interrupt"); |
2854 | if (intrstr != NULL((void *)0)) |
2855 | printf(" at %s", intrstr); |
2856 | printf("\n"); |
2857 | goto teardown; |
2858 | } |
2859 | |
2860 | if (mcx_create_eq(sc, &sc->sc_admin_eq, sc->sc_uar, |
2861 | (1ull << MCX_EVENT_TYPE_INTERNAL_ERROR0x08) | |
2862 | (1ull << MCX_EVENT_TYPE_PORT_CHANGE0x09) | |
2863 | (1ull << MCX_EVENT_TYPE_CMD_COMPLETION0x0a) | |
2864 | (1ull << MCX_EVENT_TYPE_PAGE_REQUEST0x0b), 0) != 0) { |
2865 | /* error printed by mcx_create_eq */ |
2866 | goto teardown; |
2867 | } |
2868 | |
2869 | if (mcx_query_nic_vport_context(sc) != 0) { |
2870 | /* error printed by mcx_query_nic_vport_context */ |
2871 | goto teardown; |
2872 | } |
2873 | |
2874 | if (mcx_query_special_contexts(sc) != 0) { |
2875 | /* error printed by mcx_query_special_contexts */ |
2876 | goto teardown; |
2877 | } |
2878 | |
2879 | if (mcx_set_port_mtu(sc, MCX_HARDMTU9500) != 0) { |
2880 | /* error printed by mcx_set_port_mtu */ |
2881 | goto teardown; |
2882 | } |
2883 | |
2884 | printf(", %s, address %s\n", intrstr, |
2885 | ether_sprintf(sc->sc_ac.ac_enaddr)); |
2886 | |
/* Distribute the remaining vectors across CPUs, one queue pair per
 * vector, capped at MCX_MAX_QUEUES and rounded to a power of two. */
2887 | msix--; /* admin ops took one */ |
2888 | sc->sc_intrmap = intrmap_create(&sc->sc_dev, msix, MCX_MAX_QUEUES16, |
2889 | INTRMAP_POWEROF2(1 << 0)); |
2890 | if (sc->sc_intrmap == NULL((void *)0)) { |
2891 | printf("%s: unable to create interrupt map\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
2892 | goto teardown; |
2893 | } |
2894 | sc->sc_queues = mallocarray(intrmap_count(sc->sc_intrmap), |
2895 | sizeof(*sc->sc_queues), M_DEVBUF2, M_WAITOK0x0001|M_ZERO0x0008); |
2896 | if (sc->sc_queues == NULL((void *)0)) { |
2897 | printf("%s: unable to create queues\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
2898 | goto intrunmap; |
2899 | } |
2900 | |
/* Generic ifnet setup: checksum offloads, optional vlan tagging. */
2901 | strlcpy(ifp->if_xname, DEVNAME(sc)((sc)->sc_dev.dv_xname), IFNAMSIZ16); |
2902 | ifp->if_softc = sc; |
2903 | ifp->if_flags = IFF_BROADCAST0x2 | IFF_MULTICAST0x8000 | IFF_SIMPLEX0x800; |
2904 | ifp->if_xflags = IFXF_MPSAFE0x1; |
2905 | ifp->if_ioctl = mcx_ioctl; |
2906 | ifp->if_qstart = mcx_start; |
2907 | ifp->if_watchdog = mcx_watchdog; |
2908 | ifp->if_hardmtu = sc->sc_hardmtu; |
2909 | ifp->if_capabilitiesif_data.ifi_capabilities = IFCAP_VLAN_MTU0x00000010 | IFCAP_CSUM_IPv40x00000001 | |
2910 | IFCAP_CSUM_UDPv40x00000004 | IFCAP_CSUM_UDPv60x00000100 | IFCAP_CSUM_TCPv40x00000002 | |
2911 | IFCAP_CSUM_TCPv60x00000080; |
2912 | #if NVLAN1 > 0 |
2913 | ifp->if_capabilitiesif_data.ifi_capabilities |= IFCAP_VLAN_HWTAGGING0x00000020; |
2914 | #endif |
2915 | ifq_set_maxlen(&ifp->if_snd, 1024)((&ifp->if_snd)->ifq_maxlen = (1024)); |
2916 | |
2917 | ifmedia_init(&sc->sc_media, IFM_IMASK0xff00000000000000ULL, mcx_media_change, |
2918 | mcx_media_status); |
2919 | mcx_media_add_types(sc); |
2920 | ifmedia_add(&sc->sc_media, IFM_ETHER0x0000000000000100ULL | IFM_AUTO0ULL, 0, NULL((void *)0)); |
2921 | ifmedia_set(&sc->sc_media, IFM_ETHER0x0000000000000100ULL | IFM_AUTO0ULL); |
2922 | |
2923 | if_attach(ifp); |
2924 | ether_ifattach(ifp); |
2925 | |
/* Per-queue setup: a UAR, an EQ, rx/tx back-pointers, and a CPU-
 * pinned MSI-X handler for each queue pair (vector i + 1). */
2926 | if_attach_iqueues(ifp, intrmap_count(sc->sc_intrmap)); |
2927 | if_attach_queues(ifp, intrmap_count(sc->sc_intrmap)); |
2928 | for (i = 0; i < intrmap_count(sc->sc_intrmap); i++) { |
2929 | struct ifiqueue *ifiq = ifp->if_iqs[i]; |
2930 | struct ifqueue *ifq = ifp->if_ifqs[i]; |
2931 | struct mcx_queues *q = &sc->sc_queues[i]; |
2932 | struct mcx_rx *rx = &q->q_rx; |
2933 | struct mcx_tx *tx = &q->q_tx; |
2934 | pci_intr_handle_t ih; |
2935 | int vec; |
2936 | |
2937 | vec = i + 1; |
2938 | q->q_sc = sc; |
2939 | q->q_index = i; |
2940 | |
2941 | if (mcx_alloc_uar(sc, &q->q_uar) != 0) { |
2942 | printf("%s: unable to alloc uar %d\n", |
2943 | DEVNAME(sc)((sc)->sc_dev.dv_xname), i); |
2944 | goto intrdisestablish; |
2945 | } |
2946 | |
2947 | if (mcx_create_eq(sc, &q->q_eq, q->q_uar, 0, vec) != 0) { |
2948 | printf("%s: unable to create event queue %d\n", |
2949 | DEVNAME(sc)((sc)->sc_dev.dv_xname), i); |
2950 | goto intrdisestablish; |
2951 | } |
2952 | |
2953 | rx->rx_softc = sc; |
2954 | rx->rx_ifiq = ifiq; |
2955 | timeout_set(&rx->rx_refill, mcx_refill, rx); |
2956 | ifiq->ifiq_softc_ifiq_ptr._ifiq_softc = rx; |
2957 | |
2958 | tx->tx_softc = sc; |
2959 | tx->tx_ifq = ifq; |
2960 | ifq->ifq_softc_ifq_ptr._ifq_softc = tx; |
2961 | |
2962 | if (pci_intr_map_msix(pa, vec, &ih) != 0) { |
2963 | printf("%s: unable to map queue interrupt %d\n", |
2964 | DEVNAME(sc)((sc)->sc_dev.dv_xname), i); |
2965 | goto intrdisestablish; |
2966 | } |
2967 | snprintf(q->q_name, sizeof(q->q_name), "%s:%d", |
2968 | DEVNAME(sc)((sc)->sc_dev.dv_xname), i); |
2969 | q->q_ihc = pci_intr_establish_cpu(sc->sc_pc, ih, |
2970 | IPL_NET0x7 | IPL_MPSAFE0x100, intrmap_cpu(sc->sc_intrmap, i), |
2971 | mcx_cq_intr, q, q->q_name); |
2972 | if (q->q_ihc == NULL((void *)0)) { |
2973 | printf("%s: unable to establish interrupt %d\n", |
2974 | DEVNAME(sc)((sc)->sc_dev.dv_xname), i); |
2975 | goto intrdisestablish; |
2976 | } |
2977 | } |
2978 | |
2979 | timeout_set(&sc->sc_calibrate, mcx_calibrate, sc); |
2980 | |
2981 | task_set(&sc->sc_port_change, mcx_port_change, sc); |
2982 | mcx_port_change(sc); |
2983 | |
/* Flow table/group ids start invalid (-1); created later in mcx_up(). */
2984 | sc->sc_mac_flow_table_id = -1; |
2985 | sc->sc_rss_flow_table_id = -1; |
2986 | sc->sc_rqt = -1; |
2987 | for (i = 0; i < MCX_NUM_FLOW_GROUPS6; i++) { |
2988 | struct mcx_flow_group *mfg = &sc->sc_flow_group[i]; |
2989 | mfg->g_id = -1; |
2990 | mfg->g_table = -1; |
2991 | mfg->g_size = 0; |
2992 | mfg->g_start = 0; |
2993 | } |
2994 | sc->sc_extra_mcast = 0; |
2995 | memset(sc->sc_mcast_flows, 0, sizeof(sc->sc_mcast_flows))__builtin_memset((sc->sc_mcast_flows), (0), (sizeof(sc-> sc_mcast_flows))); |
2996 | |
2997 | #if NKSTAT0 > 0 |
2998 | mcx_kstat_attach(sc); |
2999 | #endif |
3000 | mcx_timecounter_attach(sc); |
3001 | return; |
3002 | |
/* Error unwind: tear down queue interrupts, the interrupt map, the
 * HCA, the command queue mapping, and finally the BAR mapping. */
3003 | intrdisestablish: |
3004 | for (i = 0; i < intrmap_count(sc->sc_intrmap); i++) { |
3005 | struct mcx_queues *q = &sc->sc_queues[i]; |
3006 | if (q->q_ihc == NULL((void *)0)) |
3007 | continue; |
3008 | pci_intr_disestablish(sc->sc_pc, q->q_ihc); |
3009 | q->q_ihc = NULL((void *)0); |
3010 | } |
3011 | free(sc->sc_queues, M_DEVBUF2, |
3012 | intrmap_count(sc->sc_intrmap) * sizeof(*sc->sc_queues)); |
3013 | intrunmap: |
3014 | intrmap_destroy(sc->sc_intrmap); |
3015 | sc->sc_intrmap = NULL((void *)0); |
3016 | teardown: |
3017 | mcx_teardown_hca(sc, htobe16(MCX_CMD_TEARDOWN_HCA_GRACEFUL)(__uint16_t)(__builtin_constant_p(0x0) ? (__uint16_t)(((__uint16_t )(0x0) & 0xffU) << 8 | ((__uint16_t)(0x0) & 0xff00U ) >> 8) : __swap16md(0x0))); |
3018 | /* error printed by mcx_teardown_hca, and we're already unwinding */ |
3019 | cqfree: |
3020 | mcx_wr(sc, MCX_CMDQ_ADDR_HI0x0010, MCX_DMA_DVA(&sc->sc_cmdq_mem)((&sc->sc_cmdq_mem)->mxm_map->dm_segs[0].ds_addr ) >> 32); |
3021 | mcx_bar(sc, MCX_CMDQ_ADDR_HI0x0010, sizeof(uint64_t), |
3022 | BUS_SPACE_BARRIER_WRITE0x02); |
3023 | mcx_wr(sc, MCX_CMDQ_ADDR_LO0x0014, MCX_DMA_DVA(&sc->sc_cmdq_mem)((&sc->sc_cmdq_mem)->mxm_map->dm_segs[0].ds_addr ) | |
3024 | MCX_CMDQ_INTERFACE_DISABLED(0x1 << 8)); |
3025 | mcx_bar(sc, MCX_CMDQ_ADDR_LO0x0014, sizeof(uint64_t), |
3026 | BUS_SPACE_BARRIER_WRITE0x02); |
3027 | |
3028 | mcx_wr(sc, MCX_CMDQ_ADDR_HI0x0010, 0); |
3029 | mcx_bar(sc, MCX_CMDQ_ADDR_HI0x0010, sizeof(uint64_t), |
3030 | BUS_SPACE_BARRIER_WRITE0x02); |
3031 | mcx_wr(sc, MCX_CMDQ_ADDR_LO0x0014, MCX_CMDQ_INTERFACE_DISABLED(0x1 << 8)); |
3032 | |
3033 | mcx_dmamem_free(sc, &sc->sc_cmdq_mem); |
3034 | dbfree: |
3035 | mcx_dmamem_free(sc, &sc->sc_doorbell_mem); |
3036 | unmap: |
3037 | bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems); |
3038 | sc->sc_mems = 0; |
3039 | } |
3040 | |
3041 | static int |
3042 | mcx_version(struct mcx_softc *sc) |
3043 | { |
3044 | uint32_t fw0, fw1; |
3045 | uint16_t cmdif; |
3046 | |
3047 | fw0 = mcx_rd(sc, MCX_FW_VER0x0000); |
3048 | fw1 = mcx_rd(sc, MCX_CMDIF_FW_SUBVER0x0004); |
3049 | |
3050 | printf(": FW %u.%u.%04u", MCX_FW_VER_MAJOR(fw0)((fw0) & 0xffff), |
3051 | MCX_FW_VER_MINOR(fw0)((fw0) >> 16), MCX_FW_VER_SUBMINOR(fw1)((fw1) & 0xffff)); |
3052 | |
3053 | cmdif = MCX_CMDIF(fw1)((fw1) >> 16); |
3054 | if (cmdif != MCX_CMD_IF_SUPPORTED5) { |
3055 | printf(", unsupported command interface %u\n", cmdif); |
3056 | return (-1); |
3057 | } |
3058 | |
3059 | return (0); |
3060 | } |
3061 | |
3062 | static int |
3063 | mcx_init_wait(struct mcx_softc *sc) |
3064 | { |
3065 | unsigned int i; |
3066 | uint32_t r; |
3067 | |
3068 | for (i = 0; i < 2000; i++) { |
3069 | r = mcx_rd(sc, MCX_STATE0x01fc); |
3070 | if ((r & MCX_STATE_MASK(1 << 31)) == MCX_STATE_READY(0 << 31)) |
3071 | return (0); |
3072 | |
3073 | delay(1000)(*delay_func)(1000); |
3074 | mcx_bar(sc, MCX_STATE0x01fc, sizeof(uint32_t), |
3075 | BUS_SPACE_BARRIER_READ0x01); |
3076 | } |
3077 | |
3078 | return (-1); |
3079 | } |
3080 | |
3081 | static uint8_t |
3082 | mcx_cmdq_poll(struct mcx_softc *sc, struct mcx_cmdq_entry *cqe, |
3083 | unsigned int msec) |
3084 | { |
3085 | unsigned int i; |
3086 | |
3087 | for (i = 0; i < msec; i++) { |
3088 | bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_cmdq_mem),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& sc->sc_cmdq_mem)->mxm_map)), (0), (((&sc->sc_cmdq_mem )->mxm_size)), ((0x02|0x08))) |
3089 | 0, MCX_DMA_LEN(&sc->sc_cmdq_mem), BUS_DMASYNC_POSTRW)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& sc->sc_cmdq_mem)->mxm_map)), (0), (((&sc->sc_cmdq_mem )->mxm_size)), ((0x02|0x08))); |
3090 | |
3091 | if ((cqe->cq_status & MCX_CQ_STATUS_OWN_MASK0x1) == |
3092 | MCX_CQ_STATUS_OWN_SW0x0) |
3093 | return (0); |
3094 | |
3095 | delay(1000)(*delay_func)(1000); |
3096 | } |
3097 | |
3098 | return (ETIMEDOUT60); |
3099 | } |
3100 | |
/*
 * Fold a 64-bit value into a running 32-bit XOR accumulator, high
 * half first.  Part of the command queue signature calculation.
 */
static uint32_t
mcx_mix_u64(uint32_t xor, uint64_t u64)
{
	return (xor ^ (uint32_t)(u64 >> 32) ^ (uint32_t)u64);
}
3109 | |
/*
 * Fold a 32-bit value into a running XOR accumulator.
 */
static uint32_t
mcx_mix_u32(uint32_t xor, uint32_t u32)
{
	return (xor ^ u32);
}
3117 | |
/*
 * Fold an 8-bit value into a running 32-bit XOR accumulator.
 */
static uint32_t
mcx_mix_u8(uint32_t xor, uint8_t u8)
{
	return (xor ^ u8);
}
3125 | |
/*
 * Collapse a 32-bit XOR accumulator down to the single signature byte
 * by folding the upper bytes into the lowest one.
 */
static uint8_t
mcx_mix_done(uint32_t xor)
{
	uint32_t folded = xor ^ (xor >> 16);

	folded ^= folded >> 8;

	return (folded);
}
3134 | |
/*
 * Compute the signature byte over a buffer: XOR of all 32-bit words
 * (seeded with 0xff) folded down to 8 bits.  "len" is truncated to a
 * whole number of 32-bit words; the buffer must be 4-byte aligned.
 */
static uint8_t
mcx_xor(const void *buf, size_t len)
{
	const uint32_t *words = buf;
	uint32_t acc = 0xff;
	size_t n = len / sizeof(*words);
	size_t i;

	for (i = 0; i < n; i++)
		acc ^= words[i];

	return (mcx_mix_done(acc));
}
3149 | |
3150 | static uint8_t |
3151 | mcx_cmdq_token(struct mcx_softc *sc) |
3152 | { |
3153 | uint8_t token; |
3154 | |
3155 | do { |
3156 | token = ++sc->sc_cmdq_token; |
3157 | } while (token == 0); |
3158 | |
3159 | return (token); |
3160 | } |
3161 | |
3162 | static void |
3163 | mcx_cmdq_init(struct mcx_softc *sc, struct mcx_cmdq_entry *cqe, |
3164 | uint32_t ilen, uint32_t olen, uint8_t token) |
3165 | { |
3166 | memset(cqe, 0, sc->sc_cmdq_size)__builtin_memset((cqe), (0), (sc->sc_cmdq_size)); |
3167 | |
3168 | cqe->cq_type = MCX_CMDQ_TYPE_PCIE0x7; |
3169 | htobem32(&cqe->cq_input_length, ilen)(*(__uint32_t *)(&cqe->cq_input_length) = (__uint32_t) (__builtin_constant_p(ilen) ? (__uint32_t)(((__uint32_t)(ilen ) & 0xff) << 24 | ((__uint32_t)(ilen) & 0xff00) << 8 | ((__uint32_t)(ilen) & 0xff0000) >> 8 | ((__uint32_t)(ilen) & 0xff000000) >> 24) : __swap32md (ilen))); |
3170 | htobem32(&cqe->cq_output_length, olen)(*(__uint32_t *)(&cqe->cq_output_length) = (__uint32_t )(__builtin_constant_p(olen) ? (__uint32_t)(((__uint32_t)(olen ) & 0xff) << 24 | ((__uint32_t)(olen) & 0xff00) << 8 | ((__uint32_t)(olen) & 0xff0000) >> 8 | ((__uint32_t)(olen) & 0xff000000) >> 24) : __swap32md (olen))); |
3171 | cqe->cq_token = token; |
3172 | cqe->cq_status = MCX_CQ_STATUS_OWN_HW0x1; |
3173 | } |
3174 | |
/*
 * Stamp the complement of the XOR signature over the whole entry into
 * its signature byte, as the firmware expects before consuming it.
 */
static void
mcx_cmdq_sign(struct mcx_cmdq_entry *cqe)
{
	cqe->cq_signature = ~mcx_xor(cqe, sizeof(*cqe));
}
3180 | |
/*
 * Verify the signature of a completed command queue entry.  The real
 * check is deliberately disabled (left in the comment below); callers
 * still go through this hook so it can be re-enabled in one place.
 * Always returns 0 (success) for now.
 */
static int
mcx_cmdq_verify(const struct mcx_cmdq_entry *cqe)
{
	/* return (mcx_xor(cqe, sizeof(*cqe)) ? -1 : 0); */
	return (0);
}
3187 | |
/* Return a pointer to the inline input data area of a cq entry. */
static void *
mcx_cmdq_in(struct mcx_cmdq_entry *cqe)
{
	return (&cqe->cq_input_data);
}
3193 | |
/* Return a pointer to the inline output data area of a cq entry. */
static void *
mcx_cmdq_out(struct mcx_cmdq_entry *cqe)
{
	return (&cqe->cq_output_data);
}
3199 | |
3200 | static void |
3201 | mcx_cmdq_post(struct mcx_softc *sc, struct mcx_cmdq_entry *cqe, |
3202 | unsigned int slot) |
3203 | { |
3204 | mcx_cmdq_sign(cqe); |
3205 | |
3206 | bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_cmdq_mem),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& sc->sc_cmdq_mem)->mxm_map)), (0), (((&sc->sc_cmdq_mem )->mxm_size)), ((0x01|0x04))) |
3207 | 0, MCX_DMA_LEN(&sc->sc_cmdq_mem), BUS_DMASYNC_PRERW)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& sc->sc_cmdq_mem)->mxm_map)), (0), (((&sc->sc_cmdq_mem )->mxm_size)), ((0x01|0x04))); |
3208 | |
3209 | mcx_wr(sc, MCX_CMDQ_DOORBELL0x0018, 1U << slot); |
3210 | mcx_bar(sc, MCX_CMDQ_DOORBELL0x0018, sizeof(uint32_t), |
3211 | BUS_SPACE_BARRIER_WRITE0x02); |
3212 | } |
3213 | |
3214 | static int |
3215 | mcx_enable_hca(struct mcx_softc *sc) |
3216 | { |
3217 | struct mcx_cmdq_entry *cqe; |
3218 | struct mcx_cmd_enable_hca_in *in; |
3219 | struct mcx_cmd_enable_hca_out *out; |
3220 | int error; |
3221 | uint8_t status; |
3222 | |
3223 | cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem)((void *)(&sc->sc_cmdq_mem)->mxm_kva); |
3224 | mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc)); |
3225 | |
3226 | in = mcx_cmdq_in(cqe); |
3227 | in->cmd_opcode = htobe16(MCX_CMD_ENABLE_HCA)(__uint16_t)(__builtin_constant_p(0x104) ? (__uint16_t)(((__uint16_t )(0x104) & 0xffU) << 8 | ((__uint16_t)(0x104) & 0xff00U) >> 8) : __swap16md(0x104)); |
3228 | in->cmd_op_mod = htobe16(0)(__uint16_t)(__builtin_constant_p(0) ? (__uint16_t)(((__uint16_t )(0) & 0xffU) << 8 | ((__uint16_t)(0) & 0xff00U ) >> 8) : __swap16md(0)); |
3229 | in->cmd_function_id = htobe16(0)(__uint16_t)(__builtin_constant_p(0) ? (__uint16_t)(((__uint16_t )(0) & 0xffU) << 8 | ((__uint16_t)(0) & 0xff00U ) >> 8) : __swap16md(0)); |
3230 | |
3231 | mcx_cmdq_post(sc, cqe, 0); |
3232 | |
3233 | error = mcx_cmdq_poll(sc, cqe, 1000); |
3234 | if (error != 0) { |
3235 | printf(", hca enable timeout\n"); |
3236 | return (-1); |
3237 | } |
3238 | if (mcx_cmdq_verify(cqe) != 0) { |
3239 | printf(", hca enable command corrupt\n"); |
3240 | return (-1); |
3241 | } |
3242 | |
3243 | status = cqe->cq_output_data[0]; |
3244 | if (status != MCX_CQ_STATUS_OK(0x00 << 1)) { |
3245 | printf(", hca enable failed (%x)\n", status); |
3246 | return (-1); |
3247 | } |
3248 | |
3249 | return (0); |
3250 | } |
3251 | |
3252 | static int |
3253 | mcx_teardown_hca(struct mcx_softc *sc, uint16_t profile) |
3254 | { |
3255 | struct mcx_cmdq_entry *cqe; |
3256 | struct mcx_cmd_teardown_hca_in *in; |
3257 | struct mcx_cmd_teardown_hca_out *out; |
3258 | int error; |
3259 | uint8_t status; |
3260 | |
3261 | cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem)((void *)(&sc->sc_cmdq_mem)->mxm_kva); |
3262 | mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc)); |
3263 | |
3264 | in = mcx_cmdq_in(cqe); |
3265 | in->cmd_opcode = htobe16(MCX_CMD_TEARDOWN_HCA)(__uint16_t)(__builtin_constant_p(0x103) ? (__uint16_t)(((__uint16_t )(0x103) & 0xffU) << 8 | ((__uint16_t)(0x103) & 0xff00U) >> 8) : __swap16md(0x103)); |
3266 | in->cmd_op_mod = htobe16(0)(__uint16_t)(__builtin_constant_p(0) ? (__uint16_t)(((__uint16_t )(0) & 0xffU) << 8 | ((__uint16_t)(0) & 0xff00U ) >> 8) : __swap16md(0)); |
3267 | in->cmd_profile = profile; |
3268 | |
3269 | mcx_cmdq_post(sc, cqe, 0); |
3270 | |
3271 | error = mcx_cmdq_poll(sc, cqe, 1000); |
3272 | if (error != 0) { |
3273 | printf(", hca teardown timeout\n"); |
3274 | return (-1); |
3275 | } |
3276 | if (mcx_cmdq_verify(cqe) != 0) { |
3277 | printf(", hca teardown command corrupt\n"); |
3278 | return (-1); |
3279 | } |
3280 | |
3281 | status = cqe->cq_output_data[0]; |
3282 | if (status != MCX_CQ_STATUS_OK(0x00 << 1)) { |
3283 | printf(", hca teardown failed (%x)\n", status); |
3284 | return (-1); |
3285 | } |
3286 | |
3287 | return (0); |
3288 | } |
3289 | |
3290 | static int |
3291 | mcx_cmdq_mboxes_alloc(struct mcx_softc *sc, struct mcx_dmamem *mxm, |
3292 | unsigned int nmb, uint64_t *ptr, uint8_t token) |
3293 | { |
3294 | caddr_t kva; |
3295 | uint64_t dva; |
3296 | int i; |
3297 | int error; |
3298 | |
3299 | error = mcx_dmamem_alloc(sc, mxm, |
3300 | nmb * MCX_CMDQ_MAILBOX_SIZE((((sizeof(struct mcx_cmdq_mailbox))+(((1 << 10))-1))/( (1 << 10)))*((1 << 10))), MCX_CMDQ_MAILBOX_ALIGN(1 << 10)); |
3301 | if (error != 0) |
3302 | return (error); |
3303 | |
3304 | mcx_dmamem_zero(mxm); |
3305 | |
3306 | dva = MCX_DMA_DVA(mxm)((mxm)->mxm_map->dm_segs[0].ds_addr); |
3307 | kva = MCX_DMA_KVA(mxm)((void *)(mxm)->mxm_kva); |
3308 | for (i = 0; i < nmb; i++) { |
3309 | struct mcx_cmdq_mailbox *mbox = (struct mcx_cmdq_mailbox *)kva; |
3310 | |
3311 | /* patch the cqe or mbox pointing at this one */ |
3312 | htobem64(ptr, dva)(*(__uint64_t *)(ptr) = (__uint64_t)(__builtin_constant_p(dva ) ? (__uint64_t)((((__uint64_t)(dva) & 0xff) << 56) | ((__uint64_t)(dva) & 0xff00ULL) << 40 | ((__uint64_t )(dva) & 0xff0000ULL) << 24 | ((__uint64_t)(dva) & 0xff000000ULL) << 8 | ((__uint64_t)(dva) & 0xff00000000ULL ) >> 8 | ((__uint64_t)(dva) & 0xff0000000000ULL) >> 24 | ((__uint64_t)(dva) & 0xff000000000000ULL) >> 40 | ((__uint64_t)(dva) & 0xff00000000000000ULL) >> 56 ) : __swap64md(dva))); |
3313 | |
3314 | /* fill in this mbox */ |
3315 | htobem32(&mbox->mb_block_number, i)(*(__uint32_t *)(&mbox->mb_block_number) = (__uint32_t )(__builtin_constant_p(i) ? (__uint32_t)(((__uint32_t)(i) & 0xff) << 24 | ((__uint32_t)(i) & 0xff00) << 8 | ((__uint32_t)(i) & 0xff0000) >> 8 | ((__uint32_t )(i) & 0xff000000) >> 24) : __swap32md(i))); |
3316 | mbox->mb_token = token; |
3317 | |
3318 | /* move to the next one */ |
3319 | ptr = &mbox->mb_next_ptr; |
3320 | |
3321 | dva += MCX_CMDQ_MAILBOX_SIZE((((sizeof(struct mcx_cmdq_mailbox))+(((1 << 10))-1))/( (1 << 10)))*((1 << 10))); |
3322 | kva += MCX_CMDQ_MAILBOX_SIZE((((sizeof(struct mcx_cmdq_mailbox))+(((1 << 10))-1))/( (1 << 10)))*((1 << 10))); |
3323 | } |
3324 | |
3325 | return (0); |
3326 | } |
3327 | |
/*
 * Signature over a mailbox's control fields only.  The driver sets
 * just the next pointer, block number and token, so those three are
 * mixed directly rather than XORing the whole (mostly zero) header.
 */
static uint32_t
mcx_cmdq_mbox_ctrl_sig(const struct mcx_cmdq_mailbox *mb)
{
	uint32_t xor = 0xff;

	/* only 3 fields get set, so mix them directly */
	xor = mcx_mix_u64(xor, mb->mb_next_ptr);
	xor = mcx_mix_u32(xor, mb->mb_block_number);
	xor = mcx_mix_u8(xor, mb->mb_token);

	return (mcx_mix_done(xor));
}
3340 | |
3341 | static void |
3342 | mcx_cmdq_mboxes_sign(struct mcx_dmamem *mxm, unsigned int nmb) |
3343 | { |
3344 | caddr_t kva; |
3345 | int i; |
3346 | |
3347 | kva = MCX_DMA_KVA(mxm)((void *)(mxm)->mxm_kva); |
3348 | |
3349 | for (i = 0; i < nmb; i++) { |
3350 | struct mcx_cmdq_mailbox *mb = (struct mcx_cmdq_mailbox *)kva; |
3351 | uint8_t sig = mcx_cmdq_mbox_ctrl_sig(mb); |
3352 | mb->mb_ctrl_signature = sig; |
3353 | mb->mb_signature = sig ^ |
3354 | mcx_xor(mb->mb_data, sizeof(mb->mb_data)); |
3355 | |
3356 | kva += MCX_CMDQ_MAILBOX_SIZE((((sizeof(struct mcx_cmdq_mailbox))+(((1 << 10))-1))/( (1 << 10)))*((1 << 10))); |
3357 | } |
3358 | } |
3359 | |
3360 | static void |
3361 | mcx_cmdq_mboxes_sync(struct mcx_softc *sc, struct mcx_dmamem *mxm, int ops) |
3362 | { |
3363 | bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(mxm),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((mxm )->mxm_map)), (0), (((mxm)->mxm_size)), (ops)) |
3364 | 0, MCX_DMA_LEN(mxm), ops)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((mxm )->mxm_map)), (0), (((mxm)->mxm_size)), (ops)); |
3365 | } |
3366 | |
3367 | static struct mcx_cmdq_mailbox * |
3368 | mcx_cq_mbox(struct mcx_dmamem *mxm, unsigned int i) |
3369 | { |
3370 | caddr_t kva; |
3371 | |
3372 | kva = MCX_DMA_KVA(mxm)((void *)(mxm)->mxm_kva); |
3373 | kva += i * MCX_CMDQ_MAILBOX_SIZE((((sizeof(struct mcx_cmdq_mailbox))+(((1 << 10))-1))/( (1 << 10)))*((1 << 10))); |
3374 | |
3375 | return ((struct mcx_cmdq_mailbox *)kva); |
3376 | } |
3377 | |
/* Return a pointer to a mailbox's payload area. */
static inline void *
mcx_cq_mbox_data(struct mcx_cmdq_mailbox *mb)
{
	return (&mb->mb_data);
}
3383 | |
3384 | static void |
3385 | mcx_cmdq_mboxes_copyin(struct mcx_dmamem *mxm, unsigned int nmb, |
3386 | void *b, size_t len) |
3387 | { |
3388 | caddr_t buf = b; |
3389 | struct mcx_cmdq_mailbox *mb; |
3390 | int i; |
3391 | |
3392 | mb = (struct mcx_cmdq_mailbox *)MCX_DMA_KVA(mxm)((void *)(mxm)->mxm_kva); |
3393 | for (i = 0; i < nmb; i++) { |
3394 | |
3395 | memcpy(mb->mb_data, buf, min(sizeof(mb->mb_data), len))__builtin_memcpy((mb->mb_data), (buf), (min(sizeof(mb-> mb_data), len))); |
3396 | |
3397 | if (sizeof(mb->mb_data) >= len) |
3398 | break; |
3399 | |
3400 | buf += sizeof(mb->mb_data); |
3401 | len -= sizeof(mb->mb_data); |
3402 | mb++; |
3403 | } |
3404 | } |
3405 | |
3406 | static void |
3407 | mcx_cmdq_mboxes_pas(struct mcx_dmamem *mxm, int offset, int npages, |
3408 | struct mcx_dmamem *buf) |
3409 | { |
3410 | uint64_t *pas; |
3411 | int mbox, mbox_pages, i; |
3412 | |
3413 | mbox = offset / MCX_CMDQ_MAILBOX_DATASIZE512; |
3414 | offset %= MCX_CMDQ_MAILBOX_DATASIZE512; |
3415 | |
3416 | pas = mcx_cq_mbox_data(mcx_cq_mbox(mxm, mbox)); |
3417 | pas += (offset / sizeof(*pas)); |
3418 | mbox_pages = (MCX_CMDQ_MAILBOX_DATASIZE512 - offset) / sizeof(*pas); |
3419 | for (i = 0; i < npages; i++) { |
3420 | if (i == mbox_pages) { |
3421 | mbox++; |
3422 | pas = mcx_cq_mbox_data(mcx_cq_mbox(mxm, mbox)); |
3423 | mbox_pages += MCX_CMDQ_MAILBOX_DATASIZE512 / sizeof(*pas); |
3424 | } |
3425 | *pas = htobe64(MCX_DMA_DVA(buf) + (i * MCX_PAGE_SIZE))(__uint64_t)(__builtin_constant_p(((buf)->mxm_map->dm_segs [0].ds_addr) + (i * (1 << 12))) ? (__uint64_t)((((__uint64_t )(((buf)->mxm_map->dm_segs[0].ds_addr) + (i * (1 << 12))) & 0xff) << 56) | ((__uint64_t)(((buf)->mxm_map ->dm_segs[0].ds_addr) + (i * (1 << 12))) & 0xff00ULL ) << 40 | ((__uint64_t)(((buf)->mxm_map->dm_segs[ 0].ds_addr) + (i * (1 << 12))) & 0xff0000ULL) << 24 | ((__uint64_t)(((buf)->mxm_map->dm_segs[0].ds_addr ) + (i * (1 << 12))) & 0xff000000ULL) << 8 | ( (__uint64_t)(((buf)->mxm_map->dm_segs[0].ds_addr) + (i * (1 << 12))) & 0xff00000000ULL) >> 8 | ((__uint64_t )(((buf)->mxm_map->dm_segs[0].ds_addr) + (i * (1 << 12))) & 0xff0000000000ULL) >> 24 | ((__uint64_t)(( (buf)->mxm_map->dm_segs[0].ds_addr) + (i * (1 << 12 ))) & 0xff000000000000ULL) >> 40 | ((__uint64_t)((( buf)->mxm_map->dm_segs[0].ds_addr) + (i * (1 << 12 ))) & 0xff00000000000000ULL) >> 56) : __swap64md((( buf)->mxm_map->dm_segs[0].ds_addr) + (i * (1 << 12 )))); |
3426 | pas++; |
3427 | } |
3428 | } |
3429 | |
3430 | static void |
3431 | mcx_cmdq_mboxes_copyout(struct mcx_dmamem *mxm, int nmb, void *b, size_t len) |
3432 | { |
3433 | caddr_t buf = b; |
3434 | struct mcx_cmdq_mailbox *mb; |
3435 | int i; |
3436 | |
3437 | mb = (struct mcx_cmdq_mailbox *)MCX_DMA_KVA(mxm)((void *)(mxm)->mxm_kva); |
3438 | for (i = 0; i < nmb; i++) { |
3439 | memcpy(buf, mb->mb_data, min(sizeof(mb->mb_data), len))__builtin_memcpy((buf), (mb->mb_data), (min(sizeof(mb-> mb_data), len))); |
3440 | |
3441 | if (sizeof(mb->mb_data) >= len) |
3442 | break; |
3443 | |
3444 | buf += sizeof(mb->mb_data); |
3445 | len -= sizeof(mb->mb_data); |
3446 | mb++; |
3447 | } |
3448 | } |
3449 | |
/* Release a mailbox chain allocated by mcx_cmdq_mboxes_alloc(). */
static void
mcx_cq_mboxes_free(struct mcx_softc *sc, struct mcx_dmamem *mxm)
{
	mcx_dmamem_free(sc, mxm);
}
3455 | |
3456 | #if 0 |
3457 | static void |
3458 | mcx_cmdq_dump(const struct mcx_cmdq_entry *cqe) |
3459 | { |
3460 | unsigned int i; |
3461 | |
3462 | printf(" type %02x, ilen %u, iptr %016llx", cqe->cq_type, |
3463 | bemtoh32(&cqe->cq_input_length)(__uint32_t)(__builtin_constant_p(*(__uint32_t *)(&cqe-> cq_input_length)) ? (__uint32_t)(((__uint32_t)(*(__uint32_t * )(&cqe->cq_input_length)) & 0xff) << 24 | (( __uint32_t)(*(__uint32_t *)(&cqe->cq_input_length)) & 0xff00) << 8 | ((__uint32_t)(*(__uint32_t *)(&cqe-> cq_input_length)) & 0xff0000) >> 8 | ((__uint32_t)( *(__uint32_t *)(&cqe->cq_input_length)) & 0xff000000 ) >> 24) : __swap32md(*(__uint32_t *)(&cqe->cq_input_length ))), bemtoh64(&cqe->cq_input_ptr)(__uint64_t)(__builtin_constant_p(*(__uint64_t *)(&cqe-> cq_input_ptr)) ? (__uint64_t)((((__uint64_t)(*(__uint64_t *)( &cqe->cq_input_ptr)) & 0xff) << 56) | ((__uint64_t )(*(__uint64_t *)(&cqe->cq_input_ptr)) & 0xff00ULL ) << 40 | ((__uint64_t)(*(__uint64_t *)(&cqe->cq_input_ptr )) & 0xff0000ULL) << 24 | ((__uint64_t)(*(__uint64_t *)(&cqe->cq_input_ptr)) & 0xff000000ULL) << 8 | ((__uint64_t)(*(__uint64_t *)(&cqe->cq_input_ptr) ) & 0xff00000000ULL) >> 8 | ((__uint64_t)(*(__uint64_t *)(&cqe->cq_input_ptr)) & 0xff0000000000ULL) >> 24 | ((__uint64_t)(*(__uint64_t *)(&cqe->cq_input_ptr )) & 0xff000000000000ULL) >> 40 | ((__uint64_t)(*(__uint64_t *)(&cqe->cq_input_ptr)) & 0xff00000000000000ULL) >> 56) : __swap64md(*(__uint64_t *)(&cqe->cq_input_ptr)) )); |
3464 | |
3465 | printf(", idata "); |
3466 | for (i = 0; i < sizeof(cqe->cq_input_data); i++) |
3467 | printf("%02x", cqe->cq_input_data[i]); |
3468 | |
3469 | printf(", odata "); |
3470 | for (i = 0; i < sizeof(cqe->cq_output_data); i++) |
3471 | printf("%02x", cqe->cq_output_data[i]); |
3472 | |
3473 | printf(", optr %016llx, olen %u, token %02x, sig %02x, status %02x", |
3474 | bemtoh64(&cqe->cq_output_ptr)(__uint64_t)(__builtin_constant_p(*(__uint64_t *)(&cqe-> cq_output_ptr)) ? (__uint64_t)((((__uint64_t)(*(__uint64_t *) (&cqe->cq_output_ptr)) & 0xff) << 56) | ((__uint64_t )(*(__uint64_t *)(&cqe->cq_output_ptr)) & 0xff00ULL ) << 40 | ((__uint64_t)(*(__uint64_t *)(&cqe->cq_output_ptr )) & 0xff0000ULL) << 24 | ((__uint64_t)(*(__uint64_t *)(&cqe->cq_output_ptr)) & 0xff000000ULL) << 8 | ((__uint64_t)(*(__uint64_t *)(&cqe->cq_output_ptr )) & 0xff00000000ULL) >> 8 | ((__uint64_t)(*(__uint64_t *)(&cqe->cq_output_ptr)) & 0xff0000000000ULL) >> 24 | ((__uint64_t)(*(__uint64_t *)(&cqe->cq_output_ptr )) & 0xff000000000000ULL) >> 40 | ((__uint64_t)(*(__uint64_t *)(&cqe->cq_output_ptr)) & 0xff00000000000000ULL) >> 56) : __swap64md(*(__uint64_t *)(&cqe->cq_output_ptr ))), bemtoh32(&cqe->cq_output_length)(__uint32_t)(__builtin_constant_p(*(__uint32_t *)(&cqe-> cq_output_length)) ? (__uint32_t)(((__uint32_t)(*(__uint32_t * )(&cqe->cq_output_length)) & 0xff) << 24 | ( (__uint32_t)(*(__uint32_t *)(&cqe->cq_output_length)) & 0xff00) << 8 | ((__uint32_t)(*(__uint32_t *)(&cqe-> cq_output_length)) & 0xff0000) >> 8 | ((__uint32_t) (*(__uint32_t *)(&cqe->cq_output_length)) & 0xff000000 ) >> 24) : __swap32md(*(__uint32_t *)(&cqe->cq_output_length ))), |
3475 | cqe->cq_token, cqe->cq_signature, cqe->cq_status); |
3476 | } |
3477 | |
3478 | static void |
3479 | mcx_cmdq_mbox_dump(struct mcx_dmamem *mboxes, int num) |
3480 | { |
3481 | int i, j; |
3482 | uint8_t *d; |
3483 | |
3484 | for (i = 0; i < num; i++) { |
3485 | struct mcx_cmdq_mailbox *mbox; |
3486 | mbox = mcx_cq_mbox(mboxes, i); |
3487 | |
3488 | d = mcx_cq_mbox_data(mbox); |
3489 | for (j = 0; j < MCX_CMDQ_MAILBOX_DATASIZE512; j++) { |
3490 | if (j != 0 && (j % 16 == 0)) |
3491 | printf("\n"); |
3492 | printf("%.2x ", d[j]); |
3493 | } |
3494 | } |
3495 | } |
3496 | #endif |
3497 | |
3498 | static int |
3499 | mcx_access_hca_reg(struct mcx_softc *sc, uint16_t reg, int op, void *data, |
3500 | int len) |
3501 | { |
3502 | struct mcx_dmamem mxm; |
3503 | struct mcx_cmdq_entry *cqe; |
3504 | struct mcx_cmd_access_reg_in *in; |
3505 | struct mcx_cmd_access_reg_out *out; |
3506 | uint8_t token = mcx_cmdq_token(sc); |
3507 | int error, nmb; |
3508 | |
3509 | cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem)((void *)(&sc->sc_cmdq_mem)->mxm_kva); |
3510 | mcx_cmdq_init(sc, cqe, sizeof(*in) + len, sizeof(*out) + len, |
3511 | token); |
3512 | |
3513 | in = mcx_cmdq_in(cqe); |
3514 | in->cmd_opcode = htobe16(MCX_CMD_ACCESS_REG)(__uint16_t)(__builtin_constant_p(0x805) ? (__uint16_t)(((__uint16_t )(0x805) & 0xffU) << 8 | ((__uint16_t)(0x805) & 0xff00U) >> 8) : __swap16md(0x805)); |
3515 | in->cmd_op_mod = htobe16(op)(__uint16_t)(__builtin_constant_p(op) ? (__uint16_t)(((__uint16_t )(op) & 0xffU) << 8 | ((__uint16_t)(op) & 0xff00U ) >> 8) : __swap16md(op)); |
3516 | in->cmd_register_id = htobe16(reg)(__uint16_t)(__builtin_constant_p(reg) ? (__uint16_t)(((__uint16_t )(reg) & 0xffU) << 8 | ((__uint16_t)(reg) & 0xff00U ) >> 8) : __swap16md(reg)); |
3517 | |
3518 | nmb = howmany(len, MCX_CMDQ_MAILBOX_DATASIZE)(((len) + ((512) - 1)) / (512)); |
3519 | if (mcx_cmdq_mboxes_alloc(sc, &mxm, nmb, |
3520 | &cqe->cq_output_ptr, token) != 0) { |
3521 | printf(", unable to allocate access reg mailboxen\n"); |
3522 | return (-1); |
3523 | } |
3524 | cqe->cq_input_ptr = cqe->cq_output_ptr; |
3525 | mcx_cmdq_mboxes_copyin(&mxm, nmb, data, len); |
3526 | mcx_cmdq_mboxes_sign(&mxm, nmb); |
3527 | mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_PRERW(0x01|0x04)); |
3528 | |
3529 | mcx_cmdq_post(sc, cqe, 0); |
3530 | error = mcx_cmdq_poll(sc, cqe, 1000); |
3531 | mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_POSTRW(0x02|0x08)); |
3532 | |
3533 | if (error != 0) { |
3534 | printf("%s: access reg (%s %x) timeout\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), |
3535 | (op == MCX_REG_OP_WRITE0 ? "write" : "read"), reg); |
3536 | goto free; |
3537 | } |
3538 | error = mcx_cmdq_verify(cqe); |
3539 | if (error != 0) { |
3540 | printf("%s: access reg (%s %x) reply corrupt\n", |
3541 | (op == MCX_REG_OP_WRITE0 ? "write" : "read"), DEVNAME(sc)((sc)->sc_dev.dv_xname), |
3542 | reg); |
3543 | goto free; |
3544 | } |
3545 | |
3546 | out = mcx_cmdq_out(cqe); |
3547 | if (out->cmd_status != MCX_CQ_STATUS_OK(0x00 << 1)) { |
3548 | printf("%s: access reg (%s %x) failed (%x, %.6x)\n", |
3549 | DEVNAME(sc)((sc)->sc_dev.dv_xname), (op == MCX_REG_OP_WRITE0 ? "write" : "read"), |
3550 | reg, out->cmd_status, betoh32(out->cmd_syndrome)(__uint32_t)(__builtin_constant_p(out->cmd_syndrome) ? (__uint32_t )(((__uint32_t)(out->cmd_syndrome) & 0xff) << 24 | ((__uint32_t)(out->cmd_syndrome) & 0xff00) << 8 | ((__uint32_t)(out->cmd_syndrome) & 0xff0000) >> 8 | ((__uint32_t)(out->cmd_syndrome) & 0xff000000) >> 24) : __swap32md(out->cmd_syndrome))); |
3551 | error = -1; |
3552 | goto free; |
3553 | } |
3554 | |
3555 | mcx_cmdq_mboxes_copyout(&mxm, nmb, data, len); |
3556 | free: |
3557 | mcx_dmamem_free(sc, &mxm); |
3558 | |
3559 | return (error); |
3560 | } |
3561 | |
3562 | static int |
3563 | mcx_set_issi(struct mcx_softc *sc, struct mcx_cmdq_entry *cqe, |
3564 | unsigned int slot) |
3565 | { |
3566 | struct mcx_cmd_set_issi_in *in; |
3567 | struct mcx_cmd_set_issi_out *out; |
3568 | uint8_t status; |
3569 | |
3570 | mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc)); |
3571 | |
3572 | in = mcx_cmdq_in(cqe); |
3573 | in->cmd_opcode = htobe16(MCX_CMD_SET_ISSI)(__uint16_t)(__builtin_constant_p(0x10b) ? (__uint16_t)(((__uint16_t )(0x10b) & 0xffU) << 8 | ((__uint16_t)(0x10b) & 0xff00U) >> 8) : __swap16md(0x10b)); |
3574 | in->cmd_op_mod = htobe16(0)(__uint16_t)(__builtin_constant_p(0) ? (__uint16_t)(((__uint16_t )(0) & 0xffU) << 8 | ((__uint16_t)(0) & 0xff00U ) >> 8) : __swap16md(0)); |
3575 | in->cmd_current_issi = htobe16(MCX_ISSI)(__uint16_t)(__builtin_constant_p(1) ? (__uint16_t)(((__uint16_t )(1) & 0xffU) << 8 | ((__uint16_t)(1) & 0xff00U ) >> 8) : __swap16md(1)); |
3576 | |
3577 | mcx_cmdq_post(sc, cqe, slot); |
3578 | if (mcx_cmdq_poll(sc, cqe, 1000) != 0) |
3579 | return (-1); |
3580 | if (mcx_cmdq_verify(cqe) != 0) |
3581 | return (-1); |
3582 | |
3583 | status = cqe->cq_output_data[0]; |
3584 | if (status != MCX_CQ_STATUS_OK(0x00 << 1)) |
3585 | return (-1); |
3586 | |
3587 | return (0); |
3588 | } |
3589 | |
3590 | static int |
3591 | mcx_issi(struct mcx_softc *sc) |
3592 | { |
3593 | struct mcx_dmamem mxm; |
3594 | struct mcx_cmdq_entry *cqe; |
3595 | struct mcx_cmd_query_issi_in *in; |
3596 | struct mcx_cmd_query_issi_il_out *out; |
3597 | struct mcx_cmd_query_issi_mb_out *mb; |
3598 | uint8_t token = mcx_cmdq_token(sc); |
3599 | uint8_t status; |
3600 | int error; |
3601 | |
3602 | cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem)((void *)(&sc->sc_cmdq_mem)->mxm_kva); |
3603 | mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + sizeof(*mb), token); |
3604 | |
3605 | in = mcx_cmdq_in(cqe); |
3606 | in->cmd_opcode = htobe16(MCX_CMD_QUERY_ISSI)(__uint16_t)(__builtin_constant_p(0x10a) ? (__uint16_t)(((__uint16_t )(0x10a) & 0xffU) << 8 | ((__uint16_t)(0x10a) & 0xff00U) >> 8) : __swap16md(0x10a)); |
3607 | in->cmd_op_mod = htobe16(0)(__uint16_t)(__builtin_constant_p(0) ? (__uint16_t)(((__uint16_t )(0) & 0xffU) << 8 | ((__uint16_t)(0) & 0xff00U ) >> 8) : __swap16md(0)); |
3608 | |
3609 | CTASSERT(sizeof(*mb) <= MCX_CMDQ_MAILBOX_DATASIZE)extern char _ctassert[(sizeof(*mb) <= 512) ? 1 : -1 ] __attribute__ ((__unused__)); |
3610 | if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, |
3611 | &cqe->cq_output_ptr, token) != 0) { |
3612 | printf(", unable to allocate query issi mailbox\n"); |
3613 | return (-1); |
3614 | } |
3615 | mcx_cmdq_mboxes_sign(&mxm, 1); |
3616 | |
3617 | mcx_cmdq_post(sc, cqe, 0); |
3618 | error = mcx_cmdq_poll(sc, cqe, 1000); |
3619 | if (error != 0) { |
3620 | printf(", query issi timeout\n"); |
3621 | goto free; |
3622 | } |
3623 | error = mcx_cmdq_verify(cqe); |
3624 | if (error != 0) { |
3625 | printf(", query issi reply corrupt\n"); |
3626 | goto free; |
3627 | } |
3628 | |
3629 | status = cqe->cq_output_data[0]; |
3630 | switch (status) { |
3631 | case MCX_CQ_STATUS_OK(0x00 << 1): |
3632 | break; |
3633 | case MCX_CQ_STATUS_BAD_OPCODE(0x02 << 1): |
3634 | /* use ISSI 0 */ |
3635 | goto free; |
3636 | default: |
3637 | printf(", query issi failed (%x)\n", status); |
3638 | error = -1; |
3639 | goto free; |
3640 | } |
3641 | |
3642 | out = mcx_cmdq_out(cqe); |
3643 | if (out->cmd_current_issi == htobe16(MCX_ISSI)(__uint16_t)(__builtin_constant_p(1) ? (__uint16_t)(((__uint16_t )(1) & 0xffU) << 8 | ((__uint16_t)(1) & 0xff00U ) >> 8) : __swap16md(1))) { |
3644 | /* use ISSI 1 */ |
3645 | goto free; |
3646 | } |
3647 | |
3648 | /* don't need to read cqe anymore, can be used for SET ISSI */ |
3649 | |
3650 | mb = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)); |
3651 | CTASSERT(MCX_ISSI < NBBY)extern char _ctassert[(1 < 8) ? 1 : -1 ] __attribute__((__unused__ )); |
3652 | /* XXX math is hard */ |
3653 | if (!ISSET(mb->cmd_supported_issi[79], 1 << MCX_ISSI)((mb->cmd_supported_issi[79]) & (1 << 1))) { |
3654 | /* use ISSI 0 */ |
3655 | goto free; |
3656 | } |
3657 | |
3658 | if (mcx_set_issi(sc, cqe, 0) != 0) { |
3659 | /* ignore the error, just use ISSI 0 */ |
3660 | } else { |
3661 | /* use ISSI 1 */ |
3662 | } |
3663 | |
3664 | free: |
3665 | mcx_cq_mboxes_free(sc, &mxm); |
3666 | return (error); |
3667 | } |
3668 | |
3669 | static int |
3670 | mcx_query_pages(struct mcx_softc *sc, uint16_t type, |
3671 | int32_t *npages, uint16_t *func_id) |
3672 | { |
3673 | struct mcx_cmdq_entry *cqe; |
3674 | struct mcx_cmd_query_pages_in *in; |
3675 | struct mcx_cmd_query_pages_out *out; |
3676 | |
3677 | cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem)((void *)(&sc->sc_cmdq_mem)->mxm_kva); |
3678 | mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc)); |
3679 | |
3680 | in = mcx_cmdq_in(cqe); |
3681 | in->cmd_opcode = htobe16(MCX_CMD_QUERY_PAGES)(__uint16_t)(__builtin_constant_p(0x107) ? (__uint16_t)(((__uint16_t )(0x107) & 0xffU) << 8 | ((__uint16_t)(0x107) & 0xff00U) >> 8) : __swap16md(0x107)); |
3682 | in->cmd_op_mod = type; |
3683 | |
3684 | mcx_cmdq_post(sc, cqe, 0); |
3685 | if (mcx_cmdq_poll(sc, cqe, 1000) != 0) { |
3686 | printf(", query pages timeout\n"); |
3687 | return (-1); |
3688 | } |
3689 | if (mcx_cmdq_verify(cqe) != 0) { |
3690 | printf(", query pages reply corrupt\n"); |
3691 | return (-1); |
3692 | } |
3693 | |
3694 | out = mcx_cmdq_out(cqe); |
3695 | if (out->cmd_status != MCX_CQ_STATUS_OK(0x00 << 1)) { |
3696 | printf(", query pages failed (%x)\n", out->cmd_status); |
3697 | return (-1); |
3698 | } |
3699 | |
3700 | *func_id = out->cmd_func_id; |
3701 | *npages = bemtoh32(&out->cmd_num_pages)(__uint32_t)(__builtin_constant_p(*(__uint32_t *)(&out-> cmd_num_pages)) ? (__uint32_t)(((__uint32_t)(*(__uint32_t *)( &out->cmd_num_pages)) & 0xff) << 24 | ((__uint32_t )(*(__uint32_t *)(&out->cmd_num_pages)) & 0xff00) << 8 | ((__uint32_t)(*(__uint32_t *)(&out->cmd_num_pages )) & 0xff0000) >> 8 | ((__uint32_t)(*(__uint32_t *) (&out->cmd_num_pages)) & 0xff000000) >> 24) : __swap32md(*(__uint32_t *)(&out->cmd_num_pages))); |
3702 | |
3703 | return (0); |
3704 | } |
3705 | |
/*
 * Cursor over the physical segments of a loaded DMA map, used to hand
 * page addresses to the firmware one step at a time.
 */
struct bus_dma_iter {
	bus_dmamap_t i_map;	/* the map being walked */
	bus_size_t i_offset;	/* byte offset within the current segment */
	unsigned int i_index;	/* index of the current segment */
};
3711 | |
/* Start an iterator at the beginning of the map's first segment. */
static void
bus_dma_iter_init(struct bus_dma_iter *i, bus_dmamap_t map)
{
	i->i_map = map;
	i->i_offset = 0;
	i->i_index = 0;
}
3719 | |
/* Bus address of the iterator's current position. */
static bus_addr_t
bus_dma_iter_addr(struct bus_dma_iter *i)
{
	return (i->i_map->dm_segs[i->i_index].ds_addr + i->i_offset);
}
3725 | |
/*
 * Advance the iterator by "size" bytes, walking forward across
 * segment boundaries.  Assumes the caller never advances past the end
 * of the map (no bounds check against dm_nsegs).
 */
static void
bus_dma_iter_add(struct bus_dma_iter *i, bus_size_t size)
{
	bus_dma_segment_t *seg = i->i_map->dm_segs + i->i_index;
	bus_size_t diff;

	do {
		/* bytes left in the current segment */
		diff = seg->ds_len - i->i_offset;
		if (size < diff)
			break;

		/* consume the rest of this segment and move to the next */
		size -= diff;

		seg++;

		i->i_offset = 0;
		i->i_index++;
	} while (size > 0);

	/* whatever is left fits inside the current segment */
	i->i_offset += size;
}
3747 | |
3748 | static int |
3749 | mcx_add_pages(struct mcx_softc *sc, struct mcx_hwmem *mhm, uint16_t func_id) |
3750 | { |
3751 | struct mcx_dmamem mxm; |
3752 | struct mcx_cmdq_entry *cqe; |
3753 | struct mcx_cmd_manage_pages_in *in; |
3754 | struct mcx_cmd_manage_pages_out *out; |
3755 | unsigned int paslen, nmb, i, j, npages; |
3756 | struct bus_dma_iter iter; |
3757 | uint64_t *pas; |
3758 | uint8_t status; |
3759 | uint8_t token = mcx_cmdq_token(sc); |
3760 | int error; |
3761 | |
3762 | npages = mhm->mhm_npages; |
3763 | |
3764 | paslen = sizeof(*pas) * npages; |
3765 | nmb = howmany(paslen, MCX_CMDQ_MAILBOX_DATASIZE)(((paslen) + ((512) - 1)) / (512)); |
3766 | |
3767 | cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem)((void *)(&sc->sc_cmdq_mem)->mxm_kva); |
3768 | mcx_cmdq_init(sc, cqe, sizeof(*in) + paslen, sizeof(*out), token); |
3769 | |
3770 | in = mcx_cmdq_in(cqe); |
3771 | in->cmd_opcode = htobe16(MCX_CMD_MANAGE_PAGES)(__uint16_t)(__builtin_constant_p(0x108) ? (__uint16_t)(((__uint16_t )(0x108) & 0xffU) << 8 | ((__uint16_t)(0x108) & 0xff00U) >> 8) : __swap16md(0x108)); |
3772 | in->cmd_op_mod = htobe16(MCX_CMD_MANAGE_PAGES_ALLOC_SUCCESS)(__uint16_t)(__builtin_constant_p(0x01) ? (__uint16_t)(((__uint16_t )(0x01) & 0xffU) << 8 | ((__uint16_t)(0x01) & 0xff00U ) >> 8) : __swap16md(0x01)); |
3773 | in->cmd_func_id = func_id; |
3774 | htobem32(&in->cmd_input_num_entries, npages)(*(__uint32_t *)(&in->cmd_input_num_entries) = (__uint32_t )(__builtin_constant_p(npages) ? (__uint32_t)(((__uint32_t)(npages ) & 0xff) << 24 | ((__uint32_t)(npages) & 0xff00 ) << 8 | ((__uint32_t)(npages) & 0xff0000) >> 8 | ((__uint32_t)(npages) & 0xff000000) >> 24) : __swap32md (npages))); |
3775 | |
3776 | if (mcx_cmdq_mboxes_alloc(sc, &mxm, nmb, |
3777 | &cqe->cq_input_ptr, token) != 0) { |
3778 | printf(", unable to allocate manage pages mailboxen\n"); |
3779 | return (-1); |
3780 | } |
3781 | |
3782 | bus_dma_iter_init(&iter, mhm->mhm_map); |
3783 | for (i = 0; i < nmb; i++) { |
3784 | unsigned int lim; |
3785 | |
3786 | pas = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, i)); |
3787 | lim = min(MCX_CMDQ_MAILBOX_DATASIZE512 / sizeof(*pas), npages); |
3788 | |
3789 | for (j = 0; j < lim; j++) { |
3790 | htobem64(&pas[j], bus_dma_iter_addr(&iter))(*(__uint64_t *)(&pas[j]) = (__uint64_t)(__builtin_constant_p (bus_dma_iter_addr(&iter)) ? (__uint64_t)((((__uint64_t)( bus_dma_iter_addr(&iter)) & 0xff) << 56) | ((__uint64_t )(bus_dma_iter_addr(&iter)) & 0xff00ULL) << 40 | ((__uint64_t)(bus_dma_iter_addr(&iter)) & 0xff0000ULL ) << 24 | ((__uint64_t)(bus_dma_iter_addr(&iter)) & 0xff000000ULL) << 8 | ((__uint64_t)(bus_dma_iter_addr( &iter)) & 0xff00000000ULL) >> 8 | ((__uint64_t) (bus_dma_iter_addr(&iter)) & 0xff0000000000ULL) >> 24 | ((__uint64_t)(bus_dma_iter_addr(&iter)) & 0xff000000000000ULL ) >> 40 | ((__uint64_t)(bus_dma_iter_addr(&iter)) & 0xff00000000000000ULL) >> 56) : __swap64md(bus_dma_iter_addr (&iter)))); |
3791 | bus_dma_iter_add(&iter, MCX_PAGE_SIZE(1 << 12)); |
3792 | } |
3793 | |
3794 | npages -= lim; |
3795 | } |
3796 | |
3797 | mcx_cmdq_mboxes_sign(&mxm, nmb); |
3798 | |
3799 | mcx_cmdq_post(sc, cqe, 0); |
3800 | error = mcx_cmdq_poll(sc, cqe, 1000); |
3801 | if (error != 0) { |
3802 | printf(", manage pages timeout\n"); |
3803 | goto free; |
3804 | } |
3805 | error = mcx_cmdq_verify(cqe); |
3806 | if (error != 0) { |
3807 | printf(", manage pages reply corrupt\n"); |
3808 | goto free; |
3809 | } |
3810 | |
3811 | status = cqe->cq_output_data[0]; |
3812 | if (status != MCX_CQ_STATUS_OK(0x00 << 1)) { |
3813 | printf(", manage pages failed (%x)\n", status); |
3814 | error = -1; |
3815 | goto free; |
3816 | } |
3817 | |
3818 | free: |
3819 | mcx_dmamem_free(sc, &mxm); |
3820 | |
3821 | return (error); |
3822 | } |
3823 | |
3824 | static int |
3825 | mcx_pages(struct mcx_softc *sc, struct mcx_hwmem *mhm, uint16_t type) |
3826 | { |
3827 | int32_t npages; |
3828 | uint16_t func_id; |
3829 | |
3830 | if (mcx_query_pages(sc, type, &npages, &func_id) != 0) { |
3831 | /* error printed by mcx_query_pages */ |
3832 | return (-1); |
3833 | } |
3834 | |
3835 | if (npages < 1) |
3836 | return (0); |
3837 | |
3838 | if (mcx_hwmem_alloc(sc, mhm, npages) != 0) { |
3839 | printf(", unable to allocate hwmem\n"); |
3840 | return (-1); |
3841 | } |
3842 | |
3843 | if (mcx_add_pages(sc, mhm, func_id) != 0) { |
3844 | printf(", unable to add hwmem\n"); |
3845 | goto free; |
3846 | } |
3847 | |
3848 | return (0); |
3849 | |
3850 | free: |
3851 | mcx_hwmem_free(sc, mhm); |
3852 | |
3853 | return (-1); |
3854 | } |
3855 | |
3856 | static int |
3857 | mcx_hca_max_caps(struct mcx_softc *sc) |
3858 | { |
3859 | struct mcx_dmamem mxm; |
3860 | struct mcx_cmdq_entry *cqe; |
3861 | struct mcx_cmd_query_hca_cap_in *in; |
3862 | struct mcx_cmd_query_hca_cap_out *out; |
3863 | struct mcx_cmdq_mailbox *mb; |
3864 | struct mcx_cap_device *hca; |
3865 | uint8_t status; |
3866 | uint8_t token = mcx_cmdq_token(sc); |
3867 | int error; |
3868 | |
3869 | cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem)((void *)(&sc->sc_cmdq_mem)->mxm_kva); |
3870 | mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + MCX_HCA_CAP_LEN0x1000, |
3871 | token); |
3872 | |
3873 | in = mcx_cmdq_in(cqe); |
3874 | in->cmd_opcode = htobe16(MCX_CMD_QUERY_HCA_CAP)(__uint16_t)(__builtin_constant_p(0x100) ? (__uint16_t)(((__uint16_t )(0x100) & 0xffU) << 8 | ((__uint16_t)(0x100) & 0xff00U) >> 8) : __swap16md(0x100)); |
3875 | in->cmd_op_mod = htobe16(MCX_CMD_QUERY_HCA_CAP_MAX |(__uint16_t)(__builtin_constant_p((0x0 << 0) | (0x0 << 1)) ? (__uint16_t)(((__uint16_t)((0x0 << 0) | (0x0 << 1)) & 0xffU) << 8 | ((__uint16_t)((0x0 << 0) | (0x0 << 1)) & 0xff00U) >> 8) : __swap16md( (0x0 << 0) | (0x0 << 1))) |
3876 | MCX_CMD_QUERY_HCA_CAP_DEVICE)(__uint16_t)(__builtin_constant_p((0x0 << 0) | (0x0 << 1)) ? (__uint16_t)(((__uint16_t)((0x0 << 0) | (0x0 << 1)) & 0xffU) << 8 | ((__uint16_t)((0x0 << 0) | (0x0 << 1)) & 0xff00U) >> 8) : __swap16md( (0x0 << 0) | (0x0 << 1))); |
3877 | |
3878 | if (mcx_cmdq_mboxes_alloc(sc, &mxm, MCX_HCA_CAP_NMAILBOXES(0x1000 / 512), |
3879 | &cqe->cq_output_ptr, token) != 0) { |
3880 | printf(", unable to allocate query hca caps mailboxen\n"); |
3881 | return (-1); |
3882 | } |
3883 | mcx_cmdq_mboxes_sign(&mxm, MCX_HCA_CAP_NMAILBOXES(0x1000 / 512)); |
3884 | mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_PRERW(0x01|0x04)); |
3885 | |
3886 | mcx_cmdq_post(sc, cqe, 0); |
3887 | error = mcx_cmdq_poll(sc, cqe, 1000); |
3888 | mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_POSTRW(0x02|0x08)); |
3889 | |
3890 | if (error != 0) { |
3891 | printf(", query hca caps timeout\n"); |
3892 | goto free; |
3893 | } |
3894 | error = mcx_cmdq_verify(cqe); |
3895 | if (error != 0) { |
3896 | printf(", query hca caps reply corrupt\n"); |
3897 | goto free; |
3898 | } |
3899 | |
3900 | status = cqe->cq_output_data[0]; |
3901 | if (status != MCX_CQ_STATUS_OK(0x00 << 1)) { |
3902 | printf(", query hca caps failed (%x)\n", status); |
3903 | error = -1; |
3904 | goto free; |
3905 | } |
3906 | |
3907 | mb = mcx_cq_mbox(&mxm, 0); |
3908 | hca = mcx_cq_mbox_data(mb); |
3909 | |
3910 | if ((hca->port_type & MCX_CAP_DEVICE_PORT_TYPE0x03) |
3911 | != MCX_CAP_DEVICE_PORT_TYPE_ETH0x01) { |
3912 | printf(", not in ethernet mode\n"); |
3913 | error = -1; |
3914 | goto free; |
3915 | } |
3916 | if (hca->log_pg_sz > PAGE_SHIFT12) { |
3917 | printf(", minimum system page shift %u is too large\n", |
3918 | hca->log_pg_sz); |
3919 | error = -1; |
3920 | goto free; |
3921 | } |
3922 | /* |
3923 | * blueflame register is split into two buffers, and we must alternate |
3924 | * between the two of them. |
3925 | */ |
3926 | sc->sc_bf_size = (1 << hca->log_bf_reg_size) / 2; |
3927 | sc->sc_max_rqt_size = (1 << hca->log_max_rqt_size); |
3928 | |
3929 | if (hca->local_ca_ack_delay & MCX_CAP_DEVICE_MCAM_REG0x40) |
3930 | sc->sc_mcam_reg = 1; |
3931 | |
3932 | sc->sc_mhz = bemtoh32(&hca->device_frequency_mhz)(__uint32_t)(__builtin_constant_p(*(__uint32_t *)(&hca-> device_frequency_mhz)) ? (__uint32_t)(((__uint32_t)(*(__uint32_t *)(&hca->device_frequency_mhz)) & 0xff) << 24 | ((__uint32_t)(*(__uint32_t *)(&hca->device_frequency_mhz )) & 0xff00) << 8 | ((__uint32_t)(*(__uint32_t *)(& hca->device_frequency_mhz)) & 0xff0000) >> 8 | ( (__uint32_t)(*(__uint32_t *)(&hca->device_frequency_mhz )) & 0xff000000) >> 24) : __swap32md(*(__uint32_t * )(&hca->device_frequency_mhz))); |
3933 | sc->sc_khz = bemtoh32(&hca->device_frequency_khz)(__uint32_t)(__builtin_constant_p(*(__uint32_t *)(&hca-> device_frequency_khz)) ? (__uint32_t)(((__uint32_t)(*(__uint32_t *)(&hca->device_frequency_khz)) & 0xff) << 24 | ((__uint32_t)(*(__uint32_t *)(&hca->device_frequency_khz )) & 0xff00) << 8 | ((__uint32_t)(*(__uint32_t *)(& hca->device_frequency_khz)) & 0xff0000) >> 8 | ( (__uint32_t)(*(__uint32_t *)(&hca->device_frequency_khz )) & 0xff000000) >> 24) : __swap32md(*(__uint32_t * )(&hca->device_frequency_khz))); |
3934 | |
3935 | free: |
3936 | mcx_dmamem_free(sc, &mxm); |
3937 | |
3938 | return (error); |
3939 | } |
3940 | |
3941 | static int |
3942 | mcx_hca_set_caps(struct mcx_softc *sc) |
3943 | { |
3944 | struct mcx_dmamem mxm; |
3945 | struct mcx_cmdq_entry *cqe; |
3946 | struct mcx_cmd_query_hca_cap_in *in; |
3947 | struct mcx_cmd_query_hca_cap_out *out; |
3948 | struct mcx_cmdq_mailbox *mb; |
3949 | struct mcx_cap_device *hca; |
3950 | uint8_t status; |
3951 | uint8_t token = mcx_cmdq_token(sc); |
3952 | int error; |
3953 | |
3954 | cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem)((void *)(&sc->sc_cmdq_mem)->mxm_kva); |
3955 | mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + MCX_HCA_CAP_LEN0x1000, |
3956 | token); |
3957 | |
3958 | in = mcx_cmdq_in(cqe); |
3959 | in->cmd_opcode = htobe16(MCX_CMD_QUERY_HCA_CAP)(__uint16_t)(__builtin_constant_p(0x100) ? (__uint16_t)(((__uint16_t )(0x100) & 0xffU) << 8 | ((__uint16_t)(0x100) & 0xff00U) >> 8) : __swap16md(0x100)); |
3960 | in->cmd_op_mod = htobe16(MCX_CMD_QUERY_HCA_CAP_CURRENT |(__uint16_t)(__builtin_constant_p((0x1 << 0) | (0x0 << 1)) ? (__uint16_t)(((__uint16_t)((0x1 << 0) | (0x0 << 1)) & 0xffU) << 8 | ((__uint16_t)((0x1 << 0) | (0x0 << 1)) & 0xff00U) >> 8) : __swap16md( (0x1 << 0) | (0x0 << 1))) |
3961 | MCX_CMD_QUERY_HCA_CAP_DEVICE)(__uint16_t)(__builtin_constant_p((0x1 << 0) | (0x0 << 1)) ? (__uint16_t)(((__uint16_t)((0x1 << 0) | (0x0 << 1)) & 0xffU) << 8 | ((__uint16_t)((0x1 << 0) | (0x0 << 1)) & 0xff00U) >> 8) : __swap16md( (0x1 << 0) | (0x0 << 1))); |
3962 | |
3963 | if (mcx_cmdq_mboxes_alloc(sc, &mxm, MCX_HCA_CAP_NMAILBOXES(0x1000 / 512), |
3964 | &cqe->cq_output_ptr, token) != 0) { |
3965 | printf(", unable to allocate manage pages mailboxen\n"); |
3966 | return (-1); |
3967 | } |
3968 | mcx_cmdq_mboxes_sign(&mxm, MCX_HCA_CAP_NMAILBOXES(0x1000 / 512)); |
3969 | mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_PRERW(0x01|0x04)); |
3970 | |
3971 | mcx_cmdq_post(sc, cqe, 0); |
3972 | error = mcx_cmdq_poll(sc, cqe, 1000); |
3973 | mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_POSTRW(0x02|0x08)); |
3974 | |
3975 | if (error != 0) { |
3976 | printf(", query hca caps timeout\n"); |
3977 | goto free; |
3978 | } |
3979 | error = mcx_cmdq_verify(cqe); |
3980 | if (error != 0) { |
3981 | printf(", query hca caps reply corrupt\n"); |
3982 | goto free; |
3983 | } |
3984 | |
3985 | status = cqe->cq_output_data[0]; |
3986 | if (status != MCX_CQ_STATUS_OK(0x00 << 1)) { |
3987 | printf(", query hca caps failed (%x)\n", status); |
3988 | error = -1; |
3989 | goto free; |
3990 | } |
3991 | |
3992 | mb = mcx_cq_mbox(&mxm, 0); |
3993 | hca = mcx_cq_mbox_data(mb); |
3994 | |
3995 | hca->log_pg_sz = PAGE_SHIFT12; |
3996 | |
3997 | free: |
3998 | mcx_dmamem_free(sc, &mxm); |
3999 | |
4000 | return (error); |
4001 | } |
4002 | |
4003 | |
4004 | static int |
4005 | mcx_init_hca(struct mcx_softc *sc) |
4006 | { |
4007 | struct mcx_cmdq_entry *cqe; |
4008 | struct mcx_cmd_init_hca_in *in; |
4009 | struct mcx_cmd_init_hca_out *out; |
4010 | int error; |
4011 | uint8_t status; |
4012 | |
4013 | cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem)((void *)(&sc->sc_cmdq_mem)->mxm_kva); |
4014 | mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc)); |
4015 | |
4016 | in = mcx_cmdq_in(cqe); |
4017 | in->cmd_opcode = htobe16(MCX_CMD_INIT_HCA)(__uint16_t)(__builtin_constant_p(0x102) ? (__uint16_t)(((__uint16_t )(0x102) & 0xffU) << 8 | ((__uint16_t)(0x102) & 0xff00U) >> 8) : __swap16md(0x102)); |
4018 | in->cmd_op_mod = htobe16(0)(__uint16_t)(__builtin_constant_p(0) ? (__uint16_t)(((__uint16_t )(0) & 0xffU) << 8 | ((__uint16_t)(0) & 0xff00U ) >> 8) : __swap16md(0)); |
4019 | |
4020 | mcx_cmdq_post(sc, cqe, 0); |
4021 | |
4022 | error = mcx_cmdq_poll(sc, cqe, 1000); |
4023 | if (error != 0) { |
4024 | printf(", hca init timeout\n"); |
4025 | return (-1); |
4026 | } |
4027 | if (mcx_cmdq_verify(cqe) != 0) { |
4028 | printf(", hca init command corrupt\n"); |
4029 | return (-1); |
4030 | } |
4031 | |
4032 | status = cqe->cq_output_data[0]; |
4033 | if (status != MCX_CQ_STATUS_OK(0x00 << 1)) { |
4034 | printf(", hca init failed (%x)\n", status); |
4035 | return (-1); |
4036 | } |
4037 | |
4038 | return (0); |
4039 | } |
4040 | |
4041 | static int |
4042 | mcx_set_driver_version(struct mcx_softc *sc) |
4043 | { |
4044 | struct mcx_dmamem mxm; |
4045 | struct mcx_cmdq_entry *cqe; |
4046 | struct mcx_cmd_set_driver_version_in *in; |
4047 | struct mcx_cmd_set_driver_version_out *out; |
4048 | int error; |
4049 | int token; |
4050 | uint8_t status; |
4051 | |
4052 | cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem)((void *)(&sc->sc_cmdq_mem)->mxm_kva); |
4053 | token = mcx_cmdq_token(sc); |
4054 | mcx_cmdq_init(sc, cqe, sizeof(*in) + |
4055 | sizeof(struct mcx_cmd_set_driver_version), sizeof(*out), token); |
4056 | |
4057 | in = mcx_cmdq_in(cqe); |
4058 | in->cmd_opcode = htobe16(MCX_CMD_SET_DRIVER_VERSION)(__uint16_t)(__builtin_constant_p(0x10d) ? (__uint16_t)(((__uint16_t )(0x10d) & 0xffU) << 8 | ((__uint16_t)(0x10d) & 0xff00U) >> 8) : __swap16md(0x10d)); |
4059 | in->cmd_op_mod = htobe16(0)(__uint16_t)(__builtin_constant_p(0) ? (__uint16_t)(((__uint16_t )(0) & 0xffU) << 8 | ((__uint16_t)(0) & 0xff00U ) >> 8) : __swap16md(0)); |
4060 | |
4061 | if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, |
4062 | &cqe->cq_input_ptr, token) != 0) { |
4063 | printf(", unable to allocate set driver version mailboxen\n"); |
4064 | return (-1); |
4065 | } |
4066 | strlcpy(mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)), |
4067 | "OpenBSD,mcx,1.000.000000", MCX_CMDQ_MAILBOX_DATASIZE512); |
4068 | |
4069 | mcx_cmdq_mboxes_sign(&mxm, 1); |
4070 | mcx_cmdq_post(sc, cqe, 0); |
4071 | |
4072 | error = mcx_cmdq_poll(sc, cqe, 1000); |
4073 | if (error != 0) { |
4074 | printf(", set driver version timeout\n"); |
4075 | goto free; |
4076 | } |
4077 | if (mcx_cmdq_verify(cqe) != 0) { |
4078 | printf(", set driver version command corrupt\n"); |
4079 | goto free; |
4080 | } |
4081 | |
4082 | status = cqe->cq_output_data[0]; |
4083 | if (status != MCX_CQ_STATUS_OK(0x00 << 1)) { |
4084 | printf(", set driver version failed (%x)\n", status); |
4085 | error = -1; |
4086 | goto free; |
4087 | } |
4088 | |
4089 | free: |
4090 | mcx_dmamem_free(sc, &mxm); |
4091 | |
4092 | return (error); |
4093 | } |
4094 | |
4095 | static int |
4096 | mcx_iff(struct mcx_softc *sc) |
4097 | { |
4098 | struct ifnet *ifp = &sc->sc_ac.ac_if; |
4099 | struct mcx_dmamem mxm; |
4100 | struct mcx_cmdq_entry *cqe; |
4101 | struct mcx_cmd_modify_nic_vport_context_in *in; |
4102 | struct mcx_cmd_modify_nic_vport_context_out *out; |
4103 | struct mcx_nic_vport_ctx *ctx; |
4104 | int error; |
4105 | int token; |
4106 | int insize; |
4107 | uint32_t dest; |
4108 | |
4109 | dest = MCX_FLOW_CONTEXT_DEST_TYPE_TABLE(1 << 24) | |
4110 | sc->sc_rss_flow_table_id; |
4111 | |
4112 | /* enable or disable the promisc flow */ |
4113 | if (ISSET(ifp->if_flags, IFF_PROMISC)((ifp->if_flags) & (0x100))) { |
4114 | if (sc->sc_promisc_flow_enabled == 0) { |
4115 | mcx_set_flow_table_entry_mac(sc, |
4116 | MCX_FLOW_GROUP_PROMISC0, 0, NULL((void *)0), dest); |
4117 | sc->sc_promisc_flow_enabled = 1; |
4118 | } |
4119 | } else if (sc->sc_promisc_flow_enabled != 0) { |
4120 | mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_PROMISC0, 0); |
4121 | sc->sc_promisc_flow_enabled = 0; |
4122 | } |
4123 | |
4124 | /* enable or disable the all-multicast flow */ |
4125 | if (ISSET(ifp->if_flags, IFF_ALLMULTI)((ifp->if_flags) & (0x200))) { |
4126 | if (sc->sc_allmulti_flow_enabled == 0) { |
4127 | uint8_t mcast[ETHER_ADDR_LEN6]; |
4128 | |
4129 | memset(mcast, 0, sizeof(mcast))__builtin_memset((mcast), (0), (sizeof(mcast))); |
4130 | mcast[0] = 0x01; |
4131 | mcx_set_flow_table_entry_mac(sc, |
4132 | MCX_FLOW_GROUP_ALLMULTI1, 0, mcast, dest); |
4133 | sc->sc_allmulti_flow_enabled = 1; |
4134 | } |
4135 | } else if (sc->sc_allmulti_flow_enabled != 0) { |
4136 | mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_ALLMULTI1, 0); |
4137 | sc->sc_allmulti_flow_enabled = 0; |
4138 | } |
4139 | |
4140 | insize = sizeof(struct mcx_nic_vport_ctx) + 240; |
4141 | |
4142 | cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem)((void *)(&sc->sc_cmdq_mem)->mxm_kva); |
4143 | token = mcx_cmdq_token(sc); |
4144 | mcx_cmdq_init(sc, cqe, sizeof(*in) + insize, sizeof(*out), token); |
4145 | |
4146 | in = mcx_cmdq_in(cqe); |
4147 | in->cmd_opcode = htobe16(MCX_CMD_MODIFY_NIC_VPORT_CONTEXT)(__uint16_t)(__builtin_constant_p(0x755) ? (__uint16_t)(((__uint16_t )(0x755) & 0xffU) << 8 | ((__uint16_t)(0x755) & 0xff00U) >> 8) : __swap16md(0x755)); |
4148 | in->cmd_op_mod = htobe16(0)(__uint16_t)(__builtin_constant_p(0) ? (__uint16_t)(((__uint16_t )(0) & 0xffU) << 8 | ((__uint16_t)(0) & 0xff00U ) >> 8) : __swap16md(0)); |
4149 | in->cmd_field_select = htobe32((__uint32_t)(__builtin_constant_p(0x10 | 0x40) ? (__uint32_t) (((__uint32_t)(0x10 | 0x40) & 0xff) << 24 | ((__uint32_t )(0x10 | 0x40) & 0xff00) << 8 | ((__uint32_t)(0x10 | 0x40) & 0xff0000) >> 8 | ((__uint32_t)(0x10 | 0x40 ) & 0xff000000) >> 24) : __swap32md(0x10 | 0x40)) |
4150 | MCX_CMD_MODIFY_NIC_VPORT_CONTEXT_FIELD_PROMISC |(__uint32_t)(__builtin_constant_p(0x10 | 0x40) ? (__uint32_t) (((__uint32_t)(0x10 | 0x40) & 0xff) << 24 | ((__uint32_t )(0x10 | 0x40) & 0xff00) << 8 | ((__uint32_t)(0x10 | 0x40) & 0xff0000) >> 8 | ((__uint32_t)(0x10 | 0x40 ) & 0xff000000) >> 24) : __swap32md(0x10 | 0x40)) |
4151 | MCX_CMD_MODIFY_NIC_VPORT_CONTEXT_FIELD_MTU)(__uint32_t)(__builtin_constant_p(0x10 | 0x40) ? (__uint32_t) (((__uint32_t)(0x10 | 0x40) & 0xff) << 24 | ((__uint32_t )(0x10 | 0x40) & 0xff00) << 8 | ((__uint32_t)(0x10 | 0x40) & 0xff0000) >> 8 | ((__uint32_t)(0x10 | 0x40 ) & 0xff000000) >> 24) : __swap32md(0x10 | 0x40)); |
4152 | |
4153 | if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_input_ptr, token) != 0) { |
4154 | printf(", unable to allocate modify " |
4155 | "nic vport context mailboxen\n"); |
4156 | return (-1); |
4157 | } |
4158 | ctx = (struct mcx_nic_vport_ctx *) |
4159 | (((char *)mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0))) + 240); |
4160 | ctx->vp_mtu = htobe32(sc->sc_hardmtu)(__uint32_t)(__builtin_constant_p(sc->sc_hardmtu) ? (__uint32_t )(((__uint32_t)(sc->sc_hardmtu) & 0xff) << 24 | ( (__uint32_t)(sc->sc_hardmtu) & 0xff00) << 8 | (( __uint32_t)(sc->sc_hardmtu) & 0xff0000) >> 8 | ( (__uint32_t)(sc->sc_hardmtu) & 0xff000000) >> 24 ) : __swap32md(sc->sc_hardmtu)); |
4161 | /* |
4162 | * always leave promisc-all enabled on the vport since we |
4163 | * can't give it a vlan list, and we're already doing multicast |
4164 | * filtering in the flow table. |
4165 | */ |
4166 | ctx->vp_flags = htobe16(MCX_NIC_VPORT_CTX_PROMISC_ALL)(__uint16_t)(__builtin_constant_p((1 << 13)) ? (__uint16_t )(((__uint16_t)((1 << 13)) & 0xffU) << 8 | (( __uint16_t)((1 << 13)) & 0xff00U) >> 8) : __swap16md ((1 << 13))); |
4167 | |
4168 | mcx_cmdq_mboxes_sign(&mxm, 1); |
4169 | mcx_cmdq_post(sc, cqe, 0); |
4170 | |
4171 | error = mcx_cmdq_poll(sc, cqe, 1000); |
4172 | if (error != 0) { |
4173 | printf(", modify nic vport context timeout\n"); |
4174 | goto free; |
4175 | } |
4176 | if (mcx_cmdq_verify(cqe) != 0) { |
4177 | printf(", modify nic vport context command corrupt\n"); |
4178 | goto free; |
4179 | } |
4180 | |
4181 | out = mcx_cmdq_out(cqe); |
4182 | if (out->cmd_status != MCX_CQ_STATUS_OK(0x00 << 1)) { |
4183 | printf(", modify nic vport context failed (%x, %x)\n", |
4184 | out->cmd_status, betoh32(out->cmd_syndrome)(__uint32_t)(__builtin_constant_p(out->cmd_syndrome) ? (__uint32_t )(((__uint32_t)(out->cmd_syndrome) & 0xff) << 24 | ((__uint32_t)(out->cmd_syndrome) & 0xff00) << 8 | ((__uint32_t)(out->cmd_syndrome) & 0xff0000) >> 8 | ((__uint32_t)(out->cmd_syndrome) & 0xff000000) >> 24) : __swap32md(out->cmd_syndrome))); |
4185 | error = -1; |
4186 | goto free; |
4187 | } |
4188 | |
4189 | free: |
4190 | mcx_dmamem_free(sc, &mxm); |
4191 | |
4192 | return (error); |
4193 | } |
4194 | |
4195 | static int |
4196 | mcx_alloc_uar(struct mcx_softc *sc, int *uar) |
4197 | { |
4198 | struct mcx_cmdq_entry *cqe; |
4199 | struct mcx_cmd_alloc_uar_in *in; |
4200 | struct mcx_cmd_alloc_uar_out *out; |
4201 | int error; |
4202 | |
4203 | cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem)((void *)(&sc->sc_cmdq_mem)->mxm_kva); |
4204 | mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc)); |
4205 | |
4206 | in = mcx_cmdq_in(cqe); |
4207 | in->cmd_opcode = htobe16(MCX_CMD_ALLOC_UAR)(__uint16_t)(__builtin_constant_p(0x802) ? (__uint16_t)(((__uint16_t )(0x802) & 0xffU) << 8 | ((__uint16_t)(0x802) & 0xff00U) >> 8) : __swap16md(0x802)); |
4208 | in->cmd_op_mod = htobe16(0)(__uint16_t)(__builtin_constant_p(0) ? (__uint16_t)(((__uint16_t )(0) & 0xffU) << 8 | ((__uint16_t)(0) & 0xff00U ) >> 8) : __swap16md(0)); |
4209 | |
4210 | mcx_cmdq_post(sc, cqe, 0); |
4211 | |
4212 | error = mcx_cmdq_poll(sc, cqe, 1000); |
4213 | if (error != 0) { |
4214 | printf(", alloc uar timeout\n"); |
4215 | return (-1); |
4216 | } |
4217 | if (mcx_cmdq_verify(cqe) != 0) { |
4218 | printf(", alloc uar command corrupt\n"); |
4219 | return (-1); |
4220 | } |
4221 | |
4222 | out = mcx_cmdq_out(cqe); |
4223 | if (out->cmd_status != MCX_CQ_STATUS_OK(0x00 << 1)) { |
4224 | printf(", alloc uar failed (%x)\n", out->cmd_status); |
4225 | return (-1); |
4226 | } |
4227 | |
4228 | *uar = mcx_get_id(out->cmd_uar); |
4229 | return (0); |
4230 | } |
4231 | |
4232 | static int |
4233 | mcx_create_eq(struct mcx_softc *sc, struct mcx_eq *eq, int uar, |
4234 | uint64_t events, int vector) |
4235 | { |
4236 | struct mcx_cmdq_entry *cqe; |
4237 | struct mcx_dmamem mxm; |
4238 | struct mcx_cmd_create_eq_in *in; |
4239 | struct mcx_cmd_create_eq_mb_in *mbin; |
4240 | struct mcx_cmd_create_eq_out *out; |
4241 | struct mcx_eq_entry *eqe; |
4242 | int error; |
4243 | uint64_t *pas; |
4244 | int insize, npages, paslen, i, token; |
4245 | |
4246 | eq->eq_cons = 0; |
4247 | |
4248 | npages = howmany((1 << MCX_LOG_EQ_SIZE) * sizeof(struct mcx_eq_entry),((((1 << 7) * sizeof(struct mcx_eq_entry)) + (((1 << 12)) - 1)) / ((1 << 12))) |
4249 | MCX_PAGE_SIZE)((((1 << 7) * sizeof(struct mcx_eq_entry)) + (((1 << 12)) - 1)) / ((1 << 12))); |
4250 | paslen = npages * sizeof(*pas); |
4251 | insize = sizeof(struct mcx_cmd_create_eq_mb_in) + paslen; |
4252 | |
4253 | if (mcx_dmamem_alloc(sc, &eq->eq_mem, npages * MCX_PAGE_SIZE(1 << 12), |
4254 | MCX_PAGE_SIZE(1 << 12)) != 0) { |
4255 | printf(", unable to allocate event queue memory\n"); |
4256 | return (-1); |
4257 | } |
4258 | |
4259 | eqe = (struct mcx_eq_entry *)MCX_DMA_KVA(&eq->eq_mem)((void *)(&eq->eq_mem)->mxm_kva); |
4260 | for (i = 0; i < (1 << MCX_LOG_EQ_SIZE7); i++) { |
4261 | eqe[i].eq_owner = MCX_EQ_ENTRY_OWNER_INIT1; |
4262 | } |
4263 | |
4264 | cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem)((void *)(&sc->sc_cmdq_mem)->mxm_kva); |
4265 | token = mcx_cmdq_token(sc); |
4266 | mcx_cmdq_init(sc, cqe, sizeof(*in) + insize, sizeof(*out), token); |
4267 | |
4268 | in = mcx_cmdq_in(cqe); |
4269 | in->cmd_opcode = htobe16(MCX_CMD_CREATE_EQ)(__uint16_t)(__builtin_constant_p(0x301) ? (__uint16_t)(((__uint16_t )(0x301) & 0xffU) << 8 | ((__uint16_t)(0x301) & 0xff00U) >> 8) : __swap16md(0x301)); |
4270 | in->cmd_op_mod = htobe16(0)(__uint16_t)(__builtin_constant_p(0) ? (__uint16_t)(((__uint16_t )(0) & 0xffU) << 8 | ((__uint16_t)(0) & 0xff00U ) >> 8) : __swap16md(0)); |
4271 | |
4272 | if (mcx_cmdq_mboxes_alloc(sc, &mxm, |
4273 | howmany(insize, MCX_CMDQ_MAILBOX_DATASIZE)(((insize) + ((512) - 1)) / (512)), |
4274 | &cqe->cq_input_ptr, token) != 0) { |
4275 | printf(", unable to allocate create eq mailboxen\n"); |
4276 | goto free_eq; |
4277 | } |
4278 | mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)); |
4279 | mbin->cmd_eq_ctx.eq_uar_size = htobe32((__uint32_t)(__builtin_constant_p((7 << 24) | uar) ? (__uint32_t )(((__uint32_t)((7 << 24) | uar) & 0xff) << 24 | ((__uint32_t)((7 << 24) | uar) & 0xff00) << 8 | ((__uint32_t)((7 << 24) | uar) & 0xff0000) >> 8 | ((__uint32_t)((7 << 24) | uar) & 0xff000000) >> 24) : __swap32md((7 << 24) | uar)) |
4280 | (MCX_LOG_EQ_SIZE << MCX_EQ_CTX_LOG_EQ_SIZE_SHIFT) | uar)(__uint32_t)(__builtin_constant_p((7 << 24) | uar) ? (__uint32_t )(((__uint32_t)((7 << 24) | uar) & 0xff) << 24 | ((__uint32_t)((7 << 24) | uar) & 0xff00) << 8 | ((__uint32_t)((7 << 24) | uar) & 0xff0000) >> 8 | ((__uint32_t)((7 << 24) | uar) & 0xff000000) >> 24) : __swap32md((7 << 24) | uar)); |
4281 | mbin->cmd_eq_ctx.eq_intr = vector; |
4282 | mbin->cmd_event_bitmask = htobe64(events)(__uint64_t)(__builtin_constant_p(events) ? (__uint64_t)((((__uint64_t )(events) & 0xff) << 56) | ((__uint64_t)(events) & 0xff00ULL) << 40 | ((__uint64_t)(events) & 0xff0000ULL ) << 24 | ((__uint64_t)(events) & 0xff000000ULL) << 8 | ((__uint64_t)(events) & 0xff00000000ULL) >> 8 | ((__uint64_t)(events) & 0xff0000000000ULL) >> 24 | ((__uint64_t)(events) & 0xff000000000000ULL) >> 40 | ((__uint64_t)(events) & 0xff00000000000000ULL) >> 56) : __swap64md(events)); |
4283 | |
4284 | bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&eq->eq_mem),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& eq->eq_mem)->mxm_map)), (0), (((&eq->eq_mem)-> mxm_size)), (0x01)) |
4285 | 0, MCX_DMA_LEN(&eq->eq_mem), BUS_DMASYNC_PREREAD)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& eq->eq_mem)->mxm_map)), (0), (((&eq->eq_mem)-> mxm_size)), (0x01)); |
4286 | |
4287 | /* physical addresses follow the mailbox in data */ |
4288 | mcx_cmdq_mboxes_pas(&mxm, sizeof(*mbin), npages, &eq->eq_mem); |
4289 | mcx_cmdq_mboxes_sign(&mxm, howmany(insize, MCX_CMDQ_MAILBOX_DATASIZE)(((insize) + ((512) - 1)) / (512))); |
4290 | mcx_cmdq_post(sc, cqe, 0); |
4291 | |
4292 | error = mcx_cmdq_poll(sc, cqe, 1000); |
4293 | if (error != 0) { |
4294 | printf(", create eq timeout\n"); |
4295 | goto free_mxm; |
4296 | } |
4297 | if (mcx_cmdq_verify(cqe) != 0) { |
4298 | printf(", create eq command corrupt\n"); |
4299 | goto free_mxm; |
4300 | } |
4301 | |
4302 | out = mcx_cmdq_out(cqe); |
4303 | if (out->cmd_status != MCX_CQ_STATUS_OK(0x00 << 1)) { |
4304 | printf(", create eq failed (%x, %x)\n", out->cmd_status, |
4305 | betoh32(out->cmd_syndrome)(__uint32_t)(__builtin_constant_p(out->cmd_syndrome) ? (__uint32_t )(((__uint32_t)(out->cmd_syndrome) & 0xff) << 24 | ((__uint32_t)(out->cmd_syndrome) & 0xff00) << 8 | ((__uint32_t)(out->cmd_syndrome) & 0xff0000) >> 8 | ((__uint32_t)(out->cmd_syndrome) & 0xff000000) >> 24) : __swap32md(out->cmd_syndrome))); |
4306 | goto free_mxm; |
4307 | } |
4308 | |
4309 | eq->eq_n = mcx_get_id(out->cmd_eqn); |
4310 | |
4311 | mcx_dmamem_free(sc, &mxm); |
4312 | |
4313 | mcx_arm_eq(sc, eq, uar); |
4314 | |
4315 | return (0); |
4316 | |
4317 | free_mxm: |
4318 | bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&eq->eq_mem),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& eq->eq_mem)->mxm_map)), (0), (((&eq->eq_mem)-> mxm_size)), (0x02)) |
4319 | 0, MCX_DMA_LEN(&eq->eq_mem), BUS_DMASYNC_POSTREAD)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& eq->eq_mem)->mxm_map)), (0), (((&eq->eq_mem)-> mxm_size)), (0x02)); |
4320 | mcx_dmamem_free(sc, &mxm); |
4321 | free_eq: |
4322 | mcx_dmamem_free(sc, &eq->eq_mem); |
4323 | return (-1); |
4324 | } |
4325 | |
4326 | static int |
4327 | mcx_alloc_pd(struct mcx_softc *sc) |
4328 | { |
4329 | struct mcx_cmdq_entry *cqe; |
4330 | struct mcx_cmd_alloc_pd_in *in; |
4331 | struct mcx_cmd_alloc_pd_out *out; |
4332 | int error; |
4333 | |
4334 | cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem)((void *)(&sc->sc_cmdq_mem)->mxm_kva); |
4335 | mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc)); |
4336 | |
4337 | in = mcx_cmdq_in(cqe); |
4338 | in->cmd_opcode = htobe16(MCX_CMD_ALLOC_PD)(__uint16_t)(__builtin_constant_p(0x800) ? (__uint16_t)(((__uint16_t )(0x800) & 0xffU) << 8 | ((__uint16_t)(0x800) & 0xff00U) >> 8) : __swap16md(0x800)); |
4339 | in->cmd_op_mod = htobe16(0)(__uint16_t)(__builtin_constant_p(0) ? (__uint16_t)(((__uint16_t )(0) & 0xffU) << 8 | ((__uint16_t)(0) & 0xff00U ) >> 8) : __swap16md(0)); |
4340 | |
4341 | mcx_cmdq_post(sc, cqe, 0); |
4342 | |
4343 | error = mcx_cmdq_poll(sc, cqe, 1000); |
4344 | if (error != 0) { |
4345 | printf(", alloc pd timeout\n"); |
4346 | return (-1); |
4347 | } |
4348 | if (mcx_cmdq_verify(cqe) != 0) { |
4349 | printf(", alloc pd command corrupt\n"); |
4350 | return (-1); |
4351 | } |
4352 | |
4353 | out = mcx_cmdq_out(cqe); |
4354 | if (out->cmd_status != MCX_CQ_STATUS_OK(0x00 << 1)) { |
4355 | printf(", alloc pd failed (%x)\n", out->cmd_status); |
4356 | return (-1); |
4357 | } |
4358 | |
4359 | sc->sc_pd = mcx_get_id(out->cmd_pd); |
4360 | return (0); |
4361 | } |
4362 | |
4363 | static int |
4364 | mcx_alloc_tdomain(struct mcx_softc *sc) |
4365 | { |
4366 | struct mcx_cmdq_entry *cqe; |
4367 | struct mcx_cmd_alloc_td_in *in; |
4368 | struct mcx_cmd_alloc_td_out *out; |
4369 | int error; |
4370 | |
4371 | cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem)((void *)(&sc->sc_cmdq_mem)->mxm_kva); |
4372 | mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc)); |
4373 | |
4374 | in = mcx_cmdq_in(cqe); |
4375 | in->cmd_opcode = htobe16(MCX_CMD_ALLOC_TRANSPORT_DOMAIN)(__uint16_t)(__builtin_constant_p(0x816) ? (__uint16_t)(((__uint16_t )(0x816) & 0xffU) << 8 | ((__uint16_t)(0x816) & 0xff00U) >> 8) : __swap16md(0x816)); |
4376 | in->cmd_op_mod = htobe16(0)(__uint16_t)(__builtin_constant_p(0) ? (__uint16_t)(((__uint16_t )(0) & 0xffU) << 8 | ((__uint16_t)(0) & 0xff00U ) >> 8) : __swap16md(0)); |
4377 | |
4378 | mcx_cmdq_post(sc, cqe, 0); |
4379 | |
4380 | error = mcx_cmdq_poll(sc, cqe, 1000); |
4381 | if (error != 0) { |
4382 | printf(", alloc transport domain timeout\n"); |
4383 | return (-1); |
4384 | } |
4385 | if (mcx_cmdq_verify(cqe) != 0) { |
4386 | printf(", alloc transport domain command corrupt\n"); |
4387 | return (-1); |
4388 | } |
4389 | |
4390 | out = mcx_cmdq_out(cqe); |
4391 | if (out->cmd_status != MCX_CQ_STATUS_OK(0x00 << 1)) { |
4392 | printf(", alloc transport domain failed (%x)\n", |
4393 | out->cmd_status); |
4394 | return (-1); |
4395 | } |
4396 | |
4397 | sc->sc_tdomain = mcx_get_id(out->cmd_tdomain); |
4398 | return (0); |
4399 | } |
4400 | |
4401 | static int |
4402 | mcx_query_nic_vport_context(struct mcx_softc *sc) |
4403 | { |
4404 | struct mcx_dmamem mxm; |
4405 | struct mcx_cmdq_entry *cqe; |
4406 | struct mcx_cmd_query_nic_vport_context_in *in; |
4407 | struct mcx_cmd_query_nic_vport_context_out *out; |
4408 | struct mcx_nic_vport_ctx *ctx; |
4409 | uint8_t *addr; |
4410 | int error, token, i; |
4411 | |
4412 | cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem)((void *)(&sc->sc_cmdq_mem)->mxm_kva); |
4413 | token = mcx_cmdq_token(sc); |
4414 | mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + sizeof(*ctx), token); |
4415 | |
4416 | in = mcx_cmdq_in(cqe); |
4417 | in->cmd_opcode = htobe16(MCX_CMD_QUERY_NIC_VPORT_CONTEXT)(__uint16_t)(__builtin_constant_p(0x754) ? (__uint16_t)(((__uint16_t )(0x754) & 0xffU) << 8 | ((__uint16_t)(0x754) & 0xff00U) >> 8) : __swap16md(0x754)); |
4418 | in->cmd_op_mod = htobe16(0)(__uint16_t)(__builtin_constant_p(0) ? (__uint16_t)(((__uint16_t )(0) & 0xffU) << 8 | ((__uint16_t)(0) & 0xff00U ) >> 8) : __swap16md(0)); |
4419 | in->cmd_allowed_list_type = 0; |
4420 | |
4421 | if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, |
4422 | &cqe->cq_output_ptr, token) != 0) { |
4423 | printf(", unable to allocate " |
4424 | "query nic vport context mailboxen\n"); |
4425 | return (-1); |
4426 | } |
4427 | mcx_cmdq_mboxes_sign(&mxm, 1); |
4428 | mcx_cmdq_post(sc, cqe, 0); |
4429 | |
4430 | error = mcx_cmdq_poll(sc, cqe, 1000); |
4431 | if (error != 0) { |
4432 | printf(", query nic vport context timeout\n"); |
4433 | goto free; |
4434 | } |
4435 | if (mcx_cmdq_verify(cqe) != 0) { |
4436 | printf(", query nic vport context command corrupt\n"); |
4437 | goto free; |
4438 | } |
4439 | |
4440 | out = mcx_cmdq_out(cqe); |
4441 | if (out->cmd_status != MCX_CQ_STATUS_OK(0x00 << 1)) { |
4442 | printf(", query nic vport context failed (%x, %x)\n", |
4443 | out->cmd_status, betoh32(out->cmd_syndrome)(__uint32_t)(__builtin_constant_p(out->cmd_syndrome) ? (__uint32_t )(((__uint32_t)(out->cmd_syndrome) & 0xff) << 24 | ((__uint32_t)(out->cmd_syndrome) & 0xff00) << 8 | ((__uint32_t)(out->cmd_syndrome) & 0xff0000) >> 8 | ((__uint32_t)(out->cmd_syndrome) & 0xff000000) >> 24) : __swap32md(out->cmd_syndrome))); |
4444 | error = -1; |
4445 | goto free; |
4446 | } |
4447 | |
4448 | ctx = (struct mcx_nic_vport_ctx *) |
4449 | mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)); |
4450 | addr = (uint8_t *)&ctx->vp_perm_addr; |
4451 | for (i = 0; i < ETHER_ADDR_LEN6; i++) { |
4452 | sc->sc_ac.ac_enaddr[i] = addr[i + 2]; |
4453 | } |
4454 | free: |
4455 | mcx_dmamem_free(sc, &mxm); |
4456 | |
4457 | return (error); |
4458 | } |
4459 | |
/*
 * mcx_query_special_contexts: issue the QUERY_SPECIAL_CONTEXTS (0x203)
 * firmware command and save the reserved lkey into sc->sc_lkey for
 * later use in work-queue entries.  Runs during attach, hence the
 * ", msg\n" printf continuation style.  Returns 0 on success, -1 or
 * the poll error on failure.
 */
4460 | static int
4461 | mcx_query_special_contexts(struct mcx_softc *sc)
4462 | {
4463 | struct mcx_cmdq_entry *cqe;
4464 | struct mcx_cmd_query_special_ctx_in *in;
4465 | struct mcx_cmd_query_special_ctx_out *out;
4466 | int error;
4467 | 
/* command queue entry lives at the start of the shared cmdq DMA area */
4468 | cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem)((void *)(&sc->sc_cmdq_mem)->mxm_kva);
4469 | mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
4470 | 
4471 | in = mcx_cmdq_in(cqe);
4472 | in->cmd_opcode = htobe16(MCX_CMD_QUERY_SPECIAL_CONTEXTS)(__uint16_t)(__builtin_constant_p(0x203) ? (__uint16_t)(((__uint16_t )(0x203) & 0xffU) << 8 | ((__uint16_t)(0x203) & 0xff00U) >> 8) : __swap16md(0x203));
4473 | in->cmd_op_mod = htobe16(0)(__uint16_t)(__builtin_constant_p(0) ? (__uint16_t)(((__uint16_t )(0) & 0xffU) << 8 | ((__uint16_t)(0) & 0xff00U ) >> 8) : __swap16md(0));
4474 | 
/* in/out both fit in the cmdq entry, so no mailboxes are needed here */
4475 | mcx_cmdq_post(sc, cqe, 0);
4476 | 
4477 | error = mcx_cmdq_poll(sc, cqe, 1000);
4478 | if (error != 0) {
4479 | printf(", query special contexts timeout\n");
4480 | return (-1);
4481 | }
4482 | if (mcx_cmdq_verify(cqe) != 0) {
4483 | printf(", query special contexts command corrupt\n");
4484 | return (-1);
4485 | }
4486 | 
4487 | out = mcx_cmdq_out(cqe);
4488 | if (out->cmd_status != MCX_CQ_STATUS_OK(0x00 << 1)) {
4489 | printf(", query special contexts failed (%x)\n",
4490 | out->cmd_status);
4491 | return (-1);
4492 | }
4493 | 
/* reserved lkey: used as the memory key for all of the driver's WQEs */
4494 | sc->sc_lkey = betoh32(out->cmd_resd_lkey)(__uint32_t)(__builtin_constant_p(out->cmd_resd_lkey) ? (__uint32_t )(((__uint32_t)(out->cmd_resd_lkey) & 0xff) << 24 | ((__uint32_t)(out->cmd_resd_lkey) & 0xff00) << 8 | ((__uint32_t)(out->cmd_resd_lkey) & 0xff0000) >> 8 | ((__uint32_t)(out->cmd_resd_lkey) & 0xff000000) >> 24) : __swap32md(out->cmd_resd_lkey));
4495 | return (0);
4496 | }
4497 | |
/*
 * mcx_set_port_mtu: clamp the requested MTU to the port's hardware
 * maximum (read via the PMTU register), program it as the admin MTU,
 * and record the resulting hard MTU and rx buffer size in the softc.
 * Returns 0 on success, the register access error otherwise.
 */
4498 | static int
4499 | mcx_set_port_mtu(struct mcx_softc *sc, int mtu)
4500 | {
4501 | struct mcx_reg_pmtu pmtu;
4502 | int error;
4503 | 
4504 | /* read max mtu */
4505 | memset(&pmtu, 0, sizeof(pmtu))__builtin_memset((&pmtu), (0), (sizeof(pmtu)));
/* single-port device: local port is always 1 */
4506 | pmtu.rp_local_port = 1;
4507 | error = mcx_access_hca_reg(sc, MCX_REG_PMTU0x5003, MCX_REG_OP_READ1, &pmtu,
4508 | sizeof(pmtu));
4509 | if (error != 0) {
4510 | printf(", unable to get port MTU\n");
4511 | return error;
4512 | }
4513 | 
/* never program more than the hardware advertises */
4514 | mtu = min(mtu, betoh16(pmtu.rp_max_mtu)(__uint16_t)(__builtin_constant_p(pmtu.rp_max_mtu) ? (__uint16_t )(((__uint16_t)(pmtu.rp_max_mtu) & 0xffU) << 8 | (( __uint16_t)(pmtu.rp_max_mtu) & 0xff00U) >> 8) : __swap16md (pmtu.rp_max_mtu)));
4515 | pmtu.rp_admin_mtu = htobe16(mtu)(__uint16_t)(__builtin_constant_p(mtu) ? (__uint16_t)(((__uint16_t )(mtu) & 0xffU) << 8 | ((__uint16_t)(mtu) & 0xff00U ) >> 8) : __swap16md(mtu));
4516 | error = mcx_access_hca_reg(sc, MCX_REG_PMTU0x5003, MCX_REG_OP_WRITE0, &pmtu,
4517 | sizeof(pmtu));
4518 | if (error != 0) {
4519 | printf(", unable to set port MTU\n");
4520 | return error;
4521 | }
4522 | 
4523 | sc->sc_hardmtu = mtu;
/* rx buffers: MTU plus 2-byte ETHER_ALIGN, rounded up to a long */
4524 | sc->sc_rxbufsz = roundup(mtu + ETHER_ALIGN, sizeof(long))((((mtu + 2)+((sizeof(long))-1))/(sizeof(long)))*(sizeof(long )));
4525 | return 0;
4526 | }
4527 | |
/*
 * mcx_create_cq: allocate DMA memory for a completion queue, issue the
 * CREATE_CQ (0x400) firmware command pointing the CQ at the given UAR,
 * doorbell slot and event queue, then arm it.  The CQ context and the
 * physical addresses of the queue pages are passed in command mailboxes.
 * Returns 0 on success, -1 on failure (all resources released).
 */
4528 | static int
4529 | mcx_create_cq(struct mcx_softc *sc, struct mcx_cq *cq, int uar, int db, int eqn)
4530 | {
4531 | struct mcx_cmdq_entry *cmde;
4532 | struct mcx_cq_entry *cqe;
4533 | struct mcx_dmamem mxm;
4534 | struct mcx_cmd_create_cq_in *in;
4535 | struct mcx_cmd_create_cq_mb_in *mbin;
4536 | struct mcx_cmd_create_cq_out *out;
4537 | int error;
4538 | uint64_t *pas;
4539 | int insize, npages, paslen, i, token;
4540 | 
/* each CQ gets its own 64-byte doorbell slot within the doorbell page */
4541 | cq->cq_doorbell = MCX_CQ_DOORBELL_BASE0 + (MCX_CQ_DOORBELL_STRIDE64 * db);
4542 | 
4543 | npages = howmany((1 << MCX_LOG_CQ_SIZE) * sizeof(struct mcx_cq_entry),((((1 << 12) * sizeof(struct mcx_cq_entry)) + (((1 << 12)) - 1)) / ((1 << 12)))
4544 | MCX_PAGE_SIZE)((((1 << 12) * sizeof(struct mcx_cq_entry)) + (((1 << 12)) - 1)) / ((1 << 12)));
4545 | paslen = npages * sizeof(*pas);
/* mailbox input: CQ context followed by the page address array */
4546 | insize = sizeof(struct mcx_cmd_create_cq_mb_in) + paslen;
4547 | 
4548 | if (mcx_dmamem_alloc(sc, &cq->cq_mem, npages * MCX_PAGE_SIZE(1 << 12),
4549 | MCX_PAGE_SIZE(1 << 12)) != 0) {
4550 | printf("%s: unable to allocate completion queue memory\n",
4551 | DEVNAME(sc)((sc)->sc_dev.dv_xname));
4552 | return (-1);
4553 | }
/*
 * pre-set the owner bit in every entry so hardware ownership flips
 * are detected correctly on the first pass around the ring
 */
4554 | cqe = MCX_DMA_KVA(&cq->cq_mem)((void *)(&cq->cq_mem)->mxm_kva);
4555 | for (i = 0; i < (1 << MCX_LOG_CQ_SIZE12); i++) {
4556 | cqe[i].cq_opcode_owner = MCX_CQ_ENTRY_FLAG_OWNER(1 << 0);
4557 | }
4558 | 
4559 | cmde = MCX_DMA_KVA(&sc->sc_cmdq_mem)((void *)(&sc->sc_cmdq_mem)->mxm_kva);
4560 | token = mcx_cmdq_token(sc);
4561 | mcx_cmdq_init(sc, cmde, sizeof(*in) + insize, sizeof(*out), token);
4562 | 
4563 | in = mcx_cmdq_in(cmde);
4564 | in->cmd_opcode = htobe16(MCX_CMD_CREATE_CQ)(__uint16_t)(__builtin_constant_p(0x400) ? (__uint16_t)(((__uint16_t )(0x400) & 0xffU) << 8 | ((__uint16_t)(0x400) & 0xff00U) >> 8) : __swap16md(0x400));
4565 | in->cmd_op_mod = htobe16(0)(__uint16_t)(__builtin_constant_p(0) ? (__uint16_t)(((__uint16_t )(0) & 0xffU) << 8 | ((__uint16_t)(0) & 0xff00U ) >> 8) : __swap16md(0));
4566 | 
4567 | if (mcx_cmdq_mboxes_alloc(sc, &mxm,
4568 | howmany(insize, MCX_CMDQ_MAILBOX_DATASIZE)(((insize) + ((512) - 1)) / (512)),
4569 | &cmde->cq_input_ptr, token) != 0) {
4570 | printf("%s: unable to allocate create cq mailboxen\n",
4571 | DEVNAME(sc)((sc)->sc_dev.dv_xname));
4572 | goto free_cq;
4573 | }
/* fill in the CQ context: size/uar, event queue, moderation, doorbell */
4574 | mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4575 | mbin->cmd_cq_ctx.cq_uar_size = htobe32((__uint32_t)(__builtin_constant_p((12 << 24) | uar) ? ( __uint32_t)(((__uint32_t)((12 << 24) | uar) & 0xff) << 24 | ((__uint32_t)((12 << 24) | uar) & 0xff00 ) << 8 | ((__uint32_t)((12 << 24) | uar) & 0xff0000 ) >> 8 | ((__uint32_t)((12 << 24) | uar) & 0xff000000 ) >> 24) : __swap32md((12 << 24) | uar))
4576 | (MCX_LOG_CQ_SIZE << MCX_CQ_CTX_LOG_CQ_SIZE_SHIFT) | uar)(__uint32_t)(__builtin_constant_p((12 << 24) | uar) ? ( __uint32_t)(((__uint32_t)((12 << 24) | uar) & 0xff) << 24 | ((__uint32_t)((12 << 24) | uar) & 0xff00 ) << 8 | ((__uint32_t)((12 << 24) | uar) & 0xff0000 ) >> 8 | ((__uint32_t)((12 << 24) | uar) & 0xff000000 ) >> 24) : __swap32md((12 << 24) | uar));
4577 | mbin->cmd_cq_ctx.cq_eqn = htobe32(eqn)(__uint32_t)(__builtin_constant_p(eqn) ? (__uint32_t)(((__uint32_t )(eqn) & 0xff) << 24 | ((__uint32_t)(eqn) & 0xff00 ) << 8 | ((__uint32_t)(eqn) & 0xff0000) >> 8 | ((__uint32_t)(eqn) & 0xff000000) >> 24) : __swap32md (eqn));
4578 | mbin->cmd_cq_ctx.cq_period_max_count = htobe32((__uint32_t)(__builtin_constant_p((50 << 16) | (((1 << (12 - 1)) * 9) / 10)) ? (__uint32_t)(((__uint32_t)((50 << 16) | (((1 << (12 - 1)) * 9) / 10)) & 0xff) << 24 | ((__uint32_t)((50 << 16) | (((1 << (12 - 1) ) * 9) / 10)) & 0xff00) << 8 | ((__uint32_t)((50 << 16) | (((1 << (12 - 1)) * 9) / 10)) & 0xff0000) >> 8 | ((__uint32_t)((50 << 16) | (((1 << (12 - 1)) * 9) / 10)) & 0xff000000) >> 24) : __swap32md((50 << 16) | (((1 << (12 - 1)) * 9) / 10)))
4579 | (MCX_CQ_MOD_PERIOD << MCX_CQ_CTX_PERIOD_SHIFT) |(__uint32_t)(__builtin_constant_p((50 << 16) | (((1 << (12 - 1)) * 9) / 10)) ? (__uint32_t)(((__uint32_t)((50 << 16) | (((1 << (12 - 1)) * 9) / 10)) & 0xff) << 24 | ((__uint32_t)((50 << 16) | (((1 << (12 - 1) ) * 9) / 10)) & 0xff00) << 8 | ((__uint32_t)((50 << 16) | (((1 << (12 - 1)) * 9) / 10)) & 0xff0000) >> 8 | ((__uint32_t)((50 << 16) | (((1 << (12 - 1)) * 9) / 10)) & 0xff000000) >> 24) : __swap32md((50 << 16) | (((1 << (12 - 1)) * 9) / 10)))
4580 | MCX_CQ_MOD_COUNTER)(__uint32_t)(__builtin_constant_p((50 << 16) | (((1 << (12 - 1)) * 9) / 10)) ? (__uint32_t)(((__uint32_t)((50 << 16) | (((1 << (12 - 1)) * 9) / 10)) & 0xff) << 24 | ((__uint32_t)((50 << 16) | (((1 << (12 - 1) ) * 9) / 10)) & 0xff00) << 8 | ((__uint32_t)((50 << 16) | (((1 << (12 - 1)) * 9) / 10)) & 0xff0000) >> 8 | ((__uint32_t)((50 << 16) | (((1 << (12 - 1)) * 9) / 10)) & 0xff000000) >> 24) : __swap32md((50 << 16) | (((1 << (12 - 1)) * 9) / 10)));
4581 | mbin->cmd_cq_ctx.cq_doorbell = htobe64((__uint64_t)(__builtin_constant_p(((&sc->sc_doorbell_mem )->mxm_map->dm_segs[0].ds_addr) + cq->cq_doorbell) ? (__uint64_t)((((__uint64_t)(((&sc->sc_doorbell_mem)-> mxm_map->dm_segs[0].ds_addr) + cq->cq_doorbell) & 0xff ) << 56) | ((__uint64_t)(((&sc->sc_doorbell_mem) ->mxm_map->dm_segs[0].ds_addr) + cq->cq_doorbell) & 0xff00ULL) << 40 | ((__uint64_t)(((&sc->sc_doorbell_mem )->mxm_map->dm_segs[0].ds_addr) + cq->cq_doorbell) & 0xff0000ULL) << 24 | ((__uint64_t)(((&sc->sc_doorbell_mem )->mxm_map->dm_segs[0].ds_addr) + cq->cq_doorbell) & 0xff000000ULL) << 8 | ((__uint64_t)(((&sc->sc_doorbell_mem )->mxm_map->dm_segs[0].ds_addr) + cq->cq_doorbell) & 0xff00000000ULL) >> 8 | ((__uint64_t)(((&sc->sc_doorbell_mem )->mxm_map->dm_segs[0].ds_addr) + cq->cq_doorbell) & 0xff0000000000ULL) >> 24 | ((__uint64_t)(((&sc-> sc_doorbell_mem)->mxm_map->dm_segs[0].ds_addr) + cq-> cq_doorbell) & 0xff000000000000ULL) >> 40 | ((__uint64_t )(((&sc->sc_doorbell_mem)->mxm_map->dm_segs[0].ds_addr ) + cq->cq_doorbell) & 0xff00000000000000ULL) >> 56) : __swap64md(((&sc->sc_doorbell_mem)->mxm_map-> dm_segs[0].ds_addr) + cq->cq_doorbell))
4582 | MCX_DMA_DVA(&sc->sc_doorbell_mem) + cq->cq_doorbell)(__uint64_t)(__builtin_constant_p(((&sc->sc_doorbell_mem )->mxm_map->dm_segs[0].ds_addr) + cq->cq_doorbell) ? (__uint64_t)((((__uint64_t)(((&sc->sc_doorbell_mem)-> mxm_map->dm_segs[0].ds_addr) + cq->cq_doorbell) & 0xff ) << 56) | ((__uint64_t)(((&sc->sc_doorbell_mem) ->mxm_map->dm_segs[0].ds_addr) + cq->cq_doorbell) & 0xff00ULL) << 40 | ((__uint64_t)(((&sc->sc_doorbell_mem )->mxm_map->dm_segs[0].ds_addr) + cq->cq_doorbell) & 0xff0000ULL) << 24 | ((__uint64_t)(((&sc->sc_doorbell_mem )->mxm_map->dm_segs[0].ds_addr) + cq->cq_doorbell) & 0xff000000ULL) << 8 | ((__uint64_t)(((&sc->sc_doorbell_mem )->mxm_map->dm_segs[0].ds_addr) + cq->cq_doorbell) & 0xff00000000ULL) >> 8 | ((__uint64_t)(((&sc->sc_doorbell_mem )->mxm_map->dm_segs[0].ds_addr) + cq->cq_doorbell) & 0xff0000000000ULL) >> 24 | ((__uint64_t)(((&sc-> sc_doorbell_mem)->mxm_map->dm_segs[0].ds_addr) + cq-> cq_doorbell) & 0xff000000000000ULL) >> 40 | ((__uint64_t )(((&sc->sc_doorbell_mem)->mxm_map->dm_segs[0].ds_addr ) + cq->cq_doorbell) & 0xff00000000000000ULL) >> 56) : __swap64md(((&sc->sc_doorbell_mem)->mxm_map-> dm_segs[0].ds_addr) + cq->cq_doorbell));
4583 | 
4584 | bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&cq->cq_mem),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& cq->cq_mem)->mxm_map)), (0), (((&cq->cq_mem)-> mxm_size)), (0x01))
4585 | 0, MCX_DMA_LEN(&cq->cq_mem), BUS_DMASYNC_PREREAD)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& cq->cq_mem)->mxm_map)), (0), (((&cq->cq_mem)-> mxm_size)), (0x01));
4586 | 
4587 | /* physical addresses follow the mailbox in data */
4588 | mcx_cmdq_mboxes_pas(&mxm, sizeof(*mbin), npages, &cq->cq_mem);
4589 | mcx_cmdq_post(sc, cmde, 0);
4590 | 
4591 | error = mcx_cmdq_poll(sc, cmde, 1000);
4592 | if (error != 0) {
4593 | printf("%s: create cq timeout\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
4594 | goto free_mxm;
4595 | }
4596 | if (mcx_cmdq_verify(cmde) != 0) {
4597 | printf("%s: create cq command corrupt\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
4598 | goto free_mxm;
4599 | }
4600 | 
4601 | out = mcx_cmdq_out(cmde);
4602 | if (out->cmd_status != MCX_CQ_STATUS_OK(0x00 << 1)) {
4603 | printf("%s: create cq failed (%x, %x)\n", DEVNAME(sc)((sc)->sc_dev.dv_xname),
4604 | out->cmd_status, betoh32(out->cmd_syndrome)(__uint32_t)(__builtin_constant_p(out->cmd_syndrome) ? (__uint32_t )(((__uint32_t)(out->cmd_syndrome) & 0xff) << 24 | ((__uint32_t)(out->cmd_syndrome) & 0xff00) << 8 | ((__uint32_t)(out->cmd_syndrome) & 0xff0000) >> 8 | ((__uint32_t)(out->cmd_syndrome) & 0xff000000) >> 24) : __swap32md(out->cmd_syndrome)));
4605 | goto free_mxm;
4606 | }
4607 | 
/* firmware assigned the CQ number; reset the ring state */
4608 | cq->cq_n = mcx_get_id(out->cmd_cqn);
4609 | cq->cq_cons = 0;
4610 | cq->cq_count = 0;
4611 | 
4612 | mcx_dmamem_free(sc, &mxm);
4613 | 
4614 | bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& sc->sc_doorbell_mem)->mxm_map)), (cq->cq_doorbell), ( sizeof(struct mcx_cq_doorbell)), (0x04))
4615 | cq->cq_doorbell, sizeof(struct mcx_cq_doorbell),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& sc->sc_doorbell_mem)->mxm_map)), (cq->cq_doorbell), ( sizeof(struct mcx_cq_doorbell)), (0x04))
4616 | BUS_DMASYNC_PREWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& sc->sc_doorbell_mem)->mxm_map)), (cq->cq_doorbell), ( sizeof(struct mcx_cq_doorbell)), (0x04));
4617 | 
/* request an interrupt for the next completion */
4618 | mcx_arm_cq(sc, cq, uar);
4619 | 
4620 | return (0);
4621 | 
4622 | free_mxm:
4623 | bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&cq->cq_mem),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& cq->cq_mem)->mxm_map)), (0), (((&cq->cq_mem)-> mxm_size)), (0x02))
4624 | 0, MCX_DMA_LEN(&cq->cq_mem), BUS_DMASYNC_POSTREAD)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& cq->cq_mem)->mxm_map)), (0), (((&cq->cq_mem)-> mxm_size)), (0x02));
4625 | mcx_dmamem_free(sc, &mxm);
4626 | free_cq:
4627 | mcx_dmamem_free(sc, &cq->cq_mem);
4628 | return (-1);
4629 | }
4630 | |
/*
 * mcx_destroy_cq: issue DESTROY_CQ (0x401) for the given completion
 * queue, then release its doorbell and queue DMA memory and clear the
 * ring state.  Returns 0 on success, non-zero on failure.
 *
 * Fix: the command-corrupt path previously did "return error" while
 * error was still 0 from the successful poll, reporting success for a
 * corrupt command; it now returns -1 like the other failure paths.
 */
4631 | static int
4632 | mcx_destroy_cq(struct mcx_softc *sc, struct mcx_cq *cq)
4633 | {
4634 | struct mcx_cmdq_entry *cqe;
4635 | struct mcx_cmd_destroy_cq_in *in;
4636 | struct mcx_cmd_destroy_cq_out *out;
4637 | int error;
4638 | int token;
4639 | 
4640 | cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem)((void *)(&sc->sc_cmdq_mem)->mxm_kva);
4641 | token = mcx_cmdq_token(sc);
4642 | mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token);
4643 | 
4644 | in = mcx_cmdq_in(cqe);
4645 | in->cmd_opcode = htobe16(MCX_CMD_DESTROY_CQ)(__uint16_t)(__builtin_constant_p(0x401) ? (__uint16_t)(((__uint16_t )(0x401) & 0xffU) << 8 | ((__uint16_t)(0x401) & 0xff00U) >> 8) : __swap16md(0x401));
4646 | in->cmd_op_mod = htobe16(0)(__uint16_t)(__builtin_constant_p(0) ? (__uint16_t)(((__uint16_t )(0) & 0xffU) << 8 | ((__uint16_t)(0) & 0xff00U ) >> 8) : __swap16md(0));
4647 | in->cmd_cqn = htobe32(cq->cq_n)(__uint32_t)(__builtin_constant_p(cq->cq_n) ? (__uint32_t) (((__uint32_t)(cq->cq_n) & 0xff) << 24 | ((__uint32_t )(cq->cq_n) & 0xff00) << 8 | ((__uint32_t)(cq-> cq_n) & 0xff0000) >> 8 | ((__uint32_t)(cq->cq_n) & 0xff000000) >> 24) : __swap32md(cq->cq_n));
4648 | 
4649 | mcx_cmdq_post(sc, cqe, 0);
4650 | error = mcx_cmdq_poll(sc, cqe, 1000);
4651 | if (error != 0) {
4652 | printf("%s: destroy cq timeout\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
4653 | return error;
4654 | }
4655 | if (mcx_cmdq_verify(cqe) != 0) {
4656 | printf("%s: destroy cq command corrupt\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
/* error is still 0 here; report the corruption as a failure */
4657 | return -1;
4658 | }
4659 | 
4660 | out = mcx_cmdq_out(cqe);
4661 | if (out->cmd_status != MCX_CQ_STATUS_OK(0x00 << 1)) {
4662 | printf("%s: destroy cq failed (%x, %x)\n", DEVNAME(sc)((sc)->sc_dev.dv_xname),
4663 | out->cmd_status, betoh32(out->cmd_syndrome)(__uint32_t)(__builtin_constant_p(out->cmd_syndrome) ? (__uint32_t )(((__uint32_t)(out->cmd_syndrome) & 0xff) << 24 | ((__uint32_t)(out->cmd_syndrome) & 0xff00) << 8 | ((__uint32_t)(out->cmd_syndrome) & 0xff0000) >> 8 | ((__uint32_t)(out->cmd_syndrome) & 0xff000000) >> 24) : __swap32md(out->cmd_syndrome)));
4664 | return -1;
4665 | }
4666 | 
/* hardware no longer owns the doorbell or the queue memory */
4667 | bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& sc->sc_doorbell_mem)->mxm_map)), (cq->cq_doorbell), ( sizeof(struct mcx_cq_doorbell)), (0x08))
4668 | cq->cq_doorbell, sizeof(struct mcx_cq_doorbell),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& sc->sc_doorbell_mem)->mxm_map)), (cq->cq_doorbell), ( sizeof(struct mcx_cq_doorbell)), (0x08))
4669 | BUS_DMASYNC_POSTWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& sc->sc_doorbell_mem)->mxm_map)), (cq->cq_doorbell), ( sizeof(struct mcx_cq_doorbell)), (0x08));
4670 | 
4671 | bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&cq->cq_mem),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& cq->cq_mem)->mxm_map)), (0), (((&cq->cq_mem)-> mxm_size)), (0x02))
4672 | 0, MCX_DMA_LEN(&cq->cq_mem), BUS_DMASYNC_POSTREAD)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& cq->cq_mem)->mxm_map)), (0), (((&cq->cq_mem)-> mxm_size)), (0x02));
4673 | mcx_dmamem_free(sc, &cq->cq_mem);
4674 | 
4675 | cq->cq_n = 0;
4676 | cq->cq_cons = 0;
4677 | cq->cq_count = 0;
4678 | return 0;
4679 | }
4680 | |
/*
 * mcx_create_rq: allocate DMA memory for a cyclic receive work queue
 * and issue CREATE_RQ (0x908) to bind it to the given completion queue
 * and doorbell slot.  The RQ context sits 0x10 bytes into the mailbox
 * data, followed by the queue's physical page addresses.  VLAN
 * stripping is disabled when the kernel has no vlan(4) support.
 * Returns 0 on success, -1 on failure (all resources released).
 */
4681 | static int
4682 | mcx_create_rq(struct mcx_softc *sc, struct mcx_rx *rx, int db, int cqn)
4683 | {
4684 | struct mcx_cmdq_entry *cqe;
4685 | struct mcx_dmamem mxm;
4686 | struct mcx_cmd_create_rq_in *in;
4687 | struct mcx_cmd_create_rq_out *out;
4688 | struct mcx_rq_ctx *mbin;
4689 | int error;
4690 | uint64_t *pas;
4691 | uint32_t rq_flags;
4692 | int insize, npages, paslen, token;
4693 | 
/* work queue doorbells live in the second half of the doorbell page */
4694 | rx->rx_doorbell = MCX_WQ_DOORBELL_BASE(1 << 12)/2 +
4695 | (db * MCX_WQ_DOORBELL_STRIDE64);
4696 | 
4697 | npages = howmany((1 << MCX_LOG_RQ_SIZE) * sizeof(struct mcx_rq_entry),((((1 << 10) * sizeof(struct mcx_rq_entry)) + (((1 << 12)) - 1)) / ((1 << 12)))
4698 | MCX_PAGE_SIZE)((((1 << 10) * sizeof(struct mcx_rq_entry)) + (((1 << 12)) - 1)) / ((1 << 12)));
4699 | paslen = npages * sizeof(*pas);
/* 0x10 bytes of padding precede the RQ context in the mailbox */
4700 | insize = 0x10 + sizeof(struct mcx_rq_ctx) + paslen;
4701 | 
4702 | if (mcx_dmamem_alloc(sc, &rx->rx_rq_mem, npages * MCX_PAGE_SIZE(1 << 12),
4703 | MCX_PAGE_SIZE(1 << 12)) != 0) {
4704 | printf("%s: unable to allocate receive queue memory\n",
4705 | DEVNAME(sc)((sc)->sc_dev.dv_xname));
4706 | return (-1);
4707 | }
4708 | 
4709 | cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem)((void *)(&sc->sc_cmdq_mem)->mxm_kva);
4710 | token = mcx_cmdq_token(sc);
4711 | mcx_cmdq_init(sc, cqe, sizeof(*in) + insize, sizeof(*out), token);
4712 | 
4713 | in = mcx_cmdq_in(cqe);
4714 | in->cmd_opcode = htobe16(MCX_CMD_CREATE_RQ)(__uint16_t)(__builtin_constant_p(0x908) ? (__uint16_t)(((__uint16_t )(0x908) & 0xffU) << 8 | ((__uint16_t)(0x908) & 0xff00U) >> 8) : __swap16md(0x908));
4715 | in->cmd_op_mod = htobe16(0)(__uint16_t)(__builtin_constant_p(0) ? (__uint16_t)(((__uint16_t )(0) & 0xffU) << 8 | ((__uint16_t)(0) & 0xff00U ) >> 8) : __swap16md(0));
4716 | 
4717 | if (mcx_cmdq_mboxes_alloc(sc, &mxm,
4718 | howmany(insize, MCX_CMDQ_MAILBOX_DATASIZE)(((insize) + ((512) - 1)) / (512)),
4719 | &cqe->cq_input_ptr, token) != 0) {
4720 | printf("%s: unable to allocate create rq mailboxen\n",
4721 | DEVNAME(sc)((sc)->sc_dev.dv_xname));
4722 | goto free_rq;
4723 | }
4724 | mbin = (struct mcx_rq_ctx *)
4725 | (((char *)mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0))) + 0x10);
4726 | rq_flags = MCX_RQ_CTX_RLKEY(1 << 31);
4727 | #if NVLAN1 == 0
4728 | rq_flags |= MCX_RQ_CTX_VLAN_STRIP_DIS(1 << 28);
4729 | #endif
4730 | mbin->rq_flags = htobe32(rq_flags)(__uint32_t)(__builtin_constant_p(rq_flags) ? (__uint32_t)((( __uint32_t)(rq_flags) & 0xff) << 24 | ((__uint32_t) (rq_flags) & 0xff00) << 8 | ((__uint32_t)(rq_flags) & 0xff0000) >> 8 | ((__uint32_t)(rq_flags) & 0xff000000 ) >> 24) : __swap32md(rq_flags));
4731 | mbin->rq_cqn = htobe32(cqn)(__uint32_t)(__builtin_constant_p(cqn) ? (__uint32_t)(((__uint32_t )(cqn) & 0xff) << 24 | ((__uint32_t)(cqn) & 0xff00 ) << 8 | ((__uint32_t)(cqn) & 0xff0000) >> 8 | ((__uint32_t)(cqn) & 0xff000000) >> 24) : __swap32md (cqn));
4732 | mbin->rq_wq.wq_type = MCX_WQ_CTX_TYPE_CYCLIC(1 << 4);
4733 | mbin->rq_wq.wq_pd = htobe32(sc->sc_pd)(__uint32_t)(__builtin_constant_p(sc->sc_pd) ? (__uint32_t )(((__uint32_t)(sc->sc_pd) & 0xff) << 24 | ((__uint32_t )(sc->sc_pd) & 0xff00) << 8 | ((__uint32_t)(sc-> sc_pd) & 0xff0000) >> 8 | ((__uint32_t)(sc->sc_pd ) & 0xff000000) >> 24) : __swap32md(sc->sc_pd));
4734 | mbin->rq_wq.wq_doorbell = htobe64(MCX_DMA_DVA(&sc->sc_doorbell_mem) +(__uint64_t)(__builtin_constant_p(((&sc->sc_doorbell_mem )->mxm_map->dm_segs[0].ds_addr) + rx->rx_doorbell) ? (__uint64_t)((((__uint64_t)(((&sc->sc_doorbell_mem)-> mxm_map->dm_segs[0].ds_addr) + rx->rx_doorbell) & 0xff ) << 56) | ((__uint64_t)(((&sc->sc_doorbell_mem) ->mxm_map->dm_segs[0].ds_addr) + rx->rx_doorbell) & 0xff00ULL) << 40 | ((__uint64_t)(((&sc->sc_doorbell_mem )->mxm_map->dm_segs[0].ds_addr) + rx->rx_doorbell) & 0xff0000ULL) << 24 | ((__uint64_t)(((&sc->sc_doorbell_mem )->mxm_map->dm_segs[0].ds_addr) + rx->rx_doorbell) & 0xff000000ULL) << 8 | ((__uint64_t)(((&sc->sc_doorbell_mem )->mxm_map->dm_segs[0].ds_addr) + rx->rx_doorbell) & 0xff00000000ULL) >> 8 | ((__uint64_t)(((&sc->sc_doorbell_mem )->mxm_map->dm_segs[0].ds_addr) + rx->rx_doorbell) & 0xff0000000000ULL) >> 24 | ((__uint64_t)(((&sc-> sc_doorbell_mem)->mxm_map->dm_segs[0].ds_addr) + rx-> rx_doorbell) & 0xff000000000000ULL) >> 40 | ((__uint64_t )(((&sc->sc_doorbell_mem)->mxm_map->dm_segs[0].ds_addr ) + rx->rx_doorbell) & 0xff00000000000000ULL) >> 56) : __swap64md(((&sc->sc_doorbell_mem)->mxm_map-> dm_segs[0].ds_addr) + rx->rx_doorbell))
4735 | rx->rx_doorbell)(__uint64_t)(__builtin_constant_p(((&sc->sc_doorbell_mem )->mxm_map->dm_segs[0].ds_addr) + rx->rx_doorbell) ? (__uint64_t)((((__uint64_t)(((&sc->sc_doorbell_mem)-> mxm_map->dm_segs[0].ds_addr) + rx->rx_doorbell) & 0xff ) << 56) | ((__uint64_t)(((&sc->sc_doorbell_mem) ->mxm_map->dm_segs[0].ds_addr) + rx->rx_doorbell) & 0xff00ULL) << 40 | ((__uint64_t)(((&sc->sc_doorbell_mem )->mxm_map->dm_segs[0].ds_addr) + rx->rx_doorbell) & 0xff0000ULL) << 24 | ((__uint64_t)(((&sc->sc_doorbell_mem )->mxm_map->dm_segs[0].ds_addr) + rx->rx_doorbell) & 0xff000000ULL) << 8 | ((__uint64_t)(((&sc->sc_doorbell_mem )->mxm_map->dm_segs[0].ds_addr) + rx->rx_doorbell) & 0xff00000000ULL) >> 8 | ((__uint64_t)(((&sc->sc_doorbell_mem )->mxm_map->dm_segs[0].ds_addr) + rx->rx_doorbell) & 0xff0000000000ULL) >> 24 | ((__uint64_t)(((&sc-> sc_doorbell_mem)->mxm_map->dm_segs[0].ds_addr) + rx-> rx_doorbell) & 0xff000000000000ULL) >> 40 | ((__uint64_t )(((&sc->sc_doorbell_mem)->mxm_map->dm_segs[0].ds_addr ) + rx->rx_doorbell) & 0xff00000000000000ULL) >> 56) : __swap64md(((&sc->sc_doorbell_mem)->mxm_map-> dm_segs[0].ds_addr) + rx->rx_doorbell));
/* log2 stride of 4: 16-byte receive queue entries */
4736 | mbin->rq_wq.wq_log_stride = htobe16(4)(__uint16_t)(__builtin_constant_p(4) ? (__uint16_t)(((__uint16_t )(4) & 0xffU) << 8 | ((__uint16_t)(4) & 0xff00U ) >> 8) : __swap16md(4));
4737 | mbin->rq_wq.wq_log_size = MCX_LOG_RQ_SIZE10;
4738 | 
4739 | bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&rx->rx_rq_mem),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& rx->rx_rq_mem)->mxm_map)), (0), (((&rx->rx_rq_mem )->mxm_size)), (0x04))
4740 | 0, MCX_DMA_LEN(&rx->rx_rq_mem), BUS_DMASYNC_PREWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& rx->rx_rq_mem)->mxm_map)), (0), (((&rx->rx_rq_mem )->mxm_size)), (0x04));
4741 | 
4742 | /* physical addresses follow the mailbox in data */
4743 | mcx_cmdq_mboxes_pas(&mxm, sizeof(*mbin) + 0x10, npages, &rx->rx_rq_mem);
4744 | mcx_cmdq_post(sc, cqe, 0);
4745 | 
4746 | error = mcx_cmdq_poll(sc, cqe, 1000);
4747 | if (error != 0) {
4748 | printf("%s: create rq timeout\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
4749 | goto free_mxm;
4750 | }
4751 | if (mcx_cmdq_verify(cqe) != 0) {
4752 | printf("%s: create rq command corrupt\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
4753 | goto free_mxm;
4754 | }
4755 | 
4756 | out = mcx_cmdq_out(cqe);
4757 | if (out->cmd_status != MCX_CQ_STATUS_OK(0x00 << 1)) {
4758 | printf("%s: create rq failed (%x, %x)\n", DEVNAME(sc)((sc)->sc_dev.dv_xname),
4759 | out->cmd_status, betoh32(out->cmd_syndrome)(__uint32_t)(__builtin_constant_p(out->cmd_syndrome) ? (__uint32_t )(((__uint32_t)(out->cmd_syndrome) & 0xff) << 24 | ((__uint32_t)(out->cmd_syndrome) & 0xff00) << 8 | ((__uint32_t)(out->cmd_syndrome) & 0xff0000) >> 8 | ((__uint32_t)(out->cmd_syndrome) & 0xff000000) >> 24) : __swap32md(out->cmd_syndrome)));
4760 | goto free_mxm;
4761 | }
4762 | 
/* firmware assigned the RQ number */
4763 | rx->rx_rqn = mcx_get_id(out->cmd_rqn);
4764 | 
4765 | mcx_dmamem_free(sc, &mxm);
4766 | 
4767 | bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& sc->sc_doorbell_mem)->mxm_map)), (rx->rx_doorbell), ( sizeof(uint32_t)), (0x04))
4768 | rx->rx_doorbell, sizeof(uint32_t), BUS_DMASYNC_PREWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& sc->sc_doorbell_mem)->mxm_map)), (rx->rx_doorbell), ( sizeof(uint32_t)), (0x04));
4769 | 
4770 | return (0);
4771 | 
4772 | free_mxm:
4773 | bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&rx->rx_rq_mem),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& rx->rx_rq_mem)->mxm_map)), (0), (((&rx->rx_rq_mem )->mxm_size)), (0x08))
4774 | 0, MCX_DMA_LEN(&rx->rx_rq_mem), BUS_DMASYNC_POSTWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& rx->rx_rq_mem)->mxm_map)), (0), (((&rx->rx_rq_mem )->mxm_size)), (0x08));
4775 | mcx_dmamem_free(sc, &mxm);
4776 | free_rq:
4777 | mcx_dmamem_free(sc, &rx->rx_rq_mem);
4778 | return (-1);
4779 | }
4780 | |
/*
 * mcx_ready_rq: issue MODIFY_RQ (0x909) to move the receive queue from
 * RST to RDY so the hardware starts using it.  The new state goes in a
 * mailbox-resident RQ context.  Returns 0 on success, non-zero on
 * failure; the mailbox is freed on all paths through the "free" label.
 *
 * Fix: the command-corrupt path previously fell to "free" with error
 * still 0 from the successful poll, so a corrupt command was reported
 * as success; error is now set to -1 there.
 */
4781 | static int
4782 | mcx_ready_rq(struct mcx_softc *sc, struct mcx_rx *rx)
4783 | {
4784 | struct mcx_cmdq_entry *cqe;
4785 | struct mcx_dmamem mxm;
4786 | struct mcx_cmd_modify_rq_in *in;
4787 | struct mcx_cmd_modify_rq_mb_in *mbin;
4788 | struct mcx_cmd_modify_rq_out *out;
4789 | int error;
4790 | int token;
4791 | 
4792 | cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem)((void *)(&sc->sc_cmdq_mem)->mxm_kva);
4793 | token = mcx_cmdq_token(sc);
4794 | mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
4795 | sizeof(*out), token);
4796 | 
4797 | in = mcx_cmdq_in(cqe);
4798 | in->cmd_opcode = htobe16(MCX_CMD_MODIFY_RQ)(__uint16_t)(__builtin_constant_p(0x909) ? (__uint16_t)(((__uint16_t )(0x909) & 0xffU) << 8 | ((__uint16_t)(0x909) & 0xff00U) >> 8) : __swap16md(0x909));
4799 | in->cmd_op_mod = htobe16(0)(__uint16_t)(__builtin_constant_p(0) ? (__uint16_t)(((__uint16_t )(0) & 0xffU) << 8 | ((__uint16_t)(0) & 0xff00U ) >> 8) : __swap16md(0));
/* current state (RST) in the top nibble, RQ number in the rest */
4800 | in->cmd_rq_state = htobe32((MCX_QUEUE_STATE_RST << 28) | rx->rx_rqn)(__uint32_t)(__builtin_constant_p((0 << 28) | rx->rx_rqn ) ? (__uint32_t)(((__uint32_t)((0 << 28) | rx->rx_rqn ) & 0xff) << 24 | ((__uint32_t)((0 << 28) | rx ->rx_rqn) & 0xff00) << 8 | ((__uint32_t)((0 << 28) | rx->rx_rqn) & 0xff0000) >> 8 | ((__uint32_t )((0 << 28) | rx->rx_rqn) & 0xff000000) >> 24) : __swap32md((0 << 28) | rx->rx_rqn));
4801 | 
4802 | if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
4803 | &cqe->cq_input_ptr, token) != 0) {
4804 | printf("%s: unable to allocate modify rq mailbox\n",
4805 | DEVNAME(sc)((sc)->sc_dev.dv_xname));
4806 | return (-1);
4807 | }
/* target state (RDY) lives in the mailbox RQ context */
4808 | mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4809 | mbin->cmd_rq_ctx.rq_flags = htobe32((__uint32_t)(__builtin_constant_p(1 << 20) ? (__uint32_t )(((__uint32_t)(1 << 20) & 0xff) << 24 | ((__uint32_t )(1 << 20) & 0xff00) << 8 | ((__uint32_t)(1 << 20) & 0xff0000) >> 8 | ((__uint32_t)(1 << 20 ) & 0xff000000) >> 24) : __swap32md(1 << 20))
4810 | MCX_QUEUE_STATE_RDY << MCX_RQ_CTX_STATE_SHIFT)(__uint32_t)(__builtin_constant_p(1 << 20) ? (__uint32_t )(((__uint32_t)(1 << 20) & 0xff) << 24 | ((__uint32_t )(1 << 20) & 0xff00) << 8 | ((__uint32_t)(1 << 20) & 0xff0000) >> 8 | ((__uint32_t)(1 << 20 ) & 0xff000000) >> 24) : __swap32md(1 << 20));
4811 | 
4812 | mcx_cmdq_mboxes_sign(&mxm, 1);
4813 | mcx_cmdq_post(sc, cqe, 0);
4814 | error = mcx_cmdq_poll(sc, cqe, 1000);
4815 | if (error != 0) {
4816 | printf("%s: modify rq timeout\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
4817 | goto free;
4818 | }
4819 | if (mcx_cmdq_verify(cqe) != 0) {
4820 | printf("%s: modify rq command corrupt\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
/* poll succeeded, so error is 0 here; mark the failure explicitly */
error = -1;
4821 | goto free;
4822 | }
4823 | 
4824 | out = mcx_cmdq_out(cqe);
4825 | if (out->cmd_status != MCX_CQ_STATUS_OK(0x00 << 1)) {
4826 | printf("%s: modify rq failed (%x, %x)\n", DEVNAME(sc)((sc)->sc_dev.dv_xname),
4827 | out->cmd_status, betoh32(out->cmd_syndrome)(__uint32_t)(__builtin_constant_p(out->cmd_syndrome) ? (__uint32_t )(((__uint32_t)(out->cmd_syndrome) & 0xff) << 24 | ((__uint32_t)(out->cmd_syndrome) & 0xff00) << 8 | ((__uint32_t)(out->cmd_syndrome) & 0xff0000) >> 8 | ((__uint32_t)(out->cmd_syndrome) & 0xff000000) >> 24) : __swap32md(out->cmd_syndrome)));
4828 | error = -1;
4829 | goto free;
4830 | }
4831 | 
/* success also falls through here with error == 0 */
4832 | free:
4833 | mcx_dmamem_free(sc, &mxm);
4834 | return (error);
4835 | }
4836 | |
/*
 * mcx_destroy_rq: issue DESTROY_RQ (0x90a) for the given receive queue,
 * then release its doorbell and work-queue DMA memory and clear the
 * recorded RQ number.  Returns 0 on success, non-zero on failure.
 *
 * Fix: the command-corrupt path previously did "return error" while
 * error was still 0 from the successful poll, reporting success for a
 * corrupt command; it now returns -1 like the status-failure path.
 */
4837 | static int
4838 | mcx_destroy_rq(struct mcx_softc *sc, struct mcx_rx *rx)
4839 | {
4840 | struct mcx_cmdq_entry *cqe;
4841 | struct mcx_cmd_destroy_rq_in *in;
4842 | struct mcx_cmd_destroy_rq_out *out;
4843 | int error;
4844 | int token;
4845 | 
4846 | cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem)((void *)(&sc->sc_cmdq_mem)->mxm_kva);
4847 | token = mcx_cmdq_token(sc);
4848 | mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token);
4849 | 
4850 | in = mcx_cmdq_in(cqe);
4851 | in->cmd_opcode = htobe16(MCX_CMD_DESTROY_RQ)(__uint16_t)(__builtin_constant_p(0x90a) ? (__uint16_t)(((__uint16_t )(0x90a) & 0xffU) << 8 | ((__uint16_t)(0x90a) & 0xff00U) >> 8) : __swap16md(0x90a));
4852 | in->cmd_op_mod = htobe16(0)(__uint16_t)(__builtin_constant_p(0) ? (__uint16_t)(((__uint16_t )(0) & 0xffU) << 8 | ((__uint16_t)(0) & 0xff00U ) >> 8) : __swap16md(0));
4853 | in->cmd_rqn = htobe32(rx->rx_rqn)(__uint32_t)(__builtin_constant_p(rx->rx_rqn) ? (__uint32_t )(((__uint32_t)(rx->rx_rqn) & 0xff) << 24 | ((__uint32_t )(rx->rx_rqn) & 0xff00) << 8 | ((__uint32_t)(rx-> rx_rqn) & 0xff0000) >> 8 | ((__uint32_t)(rx->rx_rqn ) & 0xff000000) >> 24) : __swap32md(rx->rx_rqn));
4854 | 
4855 | mcx_cmdq_post(sc, cqe, 0);
4856 | error = mcx_cmdq_poll(sc, cqe, 1000);
4857 | if (error != 0) {
4858 | printf("%s: destroy rq timeout\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
4859 | return error;
4860 | }
4861 | if (mcx_cmdq_verify(cqe) != 0) {
4862 | printf("%s: destroy rq command corrupt\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
/* error is still 0 here; report the corruption as a failure */
4863 | return -1;
4864 | }
4865 | 
4866 | out = mcx_cmdq_out(cqe);
4867 | if (out->cmd_status != MCX_CQ_STATUS_OK(0x00 << 1)) {
4868 | printf("%s: destroy rq failed (%x, %x)\n", DEVNAME(sc)((sc)->sc_dev.dv_xname),
4869 | out->cmd_status, betoh32(out->cmd_syndrome)(__uint32_t)(__builtin_constant_p(out->cmd_syndrome) ? (__uint32_t )(((__uint32_t)(out->cmd_syndrome) & 0xff) << 24 | ((__uint32_t)(out->cmd_syndrome) & 0xff00) << 8 | ((__uint32_t)(out->cmd_syndrome) & 0xff0000) >> 8 | ((__uint32_t)(out->cmd_syndrome) & 0xff000000) >> 24) : __swap32md(out->cmd_syndrome)));
4870 | return -1;
4871 | }
4872 | 
/* hardware no longer owns the doorbell or the queue memory */
4873 | bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& sc->sc_doorbell_mem)->mxm_map)), (rx->rx_doorbell), ( sizeof(uint32_t)), (0x08))
4874 | rx->rx_doorbell, sizeof(uint32_t), BUS_DMASYNC_POSTWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& sc->sc_doorbell_mem)->mxm_map)), (rx->rx_doorbell), ( sizeof(uint32_t)), (0x08));
4875 | 
4876 | bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&rx->rx_rq_mem),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& rx->rx_rq_mem)->mxm_map)), (0), (((&rx->rx_rq_mem )->mxm_size)), (0x08))
4877 | 0, MCX_DMA_LEN(&rx->rx_rq_mem), BUS_DMASYNC_POSTWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& rx->rx_rq_mem)->mxm_map)), (0), (((&rx->rx_rq_mem )->mxm_size)), (0x08));
4878 | mcx_dmamem_free(sc, &rx->rx_rq_mem);
4879 | 
4880 | rx->rx_rqn = 0;
4881 | return 0;
4882 | }
4883 | |
4884 | static int |
4885 | mcx_create_tir_direct(struct mcx_softc *sc, struct mcx_rx *rx, int *tirn) |
4886 | { |
4887 | struct mcx_cmdq_entry *cqe; |
4888 | struct mcx_dmamem mxm; |
4889 | struct mcx_cmd_create_tir_in *in; |
4890 | struct mcx_cmd_create_tir_mb_in *mbin; |
4891 | struct mcx_cmd_create_tir_out *out; |
4892 | int error; |
4893 | int token; |
4894 | |
4895 | cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem)((void *)(&sc->sc_cmdq_mem)->mxm_kva); |
4896 | token = mcx_cmdq_token(sc); |
4897 | mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), |
4898 | sizeof(*out), token); |
4899 | |
4900 | in = mcx_cmdq_in(cqe); |
4901 | in->cmd_opcode = htobe16(MCX_CMD_CREATE_TIR)(__uint16_t)(__builtin_constant_p(0x900) ? (__uint16_t)(((__uint16_t )(0x900) & 0xffU) << 8 | ((__uint16_t)(0x900) & 0xff00U) >> 8) : __swap16md(0x900)); |
4902 | in->cmd_op_mod = htobe16(0)(__uint16_t)(__builtin_constant_p(0) ? (__uint16_t)(((__uint16_t )(0) & 0xffU) << 8 | ((__uint16_t)(0) & 0xff00U ) >> 8) : __swap16md(0)); |
4903 | |
4904 | if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, |
4905 | &cqe->cq_input_ptr, token) != 0) { |
4906 | printf("%s: unable to allocate create tir mailbox\n", |
4907 | DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
4908 | return (-1); |
4909 | } |
4910 | mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)); |
4911 | /* leave disp_type = 0, so packets get sent to the inline rqn */ |
4912 | mbin->cmd_inline_rqn = htobe32(rx->rx_rqn)(__uint32_t)(__builtin_constant_p(rx->rx_rqn) ? (__uint32_t )(((__uint32_t)(rx->rx_rqn) & 0xff) << 24 | ((__uint32_t )(rx->rx_rqn) & 0xff00) << 8 | ((__uint32_t)(rx-> rx_rqn) & 0xff0000) >> 8 | ((__uint32_t)(rx->rx_rqn ) & 0xff000000) >> 24) : __swap32md(rx->rx_rqn)); |
4913 | mbin->cmd_tdomain = htobe32(sc->sc_tdomain)(__uint32_t)(__builtin_constant_p(sc->sc_tdomain) ? (__uint32_t )(((__uint32_t)(sc->sc_tdomain) & 0xff) << 24 | ( (__uint32_t)(sc->sc_tdomain) & 0xff00) << 8 | (( __uint32_t)(sc->sc_tdomain) & 0xff0000) >> 8 | ( (__uint32_t)(sc->sc_tdomain) & 0xff000000) >> 24 ) : __swap32md(sc->sc_tdomain)); |
4914 | |
4915 | mcx_cmdq_post(sc, cqe, 0); |
4916 | error = mcx_cmdq_poll(sc, cqe, 1000); |
4917 | if (error != 0) { |
4918 | printf("%s: create tir timeout\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
4919 | goto free; |
4920 | } |
4921 | if (mcx_cmdq_verify(cqe) != 0) { |
4922 | printf("%s: create tir command corrupt\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
4923 | goto free; |
4924 | } |
4925 | |
4926 | out = mcx_cmdq_out(cqe); |
4927 | if (out->cmd_status != MCX_CQ_STATUS_OK(0x00 << 1)) { |
4928 | printf("%s: create tir failed (%x, %x)\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), |
4929 | out->cmd_status, betoh32(out->cmd_syndrome)(__uint32_t)(__builtin_constant_p(out->cmd_syndrome) ? (__uint32_t )(((__uint32_t)(out->cmd_syndrome) & 0xff) << 24 | ((__uint32_t)(out->cmd_syndrome) & 0xff00) << 8 | ((__uint32_t)(out->cmd_syndrome) & 0xff0000) >> 8 | ((__uint32_t)(out->cmd_syndrome) & 0xff000000) >> 24) : __swap32md(out->cmd_syndrome))); |
4930 | error = -1; |
4931 | goto free; |
4932 | } |
4933 | |
4934 | *tirn = mcx_get_id(out->cmd_tirn); |
4935 | free: |
4936 | mcx_dmamem_free(sc, &mxm); |
4937 | return (error); |
4938 | } |
4939 | |
4940 | static int |
4941 | mcx_create_tir_indirect(struct mcx_softc *sc, int rqtn, uint32_t hash_sel, |
4942 | int *tirn) |
4943 | { |
4944 | struct mcx_cmdq_entry *cqe; |
4945 | struct mcx_dmamem mxm; |
4946 | struct mcx_cmd_create_tir_in *in; |
4947 | struct mcx_cmd_create_tir_mb_in *mbin; |
4948 | struct mcx_cmd_create_tir_out *out; |
4949 | int error; |
4950 | int token; |
4951 | |
4952 | cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem)((void *)(&sc->sc_cmdq_mem)->mxm_kva); |
4953 | token = mcx_cmdq_token(sc); |
4954 | mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), |
4955 | sizeof(*out), token); |
4956 | |
4957 | in = mcx_cmdq_in(cqe); |
4958 | in->cmd_opcode = htobe16(MCX_CMD_CREATE_TIR)(__uint16_t)(__builtin_constant_p(0x900) ? (__uint16_t)(((__uint16_t )(0x900) & 0xffU) << 8 | ((__uint16_t)(0x900) & 0xff00U) >> 8) : __swap16md(0x900)); |
4959 | in->cmd_op_mod = htobe16(0)(__uint16_t)(__builtin_constant_p(0) ? (__uint16_t)(((__uint16_t )(0) & 0xffU) << 8 | ((__uint16_t)(0) & 0xff00U ) >> 8) : __swap16md(0)); |
4960 | |
4961 | if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, |
4962 | &cqe->cq_input_ptr, token) != 0) { |
4963 | printf("%s: unable to allocate create tir mailbox\n", |
4964 | DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
4965 | return (-1); |
4966 | } |
4967 | mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)); |
4968 | mbin->cmd_disp_type = htobe32(MCX_TIR_CTX_DISP_TYPE_INDIRECT(__uint32_t)(__builtin_constant_p(1 << 28) ? (__uint32_t )(((__uint32_t)(1 << 28) & 0xff) << 24 | ((__uint32_t )(1 << 28) & 0xff00) << 8 | ((__uint32_t)(1 << 28) & 0xff0000) >> 8 | ((__uint32_t)(1 << 28 ) & 0xff000000) >> 24) : __swap32md(1 << 28)) |
4969 | << MCX_TIR_CTX_DISP_TYPE_SHIFT)(__uint32_t)(__builtin_constant_p(1 << 28) ? (__uint32_t )(((__uint32_t)(1 << 28) & 0xff) << 24 | ((__uint32_t )(1 << 28) & 0xff00) << 8 | ((__uint32_t)(1 << 28) & 0xff0000) >> 8 | ((__uint32_t)(1 << 28 ) & 0xff000000) >> 24) : __swap32md(1 << 28)); |
4970 | mbin->cmd_indir_table = htobe32(rqtn)(__uint32_t)(__builtin_constant_p(rqtn) ? (__uint32_t)(((__uint32_t )(rqtn) & 0xff) << 24 | ((__uint32_t)(rqtn) & 0xff00 ) << 8 | ((__uint32_t)(rqtn) & 0xff0000) >> 8 | ((__uint32_t)(rqtn) & 0xff000000) >> 24) : __swap32md (rqtn)); |
4971 | mbin->cmd_tdomain = htobe32(sc->sc_tdomain |(__uint32_t)(__builtin_constant_p(sc->sc_tdomain | 2 << 28) ? (__uint32_t)(((__uint32_t)(sc->sc_tdomain | 2 << 28) & 0xff) << 24 | ((__uint32_t)(sc->sc_tdomain | 2 << 28) & 0xff00) << 8 | ((__uint32_t)(sc ->sc_tdomain | 2 << 28) & 0xff0000) >> 8 | ((__uint32_t)(sc->sc_tdomain | 2 << 28) & 0xff000000 ) >> 24) : __swap32md(sc->sc_tdomain | 2 << 28 )) |
4972 | MCX_TIR_CTX_HASH_TOEPLITZ << MCX_TIR_CTX_HASH_SHIFT)(__uint32_t)(__builtin_constant_p(sc->sc_tdomain | 2 << 28) ? (__uint32_t)(((__uint32_t)(sc->sc_tdomain | 2 << 28) & 0xff) << 24 | ((__uint32_t)(sc->sc_tdomain | 2 << 28) & 0xff00) << 8 | ((__uint32_t)(sc ->sc_tdomain | 2 << 28) & 0xff0000) >> 8 | ((__uint32_t)(sc->sc_tdomain | 2 << 28) & 0xff000000 ) >> 24) : __swap32md(sc->sc_tdomain | 2 << 28 )); |
4973 | mbin->cmd_rx_hash_sel_outer = htobe32(hash_sel)(__uint32_t)(__builtin_constant_p(hash_sel) ? (__uint32_t)((( __uint32_t)(hash_sel) & 0xff) << 24 | ((__uint32_t) (hash_sel) & 0xff00) << 8 | ((__uint32_t)(hash_sel) & 0xff0000) >> 8 | ((__uint32_t)(hash_sel) & 0xff000000 ) >> 24) : __swap32md(hash_sel)); |
4974 | stoeplitz_to_key(&mbin->cmd_rx_hash_key, |
4975 | sizeof(mbin->cmd_rx_hash_key)); |
4976 | |
4977 | mcx_cmdq_post(sc, cqe, 0); |
4978 | error = mcx_cmdq_poll(sc, cqe, 1000); |
4979 | if (error != 0) { |
4980 | printf("%s: create tir timeout\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
4981 | goto free; |
4982 | } |
4983 | if (mcx_cmdq_verify(cqe) != 0) { |
4984 | printf("%s: create tir command corrupt\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
4985 | goto free; |
4986 | } |
4987 | |
4988 | out = mcx_cmdq_out(cqe); |
4989 | if (out->cmd_status != MCX_CQ_STATUS_OK(0x00 << 1)) { |
4990 | printf("%s: create tir failed (%x, %x)\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), |
4991 | out->cmd_status, betoh32(out->cmd_syndrome)(__uint32_t)(__builtin_constant_p(out->cmd_syndrome) ? (__uint32_t )(((__uint32_t)(out->cmd_syndrome) & 0xff) << 24 | ((__uint32_t)(out->cmd_syndrome) & 0xff00) << 8 | ((__uint32_t)(out->cmd_syndrome) & 0xff0000) >> 8 | ((__uint32_t)(out->cmd_syndrome) & 0xff000000) >> 24) : __swap32md(out->cmd_syndrome))); |
4992 | error = -1; |
4993 | goto free; |
4994 | } |
4995 | |
4996 | *tirn = mcx_get_id(out->cmd_tirn); |
4997 | free: |
4998 | mcx_dmamem_free(sc, &mxm); |
4999 | return (error); |
5000 | } |
5001 | |
5002 | static int |
5003 | mcx_destroy_tir(struct mcx_softc *sc, int tirn) |
5004 | { |
5005 | struct mcx_cmdq_entry *cqe; |
5006 | struct mcx_cmd_destroy_tir_in *in; |
5007 | struct mcx_cmd_destroy_tir_out *out; |
5008 | int error; |
5009 | int token; |
5010 | |
5011 | cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem)((void *)(&sc->sc_cmdq_mem)->mxm_kva); |
5012 | token = mcx_cmdq_token(sc); |
5013 | mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token); |
5014 | |
5015 | in = mcx_cmdq_in(cqe); |
5016 | in->cmd_opcode = htobe16(MCX_CMD_DESTROY_TIR)(__uint16_t)(__builtin_constant_p(0x902) ? (__uint16_t)(((__uint16_t )(0x902) & 0xffU) << 8 | ((__uint16_t)(0x902) & 0xff00U) >> 8) : __swap16md(0x902)); |
5017 | in->cmd_op_mod = htobe16(0)(__uint16_t)(__builtin_constant_p(0) ? (__uint16_t)(((__uint16_t )(0) & 0xffU) << 8 | ((__uint16_t)(0) & 0xff00U ) >> 8) : __swap16md(0)); |
5018 | in->cmd_tirn = htobe32(tirn)(__uint32_t)(__builtin_constant_p(tirn) ? (__uint32_t)(((__uint32_t )(tirn) & 0xff) << 24 | ((__uint32_t)(tirn) & 0xff00 ) << 8 | ((__uint32_t)(tirn) & 0xff0000) >> 8 | ((__uint32_t)(tirn) & 0xff000000) >> 24) : __swap32md (tirn)); |
5019 | |
5020 | mcx_cmdq_post(sc, cqe, 0); |
5021 | error = mcx_cmdq_poll(sc, cqe, 1000); |
5022 | if (error != 0) { |
5023 | printf("%s: destroy tir timeout\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
5024 | return error; |
5025 | } |
5026 | if (mcx_cmdq_verify(cqe) != 0) { |
5027 | printf("%s: destroy tir command corrupt\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
5028 | return error; |
5029 | } |
5030 | |
5031 | out = mcx_cmdq_out(cqe); |
5032 | if (out->cmd_status != MCX_CQ_STATUS_OK(0x00 << 1)) { |
5033 | printf("%s: destroy tir failed (%x, %x)\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), |
5034 | out->cmd_status, betoh32(out->cmd_syndrome)(__uint32_t)(__builtin_constant_p(out->cmd_syndrome) ? (__uint32_t )(((__uint32_t)(out->cmd_syndrome) & 0xff) << 24 | ((__uint32_t)(out->cmd_syndrome) & 0xff00) << 8 | ((__uint32_t)(out->cmd_syndrome) & 0xff0000) >> 8 | ((__uint32_t)(out->cmd_syndrome) & 0xff000000) >> 24) : __swap32md(out->cmd_syndrome))); |
5035 | return -1; |
5036 | } |
5037 | |
5038 | return (0); |
5039 | } |
5040 | |
5041 | static int |
5042 | mcx_create_sq(struct mcx_softc *sc, struct mcx_tx *tx, int uar, int db, |
5043 | int cqn) |
5044 | { |
5045 | struct mcx_cmdq_entry *cqe; |
5046 | struct mcx_dmamem mxm; |
5047 | struct mcx_cmd_create_sq_in *in; |
5048 | struct mcx_sq_ctx *mbin; |
5049 | struct mcx_cmd_create_sq_out *out; |
5050 | int error; |
5051 | uint64_t *pas; |
5052 | int insize, npages, paslen, token; |
5053 | |
5054 | tx->tx_doorbell = MCX_WQ_DOORBELL_BASE(1 << 12)/2 + |
5055 | (db * MCX_WQ_DOORBELL_STRIDE64) + 4; |
5056 | |
5057 | npages = howmany((1 << MCX_LOG_SQ_SIZE) * sizeof(struct mcx_sq_entry),((((1 << 11) * sizeof(struct mcx_sq_entry)) + (((1 << 12)) - 1)) / ((1 << 12))) |
5058 | MCX_PAGE_SIZE)((((1 << 11) * sizeof(struct mcx_sq_entry)) + (((1 << 12)) - 1)) / ((1 << 12))); |
5059 | paslen = npages * sizeof(*pas); |
5060 | insize = sizeof(struct mcx_sq_ctx) + paslen; |
5061 | |
5062 | if (mcx_dmamem_alloc(sc, &tx->tx_sq_mem, npages * MCX_PAGE_SIZE(1 << 12), |
5063 | MCX_PAGE_SIZE(1 << 12)) != 0) { |
5064 | printf("%s: unable to allocate send queue memory\n", |
5065 | DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
5066 | return (-1); |
5067 | } |
5068 | |
5069 | cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem)((void *)(&sc->sc_cmdq_mem)->mxm_kva); |
5070 | token = mcx_cmdq_token(sc); |
5071 | mcx_cmdq_init(sc, cqe, sizeof(*in) + insize + paslen, sizeof(*out), |
5072 | token); |
5073 | |
5074 | in = mcx_cmdq_in(cqe); |
5075 | in->cmd_opcode = htobe16(MCX_CMD_CREATE_SQ)(__uint16_t)(__builtin_constant_p(0x904) ? (__uint16_t)(((__uint16_t )(0x904) & 0xffU) << 8 | ((__uint16_t)(0x904) & 0xff00U) >> 8) : __swap16md(0x904)); |
5076 | in->cmd_op_mod = htobe16(0)(__uint16_t)(__builtin_constant_p(0) ? (__uint16_t)(((__uint16_t )(0) & 0xffU) << 8 | ((__uint16_t)(0) & 0xff00U ) >> 8) : __swap16md(0)); |
5077 | |
5078 | if (mcx_cmdq_mboxes_alloc(sc, &mxm, |
5079 | howmany(insize, MCX_CMDQ_MAILBOX_DATASIZE)(((insize) + ((512) - 1)) / (512)), |
5080 | &cqe->cq_input_ptr, token) != 0) { |
5081 | printf("%s: unable to allocate create sq mailboxen\n", |
5082 | DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
5083 | goto free_sq; |
5084 | } |
5085 | mbin = (struct mcx_sq_ctx *) |
5086 | (((char *)mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0))) + 0x10); |
5087 | mbin->sq_flags = htobe32(MCX_SQ_CTX_RLKEY |(__uint32_t)(__builtin_constant_p((1 << 31) | (1 << 24)) ? (__uint32_t)(((__uint32_t)((1 << 31) | (1 << 24)) & 0xff) << 24 | ((__uint32_t)((1 << 31) | (1 << 24)) & 0xff00) << 8 | ((__uint32_t)( (1 << 31) | (1 << 24)) & 0xff0000) >> 8 | ((__uint32_t)((1 << 31) | (1 << 24)) & 0xff000000 ) >> 24) : __swap32md((1 << 31) | (1 << 24) )) |
5088 | (1 << MCX_SQ_CTX_MIN_WQE_INLINE_SHIFT))(__uint32_t)(__builtin_constant_p((1 << 31) | (1 << 24)) ? (__uint32_t)(((__uint32_t)((1 << 31) | (1 << 24)) & 0xff) << 24 | ((__uint32_t)((1 << 31) | (1 << 24)) & 0xff00) << 8 | ((__uint32_t)( (1 << 31) | (1 << 24)) & 0xff0000) >> 8 | ((__uint32_t)((1 << 31) | (1 << 24)) & 0xff000000 ) >> 24) : __swap32md((1 << 31) | (1 << 24) )); |
5089 | mbin->sq_cqn = htobe32(cqn)(__uint32_t)(__builtin_constant_p(cqn) ? (__uint32_t)(((__uint32_t )(cqn) & 0xff) << 24 | ((__uint32_t)(cqn) & 0xff00 ) << 8 | ((__uint32_t)(cqn) & 0xff0000) >> 8 | ((__uint32_t)(cqn) & 0xff000000) >> 24) : __swap32md (cqn)); |
5090 | mbin->sq_tis_lst_sz = htobe32(1 << MCX_SQ_CTX_TIS_LST_SZ_SHIFT)(__uint32_t)(__builtin_constant_p(1 << 16) ? (__uint32_t )(((__uint32_t)(1 << 16) & 0xff) << 24 | ((__uint32_t )(1 << 16) & 0xff00) << 8 | ((__uint32_t)(1 << 16) & 0xff0000) >> 8 | ((__uint32_t)(1 << 16 ) & 0xff000000) >> 24) : __swap32md(1 << 16)); |
5091 | mbin->sq_tis_num = htobe32(sc->sc_tis)(__uint32_t)(__builtin_constant_p(sc->sc_tis) ? (__uint32_t )(((__uint32_t)(sc->sc_tis) & 0xff) << 24 | ((__uint32_t )(sc->sc_tis) & 0xff00) << 8 | ((__uint32_t)(sc-> sc_tis) & 0xff0000) >> 8 | ((__uint32_t)(sc->sc_tis ) & 0xff000000) >> 24) : __swap32md(sc->sc_tis)); |
5092 | mbin->sq_wq.wq_type = MCX_WQ_CTX_TYPE_CYCLIC(1 << 4); |
5093 | mbin->sq_wq.wq_pd = htobe32(sc->sc_pd)(__uint32_t)(__builtin_constant_p(sc->sc_pd) ? (__uint32_t )(((__uint32_t)(sc->sc_pd) & 0xff) << 24 | ((__uint32_t )(sc->sc_pd) & 0xff00) << 8 | ((__uint32_t)(sc-> sc_pd) & 0xff0000) >> 8 | ((__uint32_t)(sc->sc_pd ) & 0xff000000) >> 24) : __swap32md(sc->sc_pd)); |
5094 | mbin->sq_wq.wq_uar_page = htobe32(uar)(__uint32_t)(__builtin_constant_p(uar) ? (__uint32_t)(((__uint32_t )(uar) & 0xff) << 24 | ((__uint32_t)(uar) & 0xff00 ) << 8 | ((__uint32_t)(uar) & 0xff0000) >> 8 | ((__uint32_t)(uar) & 0xff000000) >> 24) : __swap32md (uar)); |
5095 | mbin->sq_wq.wq_doorbell = htobe64(MCX_DMA_DVA(&sc->sc_doorbell_mem) +(__uint64_t)(__builtin_constant_p(((&sc->sc_doorbell_mem )->mxm_map->dm_segs[0].ds_addr) + tx->tx_doorbell) ? (__uint64_t)((((__uint64_t)(((&sc->sc_doorbell_mem)-> mxm_map->dm_segs[0].ds_addr) + tx->tx_doorbell) & 0xff ) << 56) | ((__uint64_t)(((&sc->sc_doorbell_mem) ->mxm_map->dm_segs[0].ds_addr) + tx->tx_doorbell) & 0xff00ULL) << 40 | ((__uint64_t)(((&sc->sc_doorbell_mem )->mxm_map->dm_segs[0].ds_addr) + tx->tx_doorbell) & 0xff0000ULL) << 24 | ((__uint64_t)(((&sc->sc_doorbell_mem )->mxm_map->dm_segs[0].ds_addr) + tx->tx_doorbell) & 0xff000000ULL) << 8 | ((__uint64_t)(((&sc->sc_doorbell_mem )->mxm_map->dm_segs[0].ds_addr) + tx->tx_doorbell) & 0xff00000000ULL) >> 8 | ((__uint64_t)(((&sc->sc_doorbell_mem )->mxm_map->dm_segs[0].ds_addr) + tx->tx_doorbell) & 0xff0000000000ULL) >> 24 | ((__uint64_t)(((&sc-> sc_doorbell_mem)->mxm_map->dm_segs[0].ds_addr) + tx-> tx_doorbell) & 0xff000000000000ULL) >> 40 | ((__uint64_t )(((&sc->sc_doorbell_mem)->mxm_map->dm_segs[0].ds_addr ) + tx->tx_doorbell) & 0xff00000000000000ULL) >> 56) : __swap64md(((&sc->sc_doorbell_mem)->mxm_map-> dm_segs[0].ds_addr) + tx->tx_doorbell)) |
5096 | tx->tx_doorbell)(__uint64_t)(__builtin_constant_p(((&sc->sc_doorbell_mem )->mxm_map->dm_segs[0].ds_addr) + tx->tx_doorbell) ? (__uint64_t)((((__uint64_t)(((&sc->sc_doorbell_mem)-> mxm_map->dm_segs[0].ds_addr) + tx->tx_doorbell) & 0xff ) << 56) | ((__uint64_t)(((&sc->sc_doorbell_mem) ->mxm_map->dm_segs[0].ds_addr) + tx->tx_doorbell) & 0xff00ULL) << 40 | ((__uint64_t)(((&sc->sc_doorbell_mem )->mxm_map->dm_segs[0].ds_addr) + tx->tx_doorbell) & 0xff0000ULL) << 24 | ((__uint64_t)(((&sc->sc_doorbell_mem )->mxm_map->dm_segs[0].ds_addr) + tx->tx_doorbell) & 0xff000000ULL) << 8 | ((__uint64_t)(((&sc->sc_doorbell_mem )->mxm_map->dm_segs[0].ds_addr) + tx->tx_doorbell) & 0xff00000000ULL) >> 8 | ((__uint64_t)(((&sc->sc_doorbell_mem )->mxm_map->dm_segs[0].ds_addr) + tx->tx_doorbell) & 0xff0000000000ULL) >> 24 | ((__uint64_t)(((&sc-> sc_doorbell_mem)->mxm_map->dm_segs[0].ds_addr) + tx-> tx_doorbell) & 0xff000000000000ULL) >> 40 | ((__uint64_t )(((&sc->sc_doorbell_mem)->mxm_map->dm_segs[0].ds_addr ) + tx->tx_doorbell) & 0xff00000000000000ULL) >> 56) : __swap64md(((&sc->sc_doorbell_mem)->mxm_map-> dm_segs[0].ds_addr) + tx->tx_doorbell)); |
5097 | mbin->sq_wq.wq_log_stride = htobe16(MCX_LOG_SQ_ENTRY_SIZE)(__uint16_t)(__builtin_constant_p(6) ? (__uint16_t)(((__uint16_t )(6) & 0xffU) << 8 | ((__uint16_t)(6) & 0xff00U ) >> 8) : __swap16md(6)); |
5098 | mbin->sq_wq.wq_log_size = MCX_LOG_SQ_SIZE11; |
5099 | |
5100 | bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&tx->tx_sq_mem),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& tx->tx_sq_mem)->mxm_map)), (0), (((&tx->tx_sq_mem )->mxm_size)), (0x04)) |
5101 | 0, MCX_DMA_LEN(&tx->tx_sq_mem), BUS_DMASYNC_PREWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& tx->tx_sq_mem)->mxm_map)), (0), (((&tx->tx_sq_mem )->mxm_size)), (0x04)); |
5102 | |
5103 | /* physical addresses follow the mailbox in data */ |
5104 | mcx_cmdq_mboxes_pas(&mxm, sizeof(*mbin) + 0x10, |
5105 | npages, &tx->tx_sq_mem); |
5106 | mcx_cmdq_post(sc, cqe, 0); |
5107 | |
5108 | error = mcx_cmdq_poll(sc, cqe, 1000); |
5109 | if (error != 0) { |
5110 | printf("%s: create sq timeout\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
5111 | goto free_mxm; |
5112 | } |
5113 | if (mcx_cmdq_verify(cqe) != 0) { |
5114 | printf("%s: create sq command corrupt\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
5115 | goto free_mxm; |
5116 | } |
5117 | |
5118 | out = mcx_cmdq_out(cqe); |
5119 | if (out->cmd_status != MCX_CQ_STATUS_OK(0x00 << 1)) { |
5120 | printf("%s: create sq failed (%x, %x)\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), |
5121 | out->cmd_status, betoh32(out->cmd_syndrome)(__uint32_t)(__builtin_constant_p(out->cmd_syndrome) ? (__uint32_t )(((__uint32_t)(out->cmd_syndrome) & 0xff) << 24 | ((__uint32_t)(out->cmd_syndrome) & 0xff00) << 8 | ((__uint32_t)(out->cmd_syndrome) & 0xff0000) >> 8 | ((__uint32_t)(out->cmd_syndrome) & 0xff000000) >> 24) : __swap32md(out->cmd_syndrome))); |
5122 | goto free_mxm; |
5123 | } |
5124 | |
5125 | tx->tx_uar = uar; |
5126 | tx->tx_sqn = mcx_get_id(out->cmd_sqn); |
5127 | |
5128 | mcx_dmamem_free(sc, &mxm); |
5129 | |
5130 | bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& sc->sc_doorbell_mem)->mxm_map)), (tx->tx_doorbell), ( sizeof(uint32_t)), (0x04)) |
5131 | tx->tx_doorbell, sizeof(uint32_t), BUS_DMASYNC_PREWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& sc->sc_doorbell_mem)->mxm_map)), (tx->tx_doorbell), ( sizeof(uint32_t)), (0x04)); |
5132 | |
5133 | return (0); |
5134 | |
5135 | free_mxm: |
5136 | bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&tx->tx_sq_mem),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& tx->tx_sq_mem)->mxm_map)), (0), (((&tx->tx_sq_mem )->mxm_size)), (0x08)) |
5137 | 0, MCX_DMA_LEN(&tx->tx_sq_mem), BUS_DMASYNC_POSTWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& tx->tx_sq_mem)->mxm_map)), (0), (((&tx->tx_sq_mem )->mxm_size)), (0x08)); |
5138 | mcx_dmamem_free(sc, &mxm); |
5139 | free_sq: |
5140 | mcx_dmamem_free(sc, &tx->tx_sq_mem); |
5141 | return (-1); |
5142 | } |
5143 | |
5144 | static int |
5145 | mcx_destroy_sq(struct mcx_softc *sc, struct mcx_tx *tx) |
5146 | { |
5147 | struct mcx_cmdq_entry *cqe; |
5148 | struct mcx_cmd_destroy_sq_in *in; |
5149 | struct mcx_cmd_destroy_sq_out *out; |
5150 | int error; |
5151 | int token; |
5152 | |
5153 | cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem)((void *)(&sc->sc_cmdq_mem)->mxm_kva); |
5154 | token = mcx_cmdq_token(sc); |
5155 | mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token); |
5156 | |
5157 | in = mcx_cmdq_in(cqe); |
5158 | in->cmd_opcode = htobe16(MCX_CMD_DESTROY_SQ)(__uint16_t)(__builtin_constant_p(0x906) ? (__uint16_t)(((__uint16_t )(0x906) & 0xffU) << 8 | ((__uint16_t)(0x906) & 0xff00U) >> 8) : __swap16md(0x906)); |
5159 | in->cmd_op_mod = htobe16(0)(__uint16_t)(__builtin_constant_p(0) ? (__uint16_t)(((__uint16_t )(0) & 0xffU) << 8 | ((__uint16_t)(0) & 0xff00U ) >> 8) : __swap16md(0)); |
5160 | in->cmd_sqn = htobe32(tx->tx_sqn)(__uint32_t)(__builtin_constant_p(tx->tx_sqn) ? (__uint32_t )(((__uint32_t)(tx->tx_sqn) & 0xff) << 24 | ((__uint32_t )(tx->tx_sqn) & 0xff00) << 8 | ((__uint32_t)(tx-> tx_sqn) & 0xff0000) >> 8 | ((__uint32_t)(tx->tx_sqn ) & 0xff000000) >> 24) : __swap32md(tx->tx_sqn)); |
5161 | |
5162 | mcx_cmdq_post(sc, cqe, 0); |
5163 | error = mcx_cmdq_poll(sc, cqe, 1000); |
5164 | if (error != 0) { |
5165 | printf("%s: destroy sq timeout\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
5166 | return error; |
5167 | } |
5168 | if (mcx_cmdq_verify(cqe) != 0) { |
5169 | printf("%s: destroy sq command corrupt\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
5170 | return error; |
5171 | } |
5172 | |
5173 | out = mcx_cmdq_out(cqe); |
5174 | if (out->cmd_status != MCX_CQ_STATUS_OK(0x00 << 1)) { |
5175 | printf("%s: destroy sq failed (%x, %x)\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), |
5176 | out->cmd_status, betoh32(out->cmd_syndrome)(__uint32_t)(__builtin_constant_p(out->cmd_syndrome) ? (__uint32_t )(((__uint32_t)(out->cmd_syndrome) & 0xff) << 24 | ((__uint32_t)(out->cmd_syndrome) & 0xff00) << 8 | ((__uint32_t)(out->cmd_syndrome) & 0xff0000) >> 8 | ((__uint32_t)(out->cmd_syndrome) & 0xff000000) >> 24) : __swap32md(out->cmd_syndrome))); |
5177 | return -1; |
5178 | } |
5179 | |
5180 | bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& sc->sc_doorbell_mem)->mxm_map)), (tx->tx_doorbell), ( sizeof(uint32_t)), (0x08)) |
5181 | tx->tx_doorbell, sizeof(uint32_t), BUS_DMASYNC_POSTWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& sc->sc_doorbell_mem)->mxm_map)), (tx->tx_doorbell), ( sizeof(uint32_t)), (0x08)); |
5182 | |
5183 | bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&tx->tx_sq_mem),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& tx->tx_sq_mem)->mxm_map)), (0), (((&tx->tx_sq_mem )->mxm_size)), (0x08)) |
5184 | 0, MCX_DMA_LEN(&tx->tx_sq_mem), BUS_DMASYNC_POSTWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& tx->tx_sq_mem)->mxm_map)), (0), (((&tx->tx_sq_mem )->mxm_size)), (0x08)); |
5185 | mcx_dmamem_free(sc, &tx->tx_sq_mem); |
5186 | |
5187 | tx->tx_sqn = 0; |
5188 | return 0; |
5189 | } |
5190 | |
5191 | static int |
5192 | mcx_ready_sq(struct mcx_softc *sc, struct mcx_tx *tx) |
5193 | { |
5194 | struct mcx_cmdq_entry *cqe; |
5195 | struct mcx_dmamem mxm; |
5196 | struct mcx_cmd_modify_sq_in *in; |
5197 | struct mcx_cmd_modify_sq_mb_in *mbin; |
5198 | struct mcx_cmd_modify_sq_out *out; |
5199 | int error; |
5200 | int token; |
5201 | |
5202 | cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem)((void *)(&sc->sc_cmdq_mem)->mxm_kva); |
5203 | token = mcx_cmdq_token(sc); |
5204 | mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), |
5205 | sizeof(*out), token); |
5206 | |
5207 | in = mcx_cmdq_in(cqe); |
5208 | in->cmd_opcode = htobe16(MCX_CMD_MODIFY_SQ)(__uint16_t)(__builtin_constant_p(0x905) ? (__uint16_t)(((__uint16_t )(0x905) & 0xffU) << 8 | ((__uint16_t)(0x905) & 0xff00U) >> 8) : __swap16md(0x905)); |
5209 | in->cmd_op_mod = htobe16(0)(__uint16_t)(__builtin_constant_p(0) ? (__uint16_t)(((__uint16_t )(0) & 0xffU) << 8 | ((__uint16_t)(0) & 0xff00U ) >> 8) : __swap16md(0)); |
5210 | in->cmd_sq_state = htobe32((MCX_QUEUE_STATE_RST << 28) | tx->tx_sqn)(__uint32_t)(__builtin_constant_p((0 << 28) | tx->tx_sqn ) ? (__uint32_t)(((__uint32_t)((0 << 28) | tx->tx_sqn ) & 0xff) << 24 | ((__uint32_t)((0 << 28) | tx ->tx_sqn) & 0xff00) << 8 | ((__uint32_t)((0 << 28) | tx->tx_sqn) & 0xff0000) >> 8 | ((__uint32_t )((0 << 28) | tx->tx_sqn) & 0xff000000) >> 24) : __swap32md((0 << 28) | tx->tx_sqn)); |
5211 | |
5212 | if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, |
5213 | &cqe->cq_input_ptr, token) != 0) { |
5214 | printf("%s: unable to allocate modify sq mailbox\n", |
5215 | DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
5216 | return (-1); |
5217 | } |
5218 | mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)); |
5219 | mbin->cmd_sq_ctx.sq_flags = htobe32((__uint32_t)(__builtin_constant_p(1 << 20) ? (__uint32_t )(((__uint32_t)(1 << 20) & 0xff) << 24 | ((__uint32_t )(1 << 20) & 0xff00) << 8 | ((__uint32_t)(1 << 20) & 0xff0000) >> 8 | ((__uint32_t)(1 << 20 ) & 0xff000000) >> 24) : __swap32md(1 << 20)) |
5220 | MCX_QUEUE_STATE_RDY << MCX_SQ_CTX_STATE_SHIFT)(__uint32_t)(__builtin_constant_p(1 << 20) ? (__uint32_t )(((__uint32_t)(1 << 20) & 0xff) << 24 | ((__uint32_t )(1 << 20) & 0xff00) << 8 | ((__uint32_t)(1 << 20) & 0xff0000) >> 8 | ((__uint32_t)(1 << 20 ) & 0xff000000) >> 24) : __swap32md(1 << 20)); |
5221 | |
5222 | mcx_cmdq_mboxes_sign(&mxm, 1); |
5223 | mcx_cmdq_post(sc, cqe, 0); |
5224 | error = mcx_cmdq_poll(sc, cqe, 1000); |
5225 | if (error != 0) { |
5226 | printf("%s: modify sq timeout\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
5227 | goto free; |
5228 | } |
5229 | if (mcx_cmdq_verify(cqe) != 0) { |
5230 | printf("%s: modify sq command corrupt\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
5231 | goto free; |
5232 | } |
5233 | |
5234 | out = mcx_cmdq_out(cqe); |
5235 | if (out->cmd_status != MCX_CQ_STATUS_OK(0x00 << 1)) { |
5236 | printf("%s: modify sq failed (%x, %x)\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), |
5237 | out->cmd_status, betoh32(out->cmd_syndrome)(__uint32_t)(__builtin_constant_p(out->cmd_syndrome) ? (__uint32_t )(((__uint32_t)(out->cmd_syndrome) & 0xff) << 24 | ((__uint32_t)(out->cmd_syndrome) & 0xff00) << 8 | ((__uint32_t)(out->cmd_syndrome) & 0xff0000) >> 8 | ((__uint32_t)(out->cmd_syndrome) & 0xff000000) >> 24) : __swap32md(out->cmd_syndrome))); |
5238 | error = -1; |
5239 | goto free; |
5240 | } |
5241 | |
5242 | free: |
5243 | mcx_dmamem_free(sc, &mxm); |
5244 | return (error); |
5245 | } |
5246 | |
5247 | static int |
5248 | mcx_create_tis(struct mcx_softc *sc, int *tis) |
5249 | { |
5250 | struct mcx_cmdq_entry *cqe; |
5251 | struct mcx_dmamem mxm; |
5252 | struct mcx_cmd_create_tis_in *in; |
5253 | struct mcx_cmd_create_tis_mb_in *mbin; |
5254 | struct mcx_cmd_create_tis_out *out; |
5255 | int error; |
5256 | int token; |
5257 | |
5258 | cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem)((void *)(&sc->sc_cmdq_mem)->mxm_kva); |
5259 | token = mcx_cmdq_token(sc); |
5260 | mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), |
5261 | sizeof(*out), token); |
5262 | |
5263 | in = mcx_cmdq_in(cqe); |
5264 | in->cmd_opcode = htobe16(MCX_CMD_CREATE_TIS)(__uint16_t)(__builtin_constant_p(0x912) ? (__uint16_t)(((__uint16_t )(0x912) & 0xffU) << 8 | ((__uint16_t)(0x912) & 0xff00U) >> 8) : __swap16md(0x912)); |
5265 | in->cmd_op_mod = htobe16(0)(__uint16_t)(__builtin_constant_p(0) ? (__uint16_t)(((__uint16_t )(0) & 0xffU) << 8 | ((__uint16_t)(0) & 0xff00U ) >> 8) : __swap16md(0)); |
5266 | |
5267 | if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, |
5268 | &cqe->cq_input_ptr, token) != 0) { |
5269 | printf("%s: unable to allocate create tis mailbox\n", |
5270 | DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
5271 | return (-1); |
5272 | } |
5273 | mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)); |
5274 | mbin->cmd_tdomain = htobe32(sc->sc_tdomain)(__uint32_t)(__builtin_constant_p(sc->sc_tdomain) ? (__uint32_t )(((__uint32_t)(sc->sc_tdomain) & 0xff) << 24 | ( (__uint32_t)(sc->sc_tdomain) & 0xff00) << 8 | (( __uint32_t)(sc->sc_tdomain) & 0xff0000) >> 8 | ( (__uint32_t)(sc->sc_tdomain) & 0xff000000) >> 24 ) : __swap32md(sc->sc_tdomain)); |
5275 | |
5276 | mcx_cmdq_mboxes_sign(&mxm, 1); |
5277 | mcx_cmdq_post(sc, cqe, 0); |
5278 | error = mcx_cmdq_poll(sc, cqe, 1000); |
5279 | if (error != 0) { |
5280 | printf("%s: create tis timeout\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
5281 | goto free; |
5282 | } |
5283 | if (mcx_cmdq_verify(cqe) != 0) { |
5284 | printf("%s: create tis command corrupt\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
5285 | goto free; |
5286 | } |
5287 | |
5288 | out = mcx_cmdq_out(cqe); |
5289 | if (out->cmd_status != MCX_CQ_STATUS_OK(0x00 << 1)) { |
5290 | printf("%s: create tis failed (%x, %x)\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), |
5291 | out->cmd_status, betoh32(out->cmd_syndrome)(__uint32_t)(__builtin_constant_p(out->cmd_syndrome) ? (__uint32_t )(((__uint32_t)(out->cmd_syndrome) & 0xff) << 24 | ((__uint32_t)(out->cmd_syndrome) & 0xff00) << 8 | ((__uint32_t)(out->cmd_syndrome) & 0xff0000) >> 8 | ((__uint32_t)(out->cmd_syndrome) & 0xff000000) >> 24) : __swap32md(out->cmd_syndrome))); |
5292 | error = -1; |
5293 | goto free; |
5294 | } |
5295 | |
5296 | *tis = mcx_get_id(out->cmd_tisn); |
5297 | free: |
5298 | mcx_dmamem_free(sc, &mxm); |
5299 | return (error); |
5300 | } |
5301 | |
5302 | static int |
5303 | mcx_destroy_tis(struct mcx_softc *sc, int tis) |
5304 | { |
5305 | struct mcx_cmdq_entry *cqe; |
5306 | struct mcx_cmd_destroy_tis_in *in; |
5307 | struct mcx_cmd_destroy_tis_out *out; |
5308 | int error; |
5309 | int token; |
5310 | |
5311 | cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem)((void *)(&sc->sc_cmdq_mem)->mxm_kva); |
5312 | token = mcx_cmdq_token(sc); |
5313 | mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token); |
5314 | |
5315 | in = mcx_cmdq_in(cqe); |
5316 | in->cmd_opcode = htobe16(MCX_CMD_DESTROY_TIS)(__uint16_t)(__builtin_constant_p(0x914) ? (__uint16_t)(((__uint16_t )(0x914) & 0xffU) << 8 | ((__uint16_t)(0x914) & 0xff00U) >> 8) : __swap16md(0x914)); |
5317 | in->cmd_op_mod = htobe16(0)(__uint16_t)(__builtin_constant_p(0) ? (__uint16_t)(((__uint16_t )(0) & 0xffU) << 8 | ((__uint16_t)(0) & 0xff00U ) >> 8) : __swap16md(0)); |
5318 | in->cmd_tisn = htobe32(tis)(__uint32_t)(__builtin_constant_p(tis) ? (__uint32_t)(((__uint32_t )(tis) & 0xff) << 24 | ((__uint32_t)(tis) & 0xff00 ) << 8 | ((__uint32_t)(tis) & 0xff0000) >> 8 | ((__uint32_t)(tis) & 0xff000000) >> 24) : __swap32md (tis)); |
5319 | |
5320 | mcx_cmdq_post(sc, cqe, 0); |
5321 | error = mcx_cmdq_poll(sc, cqe, 1000); |
5322 | if (error != 0) { |
5323 | printf("%s: destroy tis timeout\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
5324 | return error; |
5325 | } |
5326 | if (mcx_cmdq_verify(cqe) != 0) { |
5327 | printf("%s: destroy tis command corrupt\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
5328 | return error; |
5329 | } |
5330 | |
5331 | out = mcx_cmdq_out(cqe); |
5332 | if (out->cmd_status != MCX_CQ_STATUS_OK(0x00 << 1)) { |
5333 | printf("%s: destroy tis failed (%x, %x)\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), |
5334 | out->cmd_status, betoh32(out->cmd_syndrome)(__uint32_t)(__builtin_constant_p(out->cmd_syndrome) ? (__uint32_t )(((__uint32_t)(out->cmd_syndrome) & 0xff) << 24 | ((__uint32_t)(out->cmd_syndrome) & 0xff00) << 8 | ((__uint32_t)(out->cmd_syndrome) & 0xff0000) >> 8 | ((__uint32_t)(out->cmd_syndrome) & 0xff000000) >> 24) : __swap32md(out->cmd_syndrome))); |
5335 | return -1; |
5336 | } |
5337 | |
5338 | return 0; |
5339 | } |
5340 | |
5341 | static int |
5342 | mcx_create_rqt(struct mcx_softc *sc, int size, int *rqns, int *rqt) |
5343 | { |
5344 | struct mcx_cmdq_entry *cqe; |
5345 | struct mcx_dmamem mxm; |
5346 | struct mcx_cmd_create_rqt_in *in; |
5347 | struct mcx_cmd_create_rqt_mb_in *mbin; |
5348 | struct mcx_cmd_create_rqt_out *out; |
5349 | struct mcx_rqt_ctx *rqt_ctx; |
5350 | int *rqtn; |
5351 | int error; |
5352 | int token; |
5353 | int i; |
5354 | |
5355 | cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem)((void *)(&sc->sc_cmdq_mem)->mxm_kva); |
5356 | token = mcx_cmdq_token(sc); |
5357 | mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin) + |
5358 | (size * sizeof(int)), sizeof(*out), token); |
5359 | |
5360 | in = mcx_cmdq_in(cqe); |
5361 | in->cmd_opcode = htobe16(MCX_CMD_CREATE_RQT)(__uint16_t)(__builtin_constant_p(0x916) ? (__uint16_t)(((__uint16_t )(0x916) & 0xffU) << 8 | ((__uint16_t)(0x916) & 0xff00U) >> 8) : __swap16md(0x916)); |
5362 | in->cmd_op_mod = htobe16(0)(__uint16_t)(__builtin_constant_p(0) ? (__uint16_t)(((__uint16_t )(0) & 0xffU) << 8 | ((__uint16_t)(0) & 0xff00U ) >> 8) : __swap16md(0)); |
5363 | |
5364 | if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, |
5365 | &cqe->cq_input_ptr, token) != 0) { |
5366 | printf("%s: unable to allocate create rqt mailbox\n", |
5367 | DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
5368 | return (-1); |
5369 | } |
5370 | mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)); |
5371 | rqt_ctx = &mbin->cmd_rqt; |
5372 | rqt_ctx->cmd_rqt_max_size = htobe16(sc->sc_max_rqt_size)(__uint16_t)(__builtin_constant_p(sc->sc_max_rqt_size) ? ( __uint16_t)(((__uint16_t)(sc->sc_max_rqt_size) & 0xffU ) << 8 | ((__uint16_t)(sc->sc_max_rqt_size) & 0xff00U ) >> 8) : __swap16md(sc->sc_max_rqt_size)); |
5373 | rqt_ctx->cmd_rqt_actual_size = htobe16(size)(__uint16_t)(__builtin_constant_p(size) ? (__uint16_t)(((__uint16_t )(size) & 0xffU) << 8 | ((__uint16_t)(size) & 0xff00U ) >> 8) : __swap16md(size)); |
5374 | |
5375 | /* rqt list follows the rqt context */ |
5376 | rqtn = (int *)(rqt_ctx + 1); |
5377 | for (i = 0; i < size; i++) { |
5378 | rqtn[i] = htobe32(rqns[i])(__uint32_t)(__builtin_constant_p(rqns[i]) ? (__uint32_t)(((__uint32_t )(rqns[i]) & 0xff) << 24 | ((__uint32_t)(rqns[i]) & 0xff00) << 8 | ((__uint32_t)(rqns[i]) & 0xff0000) >> 8 | ((__uint32_t)(rqns[i]) & 0xff000000) >> 24) : __swap32md (rqns[i])); |
5379 | } |
5380 | |
5381 | mcx_cmdq_mboxes_sign(&mxm, 1); |
5382 | mcx_cmdq_post(sc, cqe, 0); |
5383 | error = mcx_cmdq_poll(sc, cqe, 1000); |
5384 | if (error != 0) { |
5385 | printf("%s: create rqt timeout\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
5386 | goto free; |
5387 | } |
5388 | if (mcx_cmdq_verify(cqe) != 0) { |
5389 | printf("%s: create rqt command corrupt\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
5390 | goto free; |
5391 | } |
5392 | |
5393 | out = mcx_cmdq_out(cqe); |
5394 | if (out->cmd_status != MCX_CQ_STATUS_OK(0x00 << 1)) { |
5395 | printf("%s: create rqt failed (%x, %x)\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), |
5396 | out->cmd_status, betoh32(out->cmd_syndrome)(__uint32_t)(__builtin_constant_p(out->cmd_syndrome) ? (__uint32_t )(((__uint32_t)(out->cmd_syndrome) & 0xff) << 24 | ((__uint32_t)(out->cmd_syndrome) & 0xff00) << 8 | ((__uint32_t)(out->cmd_syndrome) & 0xff0000) >> 8 | ((__uint32_t)(out->cmd_syndrome) & 0xff000000) >> 24) : __swap32md(out->cmd_syndrome))); |
5397 | error = -1; |
5398 | goto free; |
5399 | } |
5400 | |
5401 | *rqt = mcx_get_id(out->cmd_rqtn); |
5402 | return (0); |
5403 | free: |
5404 | mcx_dmamem_free(sc, &mxm); |
5405 | return (error); |
5406 | } |
5407 | |
5408 | static int |
5409 | mcx_destroy_rqt(struct mcx_softc *sc, int rqt) |
5410 | { |
5411 | struct mcx_cmdq_entry *cqe; |
5412 | struct mcx_cmd_destroy_rqt_in *in; |
5413 | struct mcx_cmd_destroy_rqt_out *out; |
5414 | int error; |
5415 | int token; |
5416 | |
5417 | cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem)((void *)(&sc->sc_cmdq_mem)->mxm_kva); |
5418 | token = mcx_cmdq_token(sc); |
5419 | mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token); |
5420 | |
5421 | in = mcx_cmdq_in(cqe); |
5422 | in->cmd_opcode = htobe16(MCX_CMD_DESTROY_RQT)(__uint16_t)(__builtin_constant_p(0x918) ? (__uint16_t)(((__uint16_t )(0x918) & 0xffU) << 8 | ((__uint16_t)(0x918) & 0xff00U) >> 8) : __swap16md(0x918)); |
5423 | in->cmd_op_mod = htobe16(0)(__uint16_t)(__builtin_constant_p(0) ? (__uint16_t)(((__uint16_t )(0) & 0xffU) << 8 | ((__uint16_t)(0) & 0xff00U ) >> 8) : __swap16md(0)); |
5424 | in->cmd_rqtn = htobe32(rqt)(__uint32_t)(__builtin_constant_p(rqt) ? (__uint32_t)(((__uint32_t )(rqt) & 0xff) << 24 | ((__uint32_t)(rqt) & 0xff00 ) << 8 | ((__uint32_t)(rqt) & 0xff0000) >> 8 | ((__uint32_t)(rqt) & 0xff000000) >> 24) : __swap32md (rqt)); |
5425 | |
5426 | mcx_cmdq_post(sc, cqe, 0); |
5427 | error = mcx_cmdq_poll(sc, cqe, 1000); |
5428 | if (error != 0) { |
5429 | printf("%s: destroy rqt timeout\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
5430 | return error; |
5431 | } |
5432 | if (mcx_cmdq_verify(cqe) != 0) { |
5433 | printf("%s: destroy rqt command corrupt\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
5434 | return error; |
5435 | } |
5436 | |
5437 | out = mcx_cmdq_out(cqe); |
5438 | if (out->cmd_status != MCX_CQ_STATUS_OK(0x00 << 1)) { |
5439 | printf("%s: destroy rqt failed (%x, %x)\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), |
5440 | out->cmd_status, betoh32(out->cmd_syndrome)(__uint32_t)(__builtin_constant_p(out->cmd_syndrome) ? (__uint32_t )(((__uint32_t)(out->cmd_syndrome) & 0xff) << 24 | ((__uint32_t)(out->cmd_syndrome) & 0xff00) << 8 | ((__uint32_t)(out->cmd_syndrome) & 0xff0000) >> 8 | ((__uint32_t)(out->cmd_syndrome) & 0xff000000) >> 24) : __swap32md(out->cmd_syndrome))); |
5441 | return -1; |
5442 | } |
5443 | |
5444 | return 0; |
5445 | } |
5446 | |
#if 0
/*
 * Debug-only (compiled out): allocate a hardware flow counter and stash
 * its id in sc->sc_flow_counter_id[i].  Returns 0 on success, -1 on any
 * command failure.
 */
static int
mcx_alloc_flow_counter(struct mcx_softc *sc, int i)
{
	struct mcx_cmdq_entry *cmde;
	struct mcx_cmd_alloc_flow_counter_in *cmd_in;
	struct mcx_cmd_alloc_flow_counter_out *cmd_out;
	int rv;

	cmde = MCX_DMA_KVA(&sc->sc_cmdq_mem);
	mcx_cmdq_init(sc, cmde, sizeof(*cmd_in), sizeof(*cmd_out),
	    mcx_cmdq_token(sc));

	cmd_in = mcx_cmdq_in(cmde);
	cmd_in->cmd_opcode = htobe16(MCX_CMD_ALLOC_FLOW_COUNTER);
	cmd_in->cmd_op_mod = htobe16(0);

	mcx_cmdq_post(sc, cmde, 0);

	rv = mcx_cmdq_poll(sc, cmde, 1000);
	if (rv != 0) {
		printf("%s: alloc flow counter timeout\n", DEVNAME(sc));
		return (-1);
	}
	if (mcx_cmdq_verify(cmde) != 0) {
		printf("%s: alloc flow counter command corrupt\n", DEVNAME(sc));
		return (-1);
	}

	cmd_out = (struct mcx_cmd_alloc_flow_counter_out *)cmde->cq_output_data;
	if (cmd_out->cmd_status != MCX_CQ_STATUS_OK) {
		printf("%s: alloc flow counter failed (%x)\n", DEVNAME(sc),
		    cmd_out->cmd_status);
		return (-1);
	}

	sc->sc_flow_counter_id[i] = betoh16(cmd_out->cmd_flow_counter_id);
	printf("flow counter id %d = %d\n", i, sc->sc_flow_counter_id[i]);

	return (0);
}
#endif
5488 | |
5489 | static int |
5490 | mcx_create_flow_table(struct mcx_softc *sc, int log_size, int level, |
5491 | int *flow_table_id) |
5492 | { |
5493 | struct mcx_cmdq_entry *cqe; |
5494 | struct mcx_dmamem mxm; |
5495 | struct mcx_cmd_create_flow_table_in *in; |
5496 | struct mcx_cmd_create_flow_table_mb_in *mbin; |
5497 | struct mcx_cmd_create_flow_table_out *out; |
5498 | int error; |
5499 | int token; |
5500 | |
5501 | cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem)((void *)(&sc->sc_cmdq_mem)->mxm_kva); |
5502 | token = mcx_cmdq_token(sc); |
5503 | mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), |
5504 | sizeof(*out), token); |
5505 | |
5506 | in = mcx_cmdq_in(cqe); |
5507 | in->cmd_opcode = htobe16(MCX_CMD_CREATE_FLOW_TABLE)(__uint16_t)(__builtin_constant_p(0x930) ? (__uint16_t)(((__uint16_t )(0x930) & 0xffU) << 8 | ((__uint16_t)(0x930) & 0xff00U) >> 8) : __swap16md(0x930)); |
5508 | in->cmd_op_mod = htobe16(0)(__uint16_t)(__builtin_constant_p(0) ? (__uint16_t)(((__uint16_t )(0) & 0xffU) << 8 | ((__uint16_t)(0) & 0xff00U ) >> 8) : __swap16md(0)); |
5509 | |
5510 | if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, |
5511 | &cqe->cq_input_ptr, token) != 0) { |
5512 | printf("%s: unable to allocate create flow table mailbox\n", |
5513 | DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
5514 | return (-1); |
5515 | } |
5516 | mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)); |
5517 | mbin->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX0; |
5518 | mbin->cmd_ctx.ft_log_size = log_size; |
5519 | mbin->cmd_ctx.ft_level = level; |
5520 | |
5521 | mcx_cmdq_mboxes_sign(&mxm, 1); |
5522 | mcx_cmdq_post(sc, cqe, 0); |
5523 | error = mcx_cmdq_poll(sc, cqe, 1000); |
5524 | if (error != 0) { |
5525 | printf("%s: create flow table timeout\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
5526 | goto free; |
5527 | } |
5528 | if (mcx_cmdq_verify(cqe) != 0) { |
5529 | printf("%s: create flow table command corrupt\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
5530 | goto free; |
5531 | } |
5532 | |
5533 | out = mcx_cmdq_out(cqe); |
5534 | if (out->cmd_status != MCX_CQ_STATUS_OK(0x00 << 1)) { |
5535 | printf("%s: create flow table failed (%x, %x)\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), |
5536 | out->cmd_status, betoh32(out->cmd_syndrome)(__uint32_t)(__builtin_constant_p(out->cmd_syndrome) ? (__uint32_t )(((__uint32_t)(out->cmd_syndrome) & 0xff) << 24 | ((__uint32_t)(out->cmd_syndrome) & 0xff00) << 8 | ((__uint32_t)(out->cmd_syndrome) & 0xff0000) >> 8 | ((__uint32_t)(out->cmd_syndrome) & 0xff000000) >> 24) : __swap32md(out->cmd_syndrome))); |
5537 | error = -1; |
5538 | goto free; |
5539 | } |
5540 | |
5541 | *flow_table_id = mcx_get_id(out->cmd_table_id); |
5542 | free: |
5543 | mcx_dmamem_free(sc, &mxm); |
5544 | return (error); |
5545 | } |
5546 | |
5547 | static int |
5548 | mcx_set_flow_table_root(struct mcx_softc *sc, int flow_table_id) |
5549 | { |
5550 | struct mcx_cmdq_entry *cqe; |
5551 | struct mcx_dmamem mxm; |
5552 | struct mcx_cmd_set_flow_table_root_in *in; |
5553 | struct mcx_cmd_set_flow_table_root_mb_in *mbin; |
5554 | struct mcx_cmd_set_flow_table_root_out *out; |
5555 | int error; |
5556 | int token; |
5557 | |
5558 | cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem)((void *)(&sc->sc_cmdq_mem)->mxm_kva); |
5559 | token = mcx_cmdq_token(sc); |
5560 | mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), |
5561 | sizeof(*out), token); |
5562 | |
5563 | in = mcx_cmdq_in(cqe); |
5564 | in->cmd_opcode = htobe16(MCX_CMD_SET_FLOW_TABLE_ROOT)(__uint16_t)(__builtin_constant_p(0x92f) ? (__uint16_t)(((__uint16_t )(0x92f) & 0xffU) << 8 | ((__uint16_t)(0x92f) & 0xff00U) >> 8) : __swap16md(0x92f)); |
5565 | in->cmd_op_mod = htobe16(0)(__uint16_t)(__builtin_constant_p(0) ? (__uint16_t)(((__uint16_t )(0) & 0xffU) << 8 | ((__uint16_t)(0) & 0xff00U ) >> 8) : __swap16md(0)); |
5566 | |
5567 | if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, |
5568 | &cqe->cq_input_ptr, token) != 0) { |
5569 | printf("%s: unable to allocate set flow table root mailbox\n", |
5570 | DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
5571 | return (-1); |
5572 | } |
5573 | mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)); |
5574 | mbin->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX0; |
5575 | mbin->cmd_table_id = htobe32(flow_table_id)(__uint32_t)(__builtin_constant_p(flow_table_id) ? (__uint32_t )(((__uint32_t)(flow_table_id) & 0xff) << 24 | ((__uint32_t )(flow_table_id) & 0xff00) << 8 | ((__uint32_t)(flow_table_id ) & 0xff0000) >> 8 | ((__uint32_t)(flow_table_id) & 0xff000000) >> 24) : __swap32md(flow_table_id)); |
5576 | |
5577 | mcx_cmdq_mboxes_sign(&mxm, 1); |
5578 | mcx_cmdq_post(sc, cqe, 0); |
5579 | error = mcx_cmdq_poll(sc, cqe, 1000); |
5580 | if (error != 0) { |
5581 | printf("%s: set flow table root timeout\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
5582 | goto free; |
5583 | } |
5584 | if (mcx_cmdq_verify(cqe) != 0) { |
5585 | printf("%s: set flow table root command corrupt\n", |
5586 | DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
5587 | goto free; |
5588 | } |
5589 | |
5590 | out = mcx_cmdq_out(cqe); |
5591 | if (out->cmd_status != MCX_CQ_STATUS_OK(0x00 << 1)) { |
5592 | printf("%s: set flow table root failed (%x, %x)\n", |
5593 | DEVNAME(sc)((sc)->sc_dev.dv_xname), out->cmd_status, betoh32(out->cmd_syndrome)(__uint32_t)(__builtin_constant_p(out->cmd_syndrome) ? (__uint32_t )(((__uint32_t)(out->cmd_syndrome) & 0xff) << 24 | ((__uint32_t)(out->cmd_syndrome) & 0xff00) << 8 | ((__uint32_t)(out->cmd_syndrome) & 0xff0000) >> 8 | ((__uint32_t)(out->cmd_syndrome) & 0xff000000) >> 24) : __swap32md(out->cmd_syndrome))); |
5594 | error = -1; |
5595 | goto free; |
5596 | } |
5597 | |
5598 | free: |
5599 | mcx_dmamem_free(sc, &mxm); |
5600 | return (error); |
5601 | } |
5602 | |
5603 | static int |
5604 | mcx_destroy_flow_table(struct mcx_softc *sc, int flow_table_id) |
5605 | { |
5606 | struct mcx_cmdq_entry *cqe; |
5607 | struct mcx_dmamem mxm; |
5608 | struct mcx_cmd_destroy_flow_table_in *in; |
5609 | struct mcx_cmd_destroy_flow_table_mb_in *mb; |
5610 | struct mcx_cmd_destroy_flow_table_out *out; |
5611 | int error; |
5612 | int token; |
5613 | |
5614 | cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem)((void *)(&sc->sc_cmdq_mem)->mxm_kva); |
5615 | token = mcx_cmdq_token(sc); |
5616 | mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mb), sizeof(*out), token); |
5617 | |
5618 | in = mcx_cmdq_in(cqe); |
5619 | in->cmd_opcode = htobe16(MCX_CMD_DESTROY_FLOW_TABLE)(__uint16_t)(__builtin_constant_p(0x931) ? (__uint16_t)(((__uint16_t )(0x931) & 0xffU) << 8 | ((__uint16_t)(0x931) & 0xff00U) >> 8) : __swap16md(0x931)); |
5620 | in->cmd_op_mod = htobe16(0)(__uint16_t)(__builtin_constant_p(0) ? (__uint16_t)(((__uint16_t )(0) & 0xffU) << 8 | ((__uint16_t)(0) & 0xff00U ) >> 8) : __swap16md(0)); |
5621 | |
5622 | if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, |
5623 | &cqe->cq_input_ptr, token) != 0) { |
5624 | printf("%s: unable to allocate destroy flow table mailbox\n", |
5625 | DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
5626 | return (-1); |
5627 | } |
5628 | mb = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)); |
5629 | mb->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX0; |
5630 | mb->cmd_table_id = htobe32(flow_table_id)(__uint32_t)(__builtin_constant_p(flow_table_id) ? (__uint32_t )(((__uint32_t)(flow_table_id) & 0xff) << 24 | ((__uint32_t )(flow_table_id) & 0xff00) << 8 | ((__uint32_t)(flow_table_id ) & 0xff0000) >> 8 | ((__uint32_t)(flow_table_id) & 0xff000000) >> 24) : __swap32md(flow_table_id)); |
5631 | |
5632 | mcx_cmdq_mboxes_sign(&mxm, 1); |
5633 | mcx_cmdq_post(sc, cqe, 0); |
5634 | error = mcx_cmdq_poll(sc, cqe, 1000); |
5635 | if (error != 0) { |
5636 | printf("%s: destroy flow table timeout\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
5637 | goto free; |
5638 | } |
5639 | if (mcx_cmdq_verify(cqe) != 0) { |
5640 | printf("%s: destroy flow table command corrupt\n", |
5641 | DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
5642 | goto free; |
5643 | } |
5644 | |
5645 | out = mcx_cmdq_out(cqe); |
5646 | if (out->cmd_status != MCX_CQ_STATUS_OK(0x00 << 1)) { |
5647 | printf("%s: destroy flow table failed (%x, %x)\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), |
5648 | out->cmd_status, betoh32(out->cmd_syndrome)(__uint32_t)(__builtin_constant_p(out->cmd_syndrome) ? (__uint32_t )(((__uint32_t)(out->cmd_syndrome) & 0xff) << 24 | ((__uint32_t)(out->cmd_syndrome) & 0xff00) << 8 | ((__uint32_t)(out->cmd_syndrome) & 0xff0000) >> 8 | ((__uint32_t)(out->cmd_syndrome) & 0xff000000) >> 24) : __swap32md(out->cmd_syndrome))); |
5649 | error = -1; |
5650 | goto free; |
5651 | } |
5652 | |
5653 | free: |
5654 | mcx_dmamem_free(sc, &mxm); |
5655 | return (error); |
5656 | } |
5657 | |
5658 | |
5659 | static int |
5660 | mcx_create_flow_group(struct mcx_softc *sc, int flow_table_id, int group, |
5661 | int start, int size, int match_enable, struct mcx_flow_match *match) |
5662 | { |
5663 | struct mcx_cmdq_entry *cqe; |
5664 | struct mcx_dmamem mxm; |
5665 | struct mcx_cmd_create_flow_group_in *in; |
5666 | struct mcx_cmd_create_flow_group_mb_in *mbin; |
5667 | struct mcx_cmd_create_flow_group_out *out; |
5668 | struct mcx_flow_group *mfg; |
5669 | int error; |
5670 | int token; |
5671 | |
5672 | cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem)((void *)(&sc->sc_cmdq_mem)->mxm_kva); |
5673 | token = mcx_cmdq_token(sc); |
5674 | mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out), |
5675 | token); |
5676 | |
5677 | in = mcx_cmdq_in(cqe); |
5678 | in->cmd_opcode = htobe16(MCX_CMD_CREATE_FLOW_GROUP)(__uint16_t)(__builtin_constant_p(0x933) ? (__uint16_t)(((__uint16_t )(0x933) & 0xffU) << 8 | ((__uint16_t)(0x933) & 0xff00U) >> 8) : __swap16md(0x933)); |
5679 | in->cmd_op_mod = htobe16(0)(__uint16_t)(__builtin_constant_p(0) ? (__uint16_t)(((__uint16_t )(0) & 0xffU) << 8 | ((__uint16_t)(0) & 0xff00U ) >> 8) : __swap16md(0)); |
5680 | |
5681 | if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2, &cqe->cq_input_ptr, token) |
5682 | != 0) { |
5683 | printf("%s: unable to allocate create flow group mailbox\n", |
5684 | DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
5685 | return (-1); |
5686 | } |
5687 | mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)); |
5688 | mbin->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX0; |
5689 | mbin->cmd_table_id = htobe32(flow_table_id)(__uint32_t)(__builtin_constant_p(flow_table_id) ? (__uint32_t )(((__uint32_t)(flow_table_id) & 0xff) << 24 | ((__uint32_t )(flow_table_id) & 0xff00) << 8 | ((__uint32_t)(flow_table_id ) & 0xff0000) >> 8 | ((__uint32_t)(flow_table_id) & 0xff000000) >> 24) : __swap32md(flow_table_id)); |
5690 | mbin->cmd_start_flow_index = htobe32(start)(__uint32_t)(__builtin_constant_p(start) ? (__uint32_t)(((__uint32_t )(start) & 0xff) << 24 | ((__uint32_t)(start) & 0xff00) << 8 | ((__uint32_t)(start) & 0xff0000) >> 8 | ((__uint32_t)(start) & 0xff000000) >> 24) : __swap32md (start)); |
5691 | mbin->cmd_end_flow_index = htobe32(start + (size - 1))(__uint32_t)(__builtin_constant_p(start + (size - 1)) ? (__uint32_t )(((__uint32_t)(start + (size - 1)) & 0xff) << 24 | ((__uint32_t)(start + (size - 1)) & 0xff00) << 8 | ((__uint32_t)(start + (size - 1)) & 0xff0000) >> 8 | ((__uint32_t)(start + (size - 1)) & 0xff000000) >> 24) : __swap32md(start + (size - 1))); |
5692 | |
5693 | mbin->cmd_match_criteria_enable = match_enable; |
5694 | memcpy(&mbin->cmd_match_criteria, match, sizeof(*match))__builtin_memcpy((&mbin->cmd_match_criteria), (match), (sizeof(*match))); |
5695 | |
5696 | mcx_cmdq_mboxes_sign(&mxm, 2); |
5697 | mcx_cmdq_post(sc, cqe, 0); |
5698 | error = mcx_cmdq_poll(sc, cqe, 1000); |
5699 | if (error != 0) { |
5700 | printf("%s: create flow group timeout\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
5701 | goto free; |
5702 | } |
5703 | if (mcx_cmdq_verify(cqe) != 0) { |
5704 | printf("%s: create flow group command corrupt\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
5705 | goto free; |
5706 | } |
5707 | |
5708 | out = mcx_cmdq_out(cqe); |
5709 | if (out->cmd_status != MCX_CQ_STATUS_OK(0x00 << 1)) { |
5710 | printf("%s: create flow group failed (%x, %x)\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), |
5711 | out->cmd_status, betoh32(out->cmd_syndrome)(__uint32_t)(__builtin_constant_p(out->cmd_syndrome) ? (__uint32_t )(((__uint32_t)(out->cmd_syndrome) & 0xff) << 24 | ((__uint32_t)(out->cmd_syndrome) & 0xff00) << 8 | ((__uint32_t)(out->cmd_syndrome) & 0xff0000) >> 8 | ((__uint32_t)(out->cmd_syndrome) & 0xff000000) >> 24) : __swap32md(out->cmd_syndrome))); |
5712 | error = -1; |
5713 | goto free; |
5714 | } |
5715 | |
5716 | mfg = &sc->sc_flow_group[group]; |
5717 | mfg->g_id = mcx_get_id(out->cmd_group_id); |
5718 | mfg->g_table = flow_table_id; |
5719 | mfg->g_start = start; |
5720 | mfg->g_size = size; |
5721 | |
5722 | free: |
5723 | mcx_dmamem_free(sc, &mxm); |
5724 | return (error); |
5725 | } |
5726 | |
5727 | static int |
5728 | mcx_destroy_flow_group(struct mcx_softc *sc, int group) |
5729 | { |
5730 | struct mcx_cmdq_entry *cqe; |
5731 | struct mcx_dmamem mxm; |
5732 | struct mcx_cmd_destroy_flow_group_in *in; |
5733 | struct mcx_cmd_destroy_flow_group_mb_in *mb; |
5734 | struct mcx_cmd_destroy_flow_group_out *out; |
5735 | struct mcx_flow_group *mfg; |
5736 | int error; |
5737 | int token; |
5738 | |
5739 | cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem)((void *)(&sc->sc_cmdq_mem)->mxm_kva); |
5740 | token = mcx_cmdq_token(sc); |
5741 | mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mb), sizeof(*out), token); |
5742 | |
5743 | in = mcx_cmdq_in(cqe); |
5744 | in->cmd_opcode = htobe16(MCX_CMD_DESTROY_FLOW_GROUP)(__uint16_t)(__builtin_constant_p(0x934) ? (__uint16_t)(((__uint16_t )(0x934) & 0xffU) << 8 | ((__uint16_t)(0x934) & 0xff00U) >> 8) : __swap16md(0x934)); |
5745 | in->cmd_op_mod = htobe16(0)(__uint16_t)(__builtin_constant_p(0) ? (__uint16_t)(((__uint16_t )(0) & 0xffU) << 8 | ((__uint16_t)(0) & 0xff00U ) >> 8) : __swap16md(0)); |
5746 | |
5747 | if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2, |
5748 | &cqe->cq_input_ptr, token) != 0) { |
5749 | printf("%s: unable to allocate destroy flow group mailbox\n", |
5750 | DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
5751 | return (-1); |
5752 | } |
5753 | mb = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)); |
5754 | mb->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX0; |
5755 | mfg = &sc->sc_flow_group[group]; |
5756 | mb->cmd_table_id = htobe32(mfg->g_table)(__uint32_t)(__builtin_constant_p(mfg->g_table) ? (__uint32_t )(((__uint32_t)(mfg->g_table) & 0xff) << 24 | (( __uint32_t)(mfg->g_table) & 0xff00) << 8 | ((__uint32_t )(mfg->g_table) & 0xff0000) >> 8 | ((__uint32_t) (mfg->g_table) & 0xff000000) >> 24) : __swap32md (mfg->g_table)); |
5757 | mb->cmd_group_id = htobe32(mfg->g_id)(__uint32_t)(__builtin_constant_p(mfg->g_id) ? (__uint32_t )(((__uint32_t)(mfg->g_id) & 0xff) << 24 | ((__uint32_t )(mfg->g_id) & 0xff00) << 8 | ((__uint32_t)(mfg-> g_id) & 0xff0000) >> 8 | ((__uint32_t)(mfg->g_id ) & 0xff000000) >> 24) : __swap32md(mfg->g_id)); |
5758 | |
5759 | mcx_cmdq_mboxes_sign(&mxm, 2); |
5760 | mcx_cmdq_post(sc, cqe, 0); |
5761 | error = mcx_cmdq_poll(sc, cqe, 1000); |
5762 | if (error != 0) { |
5763 | printf("%s: destroy flow group timeout\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
5764 | goto free; |
5765 | } |
5766 | if (mcx_cmdq_verify(cqe) != 0) { |
5767 | printf("%s: destroy flow group command corrupt\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
5768 | goto free; |
5769 | } |
5770 | |
5771 | out = mcx_cmdq_out(cqe); |
5772 | if (out->cmd_status != MCX_CQ_STATUS_OK(0x00 << 1)) { |
5773 | printf("%s: destroy flow group failed (%x, %x)\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), |
5774 | out->cmd_status, betoh32(out->cmd_syndrome)(__uint32_t)(__builtin_constant_p(out->cmd_syndrome) ? (__uint32_t )(((__uint32_t)(out->cmd_syndrome) & 0xff) << 24 | ((__uint32_t)(out->cmd_syndrome) & 0xff00) << 8 | ((__uint32_t)(out->cmd_syndrome) & 0xff0000) >> 8 | ((__uint32_t)(out->cmd_syndrome) & 0xff000000) >> 24) : __swap32md(out->cmd_syndrome))); |
5775 | error = -1; |
5776 | goto free; |
5777 | } |
5778 | |
5779 | mfg->g_id = -1; |
5780 | mfg->g_table = -1; |
5781 | mfg->g_size = 0; |
5782 | mfg->g_start = 0; |
5783 | free: |
5784 | mcx_dmamem_free(sc, &mxm); |
5785 | return (error); |
5786 | } |
5787 | |
5788 | static int |
5789 | mcx_set_flow_table_entry_mac(struct mcx_softc *sc, int group, int index, |
5790 | uint8_t *macaddr, uint32_t dest) |
5791 | { |
5792 | struct mcx_cmdq_entry *cqe; |
5793 | struct mcx_dmamem mxm; |
5794 | struct mcx_cmd_set_flow_table_entry_in *in; |
5795 | struct mcx_cmd_set_flow_table_entry_mb_in *mbin; |
5796 | struct mcx_cmd_set_flow_table_entry_out *out; |
5797 | struct mcx_flow_group *mfg; |
5798 | uint32_t *pdest; |
5799 | int error; |
5800 | int token; |
5801 | |
5802 | cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem)((void *)(&sc->sc_cmdq_mem)->mxm_kva); |
5803 | token = mcx_cmdq_token(sc); |
5804 | mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin) + sizeof(*pdest), |
5805 | sizeof(*out), token); |
5806 | |
5807 | in = mcx_cmdq_in(cqe); |
5808 | in->cmd_opcode = htobe16(MCX_CMD_SET_FLOW_TABLE_ENTRY)(__uint16_t)(__builtin_constant_p(0x936) ? (__uint16_t)(((__uint16_t )(0x936) & 0xffU) << 8 | ((__uint16_t)(0x936) & 0xff00U) >> 8) : __swap16md(0x936)); |
5809 | in->cmd_op_mod = htobe16(0)(__uint16_t)(__builtin_constant_p(0) ? (__uint16_t)(((__uint16_t )(0) & 0xffU) << 8 | ((__uint16_t)(0) & 0xff00U ) >> 8) : __swap16md(0)); |
5810 | |
5811 | if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2, &cqe->cq_input_ptr, token) |
5812 | != 0) { |
5813 | printf("%s: unable to allocate set flow table entry mailbox\n", |
5814 | DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
5815 | return (-1); |
5816 | } |
5817 | |
5818 | mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)); |
5819 | mbin->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX0; |
5820 | |
5821 | mfg = &sc->sc_flow_group[group]; |
5822 | mbin->cmd_table_id = htobe32(mfg->g_table)(__uint32_t)(__builtin_constant_p(mfg->g_table) ? (__uint32_t )(((__uint32_t)(mfg->g_table) & 0xff) << 24 | (( __uint32_t)(mfg->g_table) & 0xff00) << 8 | ((__uint32_t )(mfg->g_table) & 0xff0000) >> 8 | ((__uint32_t) (mfg->g_table) & 0xff000000) >> 24) : __swap32md (mfg->g_table)); |
5823 | mbin->cmd_flow_index = htobe32(mfg->g_start + index)(__uint32_t)(__builtin_constant_p(mfg->g_start + index) ? ( __uint32_t)(((__uint32_t)(mfg->g_start + index) & 0xff ) << 24 | ((__uint32_t)(mfg->g_start + index) & 0xff00 ) << 8 | ((__uint32_t)(mfg->g_start + index) & 0xff0000 ) >> 8 | ((__uint32_t)(mfg->g_start + index) & 0xff000000 ) >> 24) : __swap32md(mfg->g_start + index)); |
5824 | mbin->cmd_flow_ctx.fc_group_id = htobe32(mfg->g_id)(__uint32_t)(__builtin_constant_p(mfg->g_id) ? (__uint32_t )(((__uint32_t)(mfg->g_id) & 0xff) << 24 | ((__uint32_t )(mfg->g_id) & 0xff00) << 8 | ((__uint32_t)(mfg-> g_id) & 0xff0000) >> 8 | ((__uint32_t)(mfg->g_id ) & 0xff000000) >> 24) : __swap32md(mfg->g_id)); |
5825 | |
5826 | /* flow context ends at offset 0x330, 0x130 into the second mbox */ |
5827 | pdest = (uint32_t *) |
5828 | (((char *)mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 1))) + 0x130); |
5829 | mbin->cmd_flow_ctx.fc_action = htobe32(MCX_FLOW_CONTEXT_ACTION_FORWARD)(__uint32_t)(__builtin_constant_p((1 << 2)) ? (__uint32_t )(((__uint32_t)((1 << 2)) & 0xff) << 24 | ((__uint32_t )((1 << 2)) & 0xff00) << 8 | ((__uint32_t)((1 << 2)) & 0xff0000) >> 8 | ((__uint32_t)((1 << 2)) & 0xff000000) >> 24) : __swap32md((1 << 2 ))); |
5830 | mbin->cmd_flow_ctx.fc_dest_list_size = htobe32(1)(__uint32_t)(__builtin_constant_p(1) ? (__uint32_t)(((__uint32_t )(1) & 0xff) << 24 | ((__uint32_t)(1) & 0xff00) << 8 | ((__uint32_t)(1) & 0xff0000) >> 8 | ( (__uint32_t)(1) & 0xff000000) >> 24) : __swap32md(1 )); |
5831 | *pdest = htobe32(dest)(__uint32_t)(__builtin_constant_p(dest) ? (__uint32_t)(((__uint32_t )(dest) & 0xff) << 24 | ((__uint32_t)(dest) & 0xff00 ) << 8 | ((__uint32_t)(dest) & 0xff0000) >> 8 | ((__uint32_t)(dest) & 0xff000000) >> 24) : __swap32md (dest)); |
5832 | |
5833 | /* the only thing we match on at the moment is the dest mac address */ |
5834 | if (macaddr != NULL((void *)0)) { |
5835 | memcpy(mbin->cmd_flow_ctx.fc_match_value.mc_dest_mac, macaddr,__builtin_memcpy((mbin->cmd_flow_ctx.fc_match_value.mc_dest_mac ), (macaddr), (6)) |
5836 | ETHER_ADDR_LEN)__builtin_memcpy((mbin->cmd_flow_ctx.fc_match_value.mc_dest_mac ), (macaddr), (6)); |
5837 | } |
5838 | |
5839 | mcx_cmdq_mboxes_sign(&mxm, 2); |
5840 | mcx_cmdq_post(sc, cqe, 0); |
5841 | error = mcx_cmdq_poll(sc, cqe, 1000); |
5842 | if (error != 0) { |
5843 | printf("%s: set flow table entry timeout\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
5844 | goto free; |
5845 | } |
5846 | if (mcx_cmdq_verify(cqe) != 0) { |
5847 | printf("%s: set flow table entry command corrupt\n", |
5848 | DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
5849 | goto free; |
5850 | } |
5851 | |
5852 | out = mcx_cmdq_out(cqe); |
5853 | if (out->cmd_status != MCX_CQ_STATUS_OK(0x00 << 1)) { |
5854 | printf("%s: set flow table entry failed (%x, %x)\n", |
5855 | DEVNAME(sc)((sc)->sc_dev.dv_xname), out->cmd_status, betoh32(out->cmd_syndrome)(__uint32_t)(__builtin_constant_p(out->cmd_syndrome) ? (__uint32_t )(((__uint32_t)(out->cmd_syndrome) & 0xff) << 24 | ((__uint32_t)(out->cmd_syndrome) & 0xff00) << 8 | ((__uint32_t)(out->cmd_syndrome) & 0xff0000) >> 8 | ((__uint32_t)(out->cmd_syndrome) & 0xff000000) >> 24) : __swap32md(out->cmd_syndrome))); |
5856 | error = -1; |
5857 | goto free; |
5858 | } |
5859 | |
5860 | free: |
5861 | mcx_dmamem_free(sc, &mxm); |
5862 | return (error); |
5863 | } |
5864 | |
5865 | static int |
5866 | mcx_set_flow_table_entry_proto(struct mcx_softc *sc, int group, int index, |
5867 | int ethertype, int ip_proto, uint32_t dest) |
5868 | { |
5869 | struct mcx_cmdq_entry *cqe; |
5870 | struct mcx_dmamem mxm; |
5871 | struct mcx_cmd_set_flow_table_entry_in *in; |
5872 | struct mcx_cmd_set_flow_table_entry_mb_in *mbin; |
5873 | struct mcx_cmd_set_flow_table_entry_out *out; |
5874 | struct mcx_flow_group *mfg; |
5875 | uint32_t *pdest; |
5876 | int error; |
5877 | int token; |
5878 | |
5879 | cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem)((void *)(&sc->sc_cmdq_mem)->mxm_kva); |
5880 | token = mcx_cmdq_token(sc); |
5881 | mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin) + sizeof(*pdest), |
5882 | sizeof(*out), token); |
5883 | |
5884 | in = mcx_cmdq_in(cqe); |
5885 | in->cmd_opcode = htobe16(MCX_CMD_SET_FLOW_TABLE_ENTRY)(__uint16_t)(__builtin_constant_p(0x936) ? (__uint16_t)(((__uint16_t )(0x936) & 0xffU) << 8 | ((__uint16_t)(0x936) & 0xff00U) >> 8) : __swap16md(0x936)); |
5886 | in->cmd_op_mod = htobe16(0)(__uint16_t)(__builtin_constant_p(0) ? (__uint16_t)(((__uint16_t )(0) & 0xffU) << 8 | ((__uint16_t)(0) & 0xff00U ) >> 8) : __swap16md(0)); |
5887 | |
5888 | if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2, &cqe->cq_input_ptr, token) |
5889 | != 0) { |
5890 | printf("%s: unable to allocate set flow table entry mailbox\n", |
5891 | DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
5892 | return (-1); |
5893 | } |
5894 | |
5895 | mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)); |
5896 | mbin->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX0; |
5897 | |
5898 | mfg = &sc->sc_flow_group[group]; |
5899 | mbin->cmd_table_id = htobe32(mfg->g_table)(__uint32_t)(__builtin_constant_p(mfg->g_table) ? (__uint32_t )(((__uint32_t)(mfg->g_table) & 0xff) << 24 | (( __uint32_t)(mfg->g_table) & 0xff00) << 8 | ((__uint32_t )(mfg->g_table) & 0xff0000) >> 8 | ((__uint32_t) (mfg->g_table) & 0xff000000) >> 24) : __swap32md (mfg->g_table)); |
5900 | mbin->cmd_flow_index = htobe32(mfg->g_start + index)(__uint32_t)(__builtin_constant_p(mfg->g_start + index) ? ( __uint32_t)(((__uint32_t)(mfg->g_start + index) & 0xff ) << 24 | ((__uint32_t)(mfg->g_start + index) & 0xff00 ) << 8 | ((__uint32_t)(mfg->g_start + index) & 0xff0000 ) >> 8 | ((__uint32_t)(mfg->g_start + index) & 0xff000000 ) >> 24) : __swap32md(mfg->g_start + index)); |
5901 | mbin->cmd_flow_ctx.fc_group_id = htobe32(mfg->g_id)(__uint32_t)(__builtin_constant_p(mfg->g_id) ? (__uint32_t )(((__uint32_t)(mfg->g_id) & 0xff) << 24 | ((__uint32_t )(mfg->g_id) & 0xff00) << 8 | ((__uint32_t)(mfg-> g_id) & 0xff0000) >> 8 | ((__uint32_t)(mfg->g_id ) & 0xff000000) >> 24) : __swap32md(mfg->g_id)); |
5902 | |
5903 | /* flow context ends at offset 0x330, 0x130 into the second mbox */ |
5904 | pdest = (uint32_t *) |
5905 | (((char *)mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 1))) + 0x130); |
5906 | mbin->cmd_flow_ctx.fc_action = htobe32(MCX_FLOW_CONTEXT_ACTION_FORWARD)(__uint32_t)(__builtin_constant_p((1 << 2)) ? (__uint32_t )(((__uint32_t)((1 << 2)) & 0xff) << 24 | ((__uint32_t )((1 << 2)) & 0xff00) << 8 | ((__uint32_t)((1 << 2)) & 0xff0000) >> 8 | ((__uint32_t)((1 << 2)) & 0xff000000) >> 24) : __swap32md((1 << 2 ))); |
5907 | mbin->cmd_flow_ctx.fc_dest_list_size = htobe32(1)(__uint32_t)(__builtin_constant_p(1) ? (__uint32_t)(((__uint32_t )(1) & 0xff) << 24 | ((__uint32_t)(1) & 0xff00) << 8 | ((__uint32_t)(1) & 0xff0000) >> 8 | ( (__uint32_t)(1) & 0xff000000) >> 24) : __swap32md(1 )); |
5908 | *pdest = htobe32(dest)(__uint32_t)(__builtin_constant_p(dest) ? (__uint32_t)(((__uint32_t )(dest) & 0xff) << 24 | ((__uint32_t)(dest) & 0xff00 ) << 8 | ((__uint32_t)(dest) & 0xff0000) >> 8 | ((__uint32_t)(dest) & 0xff000000) >> 24) : __swap32md (dest)); |
5909 | |
5910 | mbin->cmd_flow_ctx.fc_match_value.mc_ethertype = htobe16(ethertype)(__uint16_t)(__builtin_constant_p(ethertype) ? (__uint16_t)(( (__uint16_t)(ethertype) & 0xffU) << 8 | ((__uint16_t )(ethertype) & 0xff00U) >> 8) : __swap16md(ethertype )); |
5911 | mbin->cmd_flow_ctx.fc_match_value.mc_ip_proto = ip_proto; |
5912 | |
5913 | mcx_cmdq_mboxes_sign(&mxm, 2); |
5914 | mcx_cmdq_post(sc, cqe, 0); |
5915 | error = mcx_cmdq_poll(sc, cqe, 1000); |
5916 | if (error != 0) { |
5917 | printf("%s: set flow table entry timeout\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
5918 | goto free; |
5919 | } |
5920 | if (mcx_cmdq_verify(cqe) != 0) { |
5921 | printf("%s: set flow table entry command corrupt\n", |
5922 | DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
5923 | goto free; |
5924 | } |
5925 | |
5926 | out = mcx_cmdq_out(cqe); |
5927 | if (out->cmd_status != MCX_CQ_STATUS_OK(0x00 << 1)) { |
5928 | printf("%s: set flow table entry failed (%x, %x)\n", |
5929 | DEVNAME(sc)((sc)->sc_dev.dv_xname), out->cmd_status, betoh32(out->cmd_syndrome)(__uint32_t)(__builtin_constant_p(out->cmd_syndrome) ? (__uint32_t )(((__uint32_t)(out->cmd_syndrome) & 0xff) << 24 | ((__uint32_t)(out->cmd_syndrome) & 0xff00) << 8 | ((__uint32_t)(out->cmd_syndrome) & 0xff0000) >> 8 | ((__uint32_t)(out->cmd_syndrome) & 0xff000000) >> 24) : __swap32md(out->cmd_syndrome))); |
5930 | error = -1; |
5931 | goto free; |
5932 | } |
5933 | |
5934 | free: |
5935 | mcx_dmamem_free(sc, &mxm); |
5936 | return (error); |
5937 | } |
5938 | |
5939 | static int |
5940 | mcx_delete_flow_table_entry(struct mcx_softc *sc, int group, int index) |
5941 | { |
5942 | struct mcx_cmdq_entry *cqe; |
5943 | struct mcx_dmamem mxm; |
5944 | struct mcx_cmd_delete_flow_table_entry_in *in; |
5945 | struct mcx_cmd_delete_flow_table_entry_mb_in *mbin; |
5946 | struct mcx_cmd_delete_flow_table_entry_out *out; |
5947 | struct mcx_flow_group *mfg; |
5948 | int error; |
5949 | int token; |
5950 | |
5951 | cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem)((void *)(&sc->sc_cmdq_mem)->mxm_kva); |
5952 | token = mcx_cmdq_token(sc); |
5953 | mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out), |
5954 | token); |
5955 | |
5956 | in = mcx_cmdq_in(cqe); |
5957 | in->cmd_opcode = htobe16(MCX_CMD_DELETE_FLOW_TABLE_ENTRY)(__uint16_t)(__builtin_constant_p(0x938) ? (__uint16_t)(((__uint16_t )(0x938) & 0xffU) << 8 | ((__uint16_t)(0x938) & 0xff00U) >> 8) : __swap16md(0x938)); |
5958 | in->cmd_op_mod = htobe16(0)(__uint16_t)(__builtin_constant_p(0) ? (__uint16_t)(((__uint16_t )(0) & 0xffU) << 8 | ((__uint16_t)(0) & 0xff00U ) >> 8) : __swap16md(0)); |
5959 | |
5960 | if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2, |
5961 | &cqe->cq_input_ptr, token) != 0) { |
5962 | printf("%s: unable to allocate " |
5963 | "delete flow table entry mailbox\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
5964 | return (-1); |
5965 | } |
5966 | mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)); |
5967 | mbin->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX0; |
5968 | |
5969 | mfg = &sc->sc_flow_group[group]; |
5970 | mbin->cmd_table_id = htobe32(mfg->g_table)(__uint32_t)(__builtin_constant_p(mfg->g_table) ? (__uint32_t )(((__uint32_t)(mfg->g_table) & 0xff) << 24 | (( __uint32_t)(mfg->g_table) & 0xff00) << 8 | ((__uint32_t )(mfg->g_table) & 0xff0000) >> 8 | ((__uint32_t) (mfg->g_table) & 0xff000000) >> 24) : __swap32md (mfg->g_table)); |
5971 | mbin->cmd_flow_index = htobe32(mfg->g_start + index)(__uint32_t)(__builtin_constant_p(mfg->g_start + index) ? ( __uint32_t)(((__uint32_t)(mfg->g_start + index) & 0xff ) << 24 | ((__uint32_t)(mfg->g_start + index) & 0xff00 ) << 8 | ((__uint32_t)(mfg->g_start + index) & 0xff0000 ) >> 8 | ((__uint32_t)(mfg->g_start + index) & 0xff000000 ) >> 24) : __swap32md(mfg->g_start + index)); |
5972 | |
5973 | mcx_cmdq_mboxes_sign(&mxm, 2); |
5974 | mcx_cmdq_post(sc, cqe, 0); |
5975 | error = mcx_cmdq_poll(sc, cqe, 1000); |
5976 | if (error != 0) { |
5977 | printf("%s: delete flow table entry timeout\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
5978 | goto free; |
5979 | } |
5980 | if (mcx_cmdq_verify(cqe) != 0) { |
5981 | printf("%s: delete flow table entry command corrupt\n", |
5982 | DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
5983 | goto free; |
5984 | } |
5985 | |
5986 | out = mcx_cmdq_out(cqe); |
5987 | if (out->cmd_status != MCX_CQ_STATUS_OK(0x00 << 1)) { |
5988 | printf("%s: delete flow table entry %d:%d failed (%x, %x)\n", |
5989 | DEVNAME(sc)((sc)->sc_dev.dv_xname), group, index, out->cmd_status, |
5990 | betoh32(out->cmd_syndrome)(__uint32_t)(__builtin_constant_p(out->cmd_syndrome) ? (__uint32_t )(((__uint32_t)(out->cmd_syndrome) & 0xff) << 24 | ((__uint32_t)(out->cmd_syndrome) & 0xff00) << 8 | ((__uint32_t)(out->cmd_syndrome) & 0xff0000) >> 8 | ((__uint32_t)(out->cmd_syndrome) & 0xff000000) >> 24) : __swap32md(out->cmd_syndrome))); |
5991 | error = -1; |
5992 | goto free; |
5993 | } |
5994 | |
5995 | free: |
5996 | mcx_dmamem_free(sc, &mxm); |
5997 | return (error); |
5998 | } |
5999 | |
6000 | #if 0 |
/*
 * mcx_dump_flow_table: debug-only helper (compiled out under #if 0).
 * Queries a flow table by id (QUERY_FLOW_TABLE, 0x932) and hex-dumps the
 * returned flow table context, 16 bytes per output line.
 * Returns 0 on success, -1 or the cmdq error otherwise.
 */
6001 | int
6002 | mcx_dump_flow_table(struct mcx_softc *sc, int flow_table_id)
6003 | {
6004 | struct mcx_dmamem mxm;
6005 | struct mcx_cmdq_entry *cqe;
6006 | struct mcx_cmd_query_flow_table_in *in;
6007 | struct mcx_cmd_query_flow_table_mb_in *mbin;
6008 | struct mcx_cmd_query_flow_table_out *out;
6009 | struct mcx_cmd_query_flow_table_mb_out *mbout;
6010 | uint8_t token = mcx_cmdq_token(sc);
6011 | int error;
6012 | int i;
6013 | uint8_t *dump;
6014 | 
6015 | cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem)((void *)(&sc->sc_cmdq_mem)->mxm_kva);
6016 | mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
6017 | sizeof(*out) + sizeof(*mbout) + 16, token);
6018 | 
6019 | in = mcx_cmdq_in(cqe);
6020 | in->cmd_opcode = htobe16(MCX_CMD_QUERY_FLOW_TABLE)(__uint16_t)(__builtin_constant_p(0x932) ? (__uint16_t)(((__uint16_t )(0x932) & 0xffU) << 8 | ((__uint16_t)(0x932) & 0xff00U) >> 8) : __swap16md(0x932));
6021 | in->cmd_op_mod = htobe16(0)(__uint16_t)(__builtin_constant_p(0) ? (__uint16_t)(((__uint16_t )(0) & 0xffU) << 8 | ((__uint16_t)(0) & 0xff00U ) >> 8) : __swap16md(0));
6022 | 
6023 | CTASSERT(sizeof(*mbin) <= MCX_CMDQ_MAILBOX_DATASIZE)extern char _ctassert[(sizeof(*mbin) <= 512) ? 1 : -1 ] __attribute__ ((__unused__));
6024 | CTASSERT(sizeof(*mbout) <= MCX_CMDQ_MAILBOX_DATASIZE)extern char _ctassert[(sizeof(*mbout) <= 512) ? 1 : -1 ] __attribute__ ((__unused__));
6025 | if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
6026 | &cqe->cq_output_ptr, token) != 0) {
6027 | printf(", unable to allocate query flow table mailboxes\n");
6028 | return (-1);
6029 | }
/* the same mailbox chain carries both command input and output */
6030 | cqe->cq_input_ptr = cqe->cq_output_ptr;
6031 | 
6032 | mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
6033 | mbin->cmd_table_type = 0;
6034 | mbin->cmd_table_id = htobe32(flow_table_id)(__uint32_t)(__builtin_constant_p(flow_table_id) ? (__uint32_t )(((__uint32_t)(flow_table_id) & 0xff) << 24 | ((__uint32_t )(flow_table_id) & 0xff00) << 8 | ((__uint32_t)(flow_table_id ) & 0xff0000) >> 8 | ((__uint32_t)(flow_table_id) & 0xff000000) >> 24) : __swap32md(flow_table_id));
6035 | 
6036 | mcx_cmdq_mboxes_sign(&mxm, 1);
6037 | 
6038 | mcx_cmdq_post(sc, cqe, 0);
6039 | error = mcx_cmdq_poll(sc, cqe, 1000);
6040 | if (error != 0) {
6041 | printf("%s: query flow table timeout\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
6042 | goto free;
6043 | }
6044 | error = mcx_cmdq_verify(cqe);
6045 | if (error != 0) {
6046 | printf("%s: query flow table reply corrupt\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
6047 | goto free;
6048 | }
6049 | 
6050 | out = mcx_cmdq_out(cqe);
6051 | switch (out->cmd_status) {
6052 | case MCX_CQ_STATUS_OK(0x00 << 1):
6053 | break;
6054 | default:
6055 | printf("%s: query flow table failed (%x/%x)\n", DEVNAME(sc)((sc)->sc_dev.dv_xname),
6056 | out->cmd_status, betoh32(out->cmd_syndrome)(__uint32_t)(__builtin_constant_p(out->cmd_syndrome) ? (__uint32_t )(((__uint32_t)(out->cmd_syndrome) & 0xff) << 24 | ((__uint32_t)(out->cmd_syndrome) & 0xff00) << 8 | ((__uint32_t)(out->cmd_syndrome) & 0xff0000) >> 8 | ((__uint32_t)(out->cmd_syndrome) & 0xff000000) >> 24) : __swap32md(out->cmd_syndrome)));
6057 | error = -1;
6058 | goto free;
6059 | }
6060 | 
/* skip the 8-byte header in front of the table context, then dump it */
6061 | mbout = (struct mcx_cmd_query_flow_table_mb_out *)
6062 | (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
6063 | dump = (uint8_t *)mbout + 8;
6064 | for (i = 0; i < sizeof(struct mcx_flow_table_ctx); i++) {
6065 | printf("%.2x ", dump[i]);
6066 | if (i % 16 == 15)
6067 | printf("\n");
6068 | }
6069 | free:
6070 | mcx_cq_mboxes_free(sc, &mxm);
6071 | return (error);
6072 | }
/*
 * mcx_dump_flow_table_entry: debug-only helper (compiled out under #if 0).
 * Queries one entry of a flow table (QUERY_FLOW_TABLE_ENTRY, 0x937) and
 * hex-dumps the first mailbox worth of the reply.
 * Returns 0 on success, -1 or the cmdq error otherwise.
 */
6073 | int
6074 | mcx_dump_flow_table_entry(struct mcx_softc *sc, int flow_table_id, int index)
6075 | {
6076 | struct mcx_dmamem mxm;
6077 | struct mcx_cmdq_entry *cqe;
6078 | struct mcx_cmd_query_flow_table_entry_in *in;
6079 | struct mcx_cmd_query_flow_table_entry_mb_in *mbin;
6080 | struct mcx_cmd_query_flow_table_entry_out *out;
6081 | struct mcx_cmd_query_flow_table_entry_mb_out *mbout;
6082 | uint8_t token = mcx_cmdq_token(sc);
6083 | int error;
6084 | int i;
6085 | uint8_t *dump;
6086 | 
6087 | cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem)((void *)(&sc->sc_cmdq_mem)->mxm_kva);
6088 | mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
6089 | sizeof(*out) + sizeof(*mbout) + 16, token);
6090 | 
6091 | in = mcx_cmdq_in(cqe);
6092 | in->cmd_opcode = htobe16(MCX_CMD_QUERY_FLOW_TABLE_ENTRY)(__uint16_t)(__builtin_constant_p(0x937) ? (__uint16_t)(((__uint16_t )(0x937) & 0xffU) << 8 | ((__uint16_t)(0x937) & 0xff00U) >> 8) : __swap16md(0x937));
6093 | in->cmd_op_mod = htobe16(0)(__uint16_t)(__builtin_constant_p(0) ? (__uint16_t)(((__uint16_t )(0) & 0xffU) << 8 | ((__uint16_t)(0) & 0xff00U ) >> 8) : __swap16md(0));
6094 | 
6095 | CTASSERT(sizeof(*mbin) <= MCX_CMDQ_MAILBOX_DATASIZE)extern char _ctassert[(sizeof(*mbin) <= 512) ? 1 : -1 ] __attribute__ ((__unused__));
6096 | CTASSERT(sizeof(*mbout) <= MCX_CMDQ_MAILBOX_DATASIZE*2)extern char _ctassert[(sizeof(*mbout) <= 512*2) ? 1 : -1 ] __attribute__((__unused__));
6097 | if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
6098 | &cqe->cq_output_ptr, token) != 0) {
6099 | printf(", unable to allocate "
6100 | "query flow table entry mailboxes\n");
6101 | return (-1);
6102 | }
/* the same mailbox chain carries both command input and output */
6103 | cqe->cq_input_ptr = cqe->cq_output_ptr;
6104 | 
6105 | mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
6106 | mbin->cmd_table_type = 0;
6107 | mbin->cmd_table_id = htobe32(flow_table_id)(__uint32_t)(__builtin_constant_p(flow_table_id) ? (__uint32_t )(((__uint32_t)(flow_table_id) & 0xff) << 24 | ((__uint32_t )(flow_table_id) & 0xff00) << 8 | ((__uint32_t)(flow_table_id ) & 0xff0000) >> 8 | ((__uint32_t)(flow_table_id) & 0xff000000) >> 24) : __swap32md(flow_table_id));
6108 | mbin->cmd_flow_index = htobe32(index)(__uint32_t)(__builtin_constant_p(index) ? (__uint32_t)(((__uint32_t )(index) & 0xff) << 24 | ((__uint32_t)(index) & 0xff00) << 8 | ((__uint32_t)(index) & 0xff0000) >> 8 | ((__uint32_t)(index) & 0xff000000) >> 24) : __swap32md (index));
6109 | 
6110 | mcx_cmdq_mboxes_sign(&mxm, 1);
6111 | 
6112 | mcx_cmdq_post(sc, cqe, 0);
6113 | error = mcx_cmdq_poll(sc, cqe, 1000);
6114 | if (error != 0) {
6115 | printf("%s: query flow table entry timeout\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
6116 | goto free;
6117 | }
6118 | error = mcx_cmdq_verify(cqe);
6119 | if (error != 0) {
6120 | printf("%s: query flow table entry reply corrupt\n",
6121 | DEVNAME(sc)((sc)->sc_dev.dv_xname));
6122 | goto free;
6123 | }
6124 | 
6125 | out = mcx_cmdq_out(cqe);
6126 | switch (out->cmd_status) {
6127 | case MCX_CQ_STATUS_OK(0x00 << 1):
6128 | break;
6129 | default:
6130 | printf("%s: query flow table entry failed (%x/%x)\n",
6131 | DEVNAME(sc)((sc)->sc_dev.dv_xname), out->cmd_status, betoh32(out->cmd_syndrome)(__uint32_t)(__builtin_constant_p(out->cmd_syndrome) ? (__uint32_t )(((__uint32_t)(out->cmd_syndrome) & 0xff) << 24 | ((__uint32_t)(out->cmd_syndrome) & 0xff00) << 8 | ((__uint32_t)(out->cmd_syndrome) & 0xff0000) >> 8 | ((__uint32_t)(out->cmd_syndrome) & 0xff000000) >> 24) : __swap32md(out->cmd_syndrome)));
6132 | error = -1;
6133 | goto free;
6134 | }
6135 | 
/* dump the first mailbox of the reply, 16 bytes per line */
6136 | mbout = (struct mcx_cmd_query_flow_table_entry_mb_out *)
6137 | (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
6138 | dump = (uint8_t *)mbout;
6139 | for (i = 0; i < MCX_CMDQ_MAILBOX_DATASIZE512; i++) {
6140 | printf("%.2x ", dump[i]);
6141 | if (i % 16 == 15)
6142 | printf("\n");
6143 | }
6144 | 
6145 | free:
6146 | mcx_cq_mboxes_free(sc, &mxm);
6147 | return (error);
6148 | }
6149 | |
/*
 * mcx_dump_flow_group: debug-only helper (compiled out under #if 0).
 * Queries flow group sc_flow_group_id within the given flow table
 * (QUERY_FLOW_GROUP, 0x935) and hex-dumps both reply mailboxes.
 * Returns 0 on success, -1 or the cmdq error otherwise.
 */
6150 | int
6151 | mcx_dump_flow_group(struct mcx_softc *sc, int flow_table_id)
6152 | {
6153 | struct mcx_dmamem mxm;
6154 | struct mcx_cmdq_entry *cqe;
6155 | struct mcx_cmd_query_flow_group_in *in;
6156 | struct mcx_cmd_query_flow_group_mb_in *mbin;
6157 | struct mcx_cmd_query_flow_group_out *out;
6158 | struct mcx_cmd_query_flow_group_mb_out *mbout;
6159 | uint8_t token = mcx_cmdq_token(sc);
6160 | int error;
6161 | int i;
6162 | uint8_t *dump;
6163 | 
6164 | cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem)((void *)(&sc->sc_cmdq_mem)->mxm_kva);
6165 | mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
6166 | sizeof(*out) + sizeof(*mbout) + 16, token);
6167 | 
6168 | in = mcx_cmdq_in(cqe);
6169 | in->cmd_opcode = htobe16(MCX_CMD_QUERY_FLOW_GROUP)(__uint16_t)(__builtin_constant_p(0x935) ? (__uint16_t)(((__uint16_t )(0x935) & 0xffU) << 8 | ((__uint16_t)(0x935) & 0xff00U) >> 8) : __swap16md(0x935));
6170 | in->cmd_op_mod = htobe16(0)(__uint16_t)(__builtin_constant_p(0) ? (__uint16_t)(((__uint16_t )(0) & 0xffU) << 8 | ((__uint16_t)(0) & 0xff00U ) >> 8) : __swap16md(0));
6171 | 
6172 | CTASSERT(sizeof(*mbin) <= MCX_CMDQ_MAILBOX_DATASIZE)extern char _ctassert[(sizeof(*mbin) <= 512) ? 1 : -1 ] __attribute__ ((__unused__));
6173 | CTASSERT(sizeof(*mbout) <= MCX_CMDQ_MAILBOX_DATASIZE*2)extern char _ctassert[(sizeof(*mbout) <= 512*2) ? 1 : -1 ] __attribute__((__unused__));
6174 | if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
6175 | &cqe->cq_output_ptr, token) != 0) {
6176 | printf(", unable to allocate query flow group mailboxes\n");
6177 | return (-1);
6178 | }
/* the same mailbox chain carries both command input and output */
6179 | cqe->cq_input_ptr = cqe->cq_output_ptr;
6180 | 
6181 | mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
6182 | mbin->cmd_table_type = 0;
6183 | mbin->cmd_table_id = htobe32(flow_table_id)(__uint32_t)(__builtin_constant_p(flow_table_id) ? (__uint32_t )(((__uint32_t)(flow_table_id) & 0xff) << 24 | ((__uint32_t )(flow_table_id) & 0xff00) << 8 | ((__uint32_t)(flow_table_id ) & 0xff0000) >> 8 | ((__uint32_t)(flow_table_id) & 0xff000000) >> 24) : __swap32md(flow_table_id));
6184 | mbin->cmd_group_id = htobe32(sc->sc_flow_group_id)(__uint32_t)(__builtin_constant_p(sc->sc_flow_group_id) ? ( __uint32_t)(((__uint32_t)(sc->sc_flow_group_id) & 0xff ) << 24 | ((__uint32_t)(sc->sc_flow_group_id) & 0xff00 ) << 8 | ((__uint32_t)(sc->sc_flow_group_id) & 0xff0000 ) >> 8 | ((__uint32_t)(sc->sc_flow_group_id) & 0xff000000 ) >> 24) : __swap32md(sc->sc_flow_group_id));
6185 | 
6186 | mcx_cmdq_mboxes_sign(&mxm, 1);
6187 | 
6188 | mcx_cmdq_post(sc, cqe, 0);
6189 | error = mcx_cmdq_poll(sc, cqe, 1000);
6190 | if (error != 0) {
6191 | printf("%s: query flow group timeout\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
6192 | goto free;
6193 | }
6194 | error = mcx_cmdq_verify(cqe);
6195 | if (error != 0) {
6196 | printf("%s: query flow group reply corrupt\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
6197 | goto free;
6198 | }
6199 | 
6200 | out = mcx_cmdq_out(cqe);
6201 | switch (out->cmd_status) {
6202 | case MCX_CQ_STATUS_OK(0x00 << 1):
6203 | break;
6204 | default:
6205 | printf("%s: query flow group failed (%x/%x)\n", DEVNAME(sc)((sc)->sc_dev.dv_xname),
6206 | out->cmd_status, betoh32(out->cmd_syndrome)(__uint32_t)(__builtin_constant_p(out->cmd_syndrome) ? (__uint32_t )(((__uint32_t)(out->cmd_syndrome) & 0xff) << 24 | ((__uint32_t)(out->cmd_syndrome) & 0xff00) << 8 | ((__uint32_t)(out->cmd_syndrome) & 0xff0000) >> 8 | ((__uint32_t)(out->cmd_syndrome) & 0xff000000) >> 24) : __swap32md(out->cmd_syndrome)));
6207 | error = -1;
6208 | goto free;
6209 | }
6210 | 
/* reply spans two mailboxes; dump each one, 16 bytes per line */
6211 | mbout = (struct mcx_cmd_query_flow_group_mb_out *)
6212 | (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
6213 | dump = (uint8_t *)mbout;
6214 | for (i = 0; i < MCX_CMDQ_MAILBOX_DATASIZE512; i++) {
6215 | printf("%.2x ", dump[i]);
6216 | if (i % 16 == 15)
6217 | printf("\n");
6218 | }
6219 | dump = (uint8_t *)(mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 1)));
6220 | for (i = 0; i < MCX_CMDQ_MAILBOX_DATASIZE512; i++) {
6221 | printf("%.2x ", dump[i]);
6222 | if (i % 16 == 15)
6223 | printf("\n");
6224 | }
6225 | 
6226 | free:
6227 | mcx_cq_mboxes_free(sc, &mxm);
6228 | return (error);
6229 | }
6230 | |
/*
 * mcx_dump_counters: debug-only helper (compiled out under #if 0).
 * Queries (and clears, via cmd_clear = 0x80) the NIC vport counters
 * (QUERY_VPORT_COUNTERS, 0x770) and prints err/ucast/bcast packet counts
 * when any are non-zero.
 * Returns 0 on success, -1 or the cmdq error otherwise.
 */
6231 | static int
6232 | mcx_dump_counters(struct mcx_softc *sc)
6233 | {
6234 | struct mcx_dmamem mxm;
6235 | struct mcx_cmdq_entry *cqe;
6236 | struct mcx_cmd_query_vport_counters_in *in;
6237 | struct mcx_cmd_query_vport_counters_mb_in *mbin;
6238 | struct mcx_cmd_query_vport_counters_out *out;
6239 | struct mcx_nic_vport_counters *counters;
6240 | int error, token;
6241 | 
6242 | cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem)((void *)(&sc->sc_cmdq_mem)->mxm_kva);
6243 | token = mcx_cmdq_token(sc);
6244 | mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
6245 | sizeof(*out) + sizeof(*counters), token);
6246 | 
6247 | in = mcx_cmdq_in(cqe);
6248 | in->cmd_opcode = htobe16(MCX_CMD_QUERY_VPORT_COUNTERS)(__uint16_t)(__builtin_constant_p(0x770) ? (__uint16_t)(((__uint16_t )(0x770) & 0xffU) << 8 | ((__uint16_t)(0x770) & 0xff00U) >> 8) : __swap16md(0x770));
6249 | in->cmd_op_mod = htobe16(0)(__uint16_t)(__builtin_constant_p(0) ? (__uint16_t)(((__uint16_t )(0) & 0xffU) << 8 | ((__uint16_t)(0) & 0xff00U ) >> 8) : __swap16md(0));
6250 | 
6251 | if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
6252 | &cqe->cq_output_ptr, token) != 0) {
6253 | printf(", unable to allocate "
6254 | "query nic vport counters mailboxen\n");
6255 | return (-1);
6256 | }
/* the same mailbox carries both command input and output */
6257 | cqe->cq_input_ptr = cqe->cq_output_ptr;
6258 | 
/* 0x80 = clear-counters flag: counters are reset after being read */
6259 | mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
6260 | mbin->cmd_clear = 0x80;
6261 | 
6262 | mcx_cmdq_mboxes_sign(&mxm, 1);
6263 | mcx_cmdq_post(sc, cqe, 0);
6264 | 
6265 | error = mcx_cmdq_poll(sc, cqe, 1000);
6266 | if (error != 0) {
6267 | printf("%s: query nic vport counters timeout\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
6268 | goto free;
6269 | }
/*
 * NOTE(review): verify result discarded; 'error' stays 0 here so a
 * corrupt reply returns success — sibling functions assign
 * error = mcx_cmdq_verify(cqe).  Confirm intent.
 */
6270 | if (mcx_cmdq_verify(cqe) != 0) {
6271 | printf("%s: query nic vport counters command corrupt\n",
6272 | DEVNAME(sc)((sc)->sc_dev.dv_xname));
6273 | goto free;
6274 | }
6275 | 
6276 | out = mcx_cmdq_out(cqe);
6277 | if (out->cmd_status != MCX_CQ_STATUS_OK(0x00 << 1)) {
6278 | printf("%s: query nic vport counters failed (%x, %x)\n",
6279 | DEVNAME(sc)((sc)->sc_dev.dv_xname), out->cmd_status, betoh32(out->cmd_syndrome)(__uint32_t)(__builtin_constant_p(out->cmd_syndrome) ? (__uint32_t )(((__uint32_t)(out->cmd_syndrome) & 0xff) << 24 | ((__uint32_t)(out->cmd_syndrome) & 0xff00) << 8 | ((__uint32_t)(out->cmd_syndrome) & 0xff0000) >> 8 | ((__uint32_t)(out->cmd_syndrome) & 0xff000000) >> 24) : __swap32md(out->cmd_syndrome)));
6280 | error = -1;
6281 | goto free;
6282 | }
6283 | 
/*
 * Only print if anything was counted.  Note the sum below adds the
 * big-endian raw values (fine as a zero/non-zero test only).
 */
6284 | counters = (struct mcx_nic_vport_counters *)
6285 | (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
6286 | if (counters->rx_bcast.packets + counters->tx_bcast.packets +
6287 | counters->rx_ucast.packets + counters->tx_ucast.packets +
6288 | counters->rx_err.packets + counters->tx_err.packets)
6289 | printf("%s: err %llx/%llx uc %llx/%llx bc %llx/%llx\n",
6290 | DEVNAME(sc)((sc)->sc_dev.dv_xname),
6291 | betoh64(counters->tx_err.packets)(__uint64_t)(__builtin_constant_p(counters->tx_err.packets ) ? (__uint64_t)((((__uint64_t)(counters->tx_err.packets) & 0xff) << 56) | ((__uint64_t)(counters->tx_err.packets ) & 0xff00ULL) << 40 | ((__uint64_t)(counters->tx_err .packets) & 0xff0000ULL) << 24 | ((__uint64_t)(counters ->tx_err.packets) & 0xff000000ULL) << 8 | ((__uint64_t )(counters->tx_err.packets) & 0xff00000000ULL) >> 8 | ((__uint64_t)(counters->tx_err.packets) & 0xff0000000000ULL ) >> 24 | ((__uint64_t)(counters->tx_err.packets) & 0xff000000000000ULL) >> 40 | ((__uint64_t)(counters-> tx_err.packets) & 0xff00000000000000ULL) >> 56) : __swap64md (counters->tx_err.packets)),
6292 | betoh64(counters->rx_err.packets)(__uint64_t)(__builtin_constant_p(counters->rx_err.packets ) ? (__uint64_t)((((__uint64_t)(counters->rx_err.packets) & 0xff) << 56) | ((__uint64_t)(counters->rx_err.packets ) & 0xff00ULL) << 40 | ((__uint64_t)(counters->rx_err .packets) & 0xff0000ULL) << 24 | ((__uint64_t)(counters ->rx_err.packets) & 0xff000000ULL) << 8 | ((__uint64_t )(counters->rx_err.packets) & 0xff00000000ULL) >> 8 | ((__uint64_t)(counters->rx_err.packets) & 0xff0000000000ULL ) >> 24 | ((__uint64_t)(counters->rx_err.packets) & 0xff000000000000ULL) >> 40 | ((__uint64_t)(counters-> rx_err.packets) & 0xff00000000000000ULL) >> 56) : __swap64md (counters->rx_err.packets)),
6293 | betoh64(counters->tx_ucast.packets)(__uint64_t)(__builtin_constant_p(counters->tx_ucast.packets ) ? (__uint64_t)((((__uint64_t)(counters->tx_ucast.packets ) & 0xff) << 56) | ((__uint64_t)(counters->tx_ucast .packets) & 0xff00ULL) << 40 | ((__uint64_t)(counters ->tx_ucast.packets) & 0xff0000ULL) << 24 | ((__uint64_t )(counters->tx_ucast.packets) & 0xff000000ULL) << 8 | ((__uint64_t)(counters->tx_ucast.packets) & 0xff00000000ULL ) >> 8 | ((__uint64_t)(counters->tx_ucast.packets) & 0xff0000000000ULL) >> 24 | ((__uint64_t)(counters-> tx_ucast.packets) & 0xff000000000000ULL) >> 40 | (( __uint64_t)(counters->tx_ucast.packets) & 0xff00000000000000ULL ) >> 56) : __swap64md(counters->tx_ucast.packets)),
6294 | betoh64(counters->rx_ucast.packets)(__uint64_t)(__builtin_constant_p(counters->rx_ucast.packets ) ? (__uint64_t)((((__uint64_t)(counters->rx_ucast.packets ) & 0xff) << 56) | ((__uint64_t)(counters->rx_ucast .packets) & 0xff00ULL) << 40 | ((__uint64_t)(counters ->rx_ucast.packets) & 0xff0000ULL) << 24 | ((__uint64_t )(counters->rx_ucast.packets) & 0xff000000ULL) << 8 | ((__uint64_t)(counters->rx_ucast.packets) & 0xff00000000ULL ) >> 8 | ((__uint64_t)(counters->rx_ucast.packets) & 0xff0000000000ULL) >> 24 | ((__uint64_t)(counters-> rx_ucast.packets) & 0xff000000000000ULL) >> 40 | (( __uint64_t)(counters->rx_ucast.packets) & 0xff00000000000000ULL ) >> 56) : __swap64md(counters->rx_ucast.packets)),
6295 | betoh64(counters->tx_bcast.packets)(__uint64_t)(__builtin_constant_p(counters->tx_bcast.packets ) ? (__uint64_t)((((__uint64_t)(counters->tx_bcast.packets ) & 0xff) << 56) | ((__uint64_t)(counters->tx_bcast .packets) & 0xff00ULL) << 40 | ((__uint64_t)(counters ->tx_bcast.packets) & 0xff0000ULL) << 24 | ((__uint64_t )(counters->tx_bcast.packets) & 0xff000000ULL) << 8 | ((__uint64_t)(counters->tx_bcast.packets) & 0xff00000000ULL ) >> 8 | ((__uint64_t)(counters->tx_bcast.packets) & 0xff0000000000ULL) >> 24 | ((__uint64_t)(counters-> tx_bcast.packets) & 0xff000000000000ULL) >> 40 | (( __uint64_t)(counters->tx_bcast.packets) & 0xff00000000000000ULL ) >> 56) : __swap64md(counters->tx_bcast.packets)),
6296 | betoh64(counters->rx_bcast.packets)(__uint64_t)(__builtin_constant_p(counters->rx_bcast.packets ) ? (__uint64_t)((((__uint64_t)(counters->rx_bcast.packets ) & 0xff) << 56) | ((__uint64_t)(counters->rx_bcast .packets) & 0xff00ULL) << 40 | ((__uint64_t)(counters ->rx_bcast.packets) & 0xff0000ULL) << 24 | ((__uint64_t )(counters->rx_bcast.packets) & 0xff000000ULL) << 8 | ((__uint64_t)(counters->rx_bcast.packets) & 0xff00000000ULL ) >> 8 | ((__uint64_t)(counters->rx_bcast.packets) & 0xff0000000000ULL) >> 24 | ((__uint64_t)(counters-> rx_bcast.packets) & 0xff000000000000ULL) >> 40 | (( __uint64_t)(counters->rx_bcast.packets) & 0xff00000000000000ULL ) >> 56) : __swap64md(counters->rx_bcast.packets)));
6297 | free:
6298 | mcx_dmamem_free(sc, &mxm);
6299 | 
6300 | return (error);
6301 | }
6302 | |
/*
 * mcx_dump_flow_counter: debug-only helper (compiled out under #if 0).
 * Queries (and clears) the flow counter at sc_flow_counter_id[index]
 * (QUERY_FLOW_COUNTER, 0x93b) and prints its packet count, tagged with
 * the caller-supplied label 'what', when non-zero.
 * Returns 0 on success, -1 or the cmdq error otherwise.
 */
6303 | static int
6304 | mcx_dump_flow_counter(struct mcx_softc *sc, int index, const char *what)
6305 | {
6306 | struct mcx_dmamem mxm;
6307 | struct mcx_cmdq_entry *cqe;
6308 | struct mcx_cmd_query_flow_counter_in *in;
6309 | struct mcx_cmd_query_flow_counter_mb_in *mbin;
6310 | struct mcx_cmd_query_flow_counter_out *out;
6311 | struct mcx_counter *counters;
6312 | int error, token;
6313 | 
6314 | cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem)((void *)(&sc->sc_cmdq_mem)->mxm_kva);
6315 | token = mcx_cmdq_token(sc);
6316 | mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out) +
6317 | sizeof(*counters), token);
6318 | 
6319 | in = mcx_cmdq_in(cqe);
6320 | in->cmd_opcode = htobe16(MCX_CMD_QUERY_FLOW_COUNTER)(__uint16_t)(__builtin_constant_p(0x93b) ? (__uint16_t)(((__uint16_t )(0x93b) & 0xffU) << 8 | ((__uint16_t)(0x93b) & 0xff00U) >> 8) : __swap16md(0x93b));
6321 | in->cmd_op_mod = htobe16(0)(__uint16_t)(__builtin_constant_p(0) ? (__uint16_t)(((__uint16_t )(0) & 0xffU) << 8 | ((__uint16_t)(0) & 0xff00U ) >> 8) : __swap16md(0));
6322 | 
6323 | if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
6324 | &cqe->cq_output_ptr, token) != 0) {
6325 | printf(", unable to allocate query flow counter mailboxen\n");
6326 | return (-1);
6327 | }
/* same mailbox for input and output; 0x80 clears the counter on read */
6328 | cqe->cq_input_ptr = cqe->cq_output_ptr;
6329 | mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
6330 | mbin->cmd_flow_counter_id = htobe16(sc->sc_flow_counter_id[index])(__uint16_t)(__builtin_constant_p(sc->sc_flow_counter_id[index ]) ? (__uint16_t)(((__uint16_t)(sc->sc_flow_counter_id[index ]) & 0xffU) << 8 | ((__uint16_t)(sc->sc_flow_counter_id [index]) & 0xff00U) >> 8) : __swap16md(sc->sc_flow_counter_id [index]));
6331 | mbin->cmd_clear = 0x80;
6332 | 
6333 | mcx_cmdq_mboxes_sign(&mxm, 1);
6334 | mcx_cmdq_post(sc, cqe, 0);
6335 | 
6336 | error = mcx_cmdq_poll(sc, cqe, 1000);
6337 | if (error != 0) {
6338 | printf("%s: query flow counter timeout\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
6339 | goto free;
6340 | }
/*
 * NOTE(review): verify result discarded; 'error' stays 0 here so a
 * corrupt reply returns success — sibling functions assign
 * error = mcx_cmdq_verify(cqe).  Confirm intent.
 */
6341 | if (mcx_cmdq_verify(cqe) != 0) {
6342 | printf("%s: query flow counter command corrupt\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
6343 | goto free;
6344 | }
6345 | 
6346 | out = mcx_cmdq_out(cqe);
6347 | if (out->cmd_status != MCX_CQ_STATUS_OK(0x00 << 1)) {
6348 | printf("%s: query flow counter failed (%x, %x)\n", DEVNAME(sc)((sc)->sc_dev.dv_xname),
6349 | out->cmd_status, betoh32(out->cmd_syndrome)(__uint32_t)(__builtin_constant_p(out->cmd_syndrome) ? (__uint32_t )(((__uint32_t)(out->cmd_syndrome) & 0xff) << 24 | ((__uint32_t)(out->cmd_syndrome) & 0xff00) << 8 | ((__uint32_t)(out->cmd_syndrome) & 0xff0000) >> 8 | ((__uint32_t)(out->cmd_syndrome) & 0xff000000) >> 24) : __swap32md(out->cmd_syndrome)));
6350 | error = -1;
6351 | goto free;
6352 | }
6353 | 
6354 | counters = (struct mcx_counter *)
6355 | (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
6356 | if (counters->packets)
6357 | printf("%s: %s inflow %llx\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), what,
6358 | betoh64(counters->packets)(__uint64_t)(__builtin_constant_p(counters->packets) ? (__uint64_t )((((__uint64_t)(counters->packets) & 0xff) << 56 ) | ((__uint64_t)(counters->packets) & 0xff00ULL) << 40 | ((__uint64_t)(counters->packets) & 0xff0000ULL) << 24 | ((__uint64_t)(counters->packets) & 0xff000000ULL ) << 8 | ((__uint64_t)(counters->packets) & 0xff00000000ULL ) >> 8 | ((__uint64_t)(counters->packets) & 0xff0000000000ULL ) >> 24 | ((__uint64_t)(counters->packets) & 0xff000000000000ULL ) >> 40 | ((__uint64_t)(counters->packets) & 0xff00000000000000ULL ) >> 56) : __swap64md(counters->packets)));
6359 | free:
6360 | mcx_dmamem_free(sc, &mxm);
6361 | 
6362 | return (error);
6363 | }
6364 | |
6365 | #endif |
6366 | |
6367 | #if NKSTAT0 > 0 |
6368 | |
/*
 * mcx_query_rq: fetch the firmware's receive queue context for 'rx'
 * (QUERY_RQ, 0x90b) into caller-supplied *rq_ctx.  Used by the kstat
 * code (NKSTAT > 0) to report queue state.
 * Returns 0 on success, -1 or the cmdq error otherwise; *rq_ctx is only
 * written on success.
 */
6369 | int
6370 | mcx_query_rq(struct mcx_softc *sc, struct mcx_rx *rx, struct mcx_rq_ctx *rq_ctx)
6371 | {
6372 | struct mcx_dmamem mxm;
6373 | struct mcx_cmdq_entry *cqe;
6374 | struct mcx_cmd_query_rq_in *in;
6375 | struct mcx_cmd_query_rq_out *out;
6376 | struct mcx_cmd_query_rq_mb_out *mbout;
6377 | uint8_t token = mcx_cmdq_token(sc);
6378 | int error;
6379 | 
6380 | cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem)((void *)(&sc->sc_cmdq_mem)->mxm_kva);
6381 | mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + sizeof(*mbout) + 16,
6382 | token);
6383 | 
6384 | in = mcx_cmdq_in(cqe);
6385 | in->cmd_opcode = htobe16(MCX_CMD_QUERY_RQ)(__uint16_t)(__builtin_constant_p(0x90b) ? (__uint16_t)(((__uint16_t )(0x90b) & 0xffU) << 8 | ((__uint16_t)(0x90b) & 0xff00U) >> 8) : __swap16md(0x90b));
6386 | in->cmd_op_mod = htobe16(0)(__uint16_t)(__builtin_constant_p(0) ? (__uint16_t)(((__uint16_t )(0) & 0xffU) << 8 | ((__uint16_t)(0) & 0xff00U ) >> 8) : __swap16md(0));
6387 | in->cmd_rqn = htobe32(rx->rx_rqn)(__uint32_t)(__builtin_constant_p(rx->rx_rqn) ? (__uint32_t )(((__uint32_t)(rx->rx_rqn) & 0xff) << 24 | ((__uint32_t )(rx->rx_rqn) & 0xff00) << 8 | ((__uint32_t)(rx-> rx_rqn) & 0xff0000) >> 8 | ((__uint32_t)(rx->rx_rqn ) & 0xff000000) >> 24) : __swap32md(rx->rx_rqn));
6388 | 
/* output may span two mailboxes, but only the first one is consumed */
6389 | CTASSERT(sizeof(*mbout) <= MCX_CMDQ_MAILBOX_DATASIZE*2)extern char _ctassert[(sizeof(*mbout) <= 512*2) ? 1 : -1 ] __attribute__((__unused__));
6390 | if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
6391 | &cqe->cq_output_ptr, token) != 0) {
6392 | printf("%s: unable to allocate query rq mailboxes\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
6393 | return (-1);
6394 | }
6395 | 
6396 | mcx_cmdq_mboxes_sign(&mxm, 1);
6397 | 
6398 | mcx_cmdq_post(sc, cqe, 0);
6399 | error = mcx_cmdq_poll(sc, cqe, 1000);
6400 | if (error != 0) {
6401 | printf("%s: query rq timeout\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
6402 | goto free;
6403 | }
6404 | error = mcx_cmdq_verify(cqe);
6405 | if (error != 0) {
6406 | printf("%s: query rq reply corrupt\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
6407 | goto free;
6408 | }
6409 | 
6410 | out = mcx_cmdq_out(cqe);
6411 | switch (out->cmd_status) {
6412 | case MCX_CQ_STATUS_OK(0x00 << 1):
6413 | break;
6414 | default:
6415 | printf("%s: query rq failed (%x/%x)\n", DEVNAME(sc)((sc)->sc_dev.dv_xname),
6416 | out->cmd_status, betoh32(out->cmd_syndrome)(__uint32_t)(__builtin_constant_p(out->cmd_syndrome) ? (__uint32_t )(((__uint32_t)(out->cmd_syndrome) & 0xff) << 24 | ((__uint32_t)(out->cmd_syndrome) & 0xff00) << 8 | ((__uint32_t)(out->cmd_syndrome) & 0xff0000) >> 8 | ((__uint32_t)(out->cmd_syndrome) & 0xff000000) >> 24) : __swap32md(out->cmd_syndrome)));
6417 | error = -1;
6418 | goto free;
6419 | }
6420 | 
/* copy the RQ context out of the mailbox before freeing it */
6421 | mbout = (struct mcx_cmd_query_rq_mb_out *)
6422 | (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
6423 | memcpy(rq_ctx, &mbout->cmd_ctx, sizeof(*rq_ctx))__builtin_memcpy((rq_ctx), (&mbout->cmd_ctx), (sizeof( *rq_ctx)));
6424 | 
6425 | free:
6426 | mcx_cq_mboxes_free(sc, &mxm);
6427 | return (error);
6428 | }
6429 | |
/*
 * mcx_query_sq: fetch the firmware's send queue context for 'tx'
 * (QUERY_SQ, 0x907) into caller-supplied *sq_ctx.  Mirrors mcx_query_rq()
 * above; used by the kstat code (NKSTAT > 0).
 * Returns 0 on success, -1 or the cmdq error otherwise; *sq_ctx is only
 * written on success.
 */
6430 | int
6431 | mcx_query_sq(struct mcx_softc *sc, struct mcx_tx *tx, struct mcx_sq_ctx *sq_ctx)
6432 | {
6433 | struct mcx_dmamem mxm;
6434 | struct mcx_cmdq_entry *cqe;
6435 | struct mcx_cmd_query_sq_in *in;
6436 | struct mcx_cmd_query_sq_out *out;
6437 | struct mcx_cmd_query_sq_mb_out *mbout;
6438 | uint8_t token = mcx_cmdq_token(sc);
6439 | int error;
6440 | 
6441 | cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem)((void *)(&sc->sc_cmdq_mem)->mxm_kva);
6442 | mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + sizeof(*mbout) + 16,
6443 | token);
6444 | 
6445 | in = mcx_cmdq_in(cqe);
6446 | in->cmd_opcode = htobe16(MCX_CMD_QUERY_SQ)(__uint16_t)(__builtin_constant_p(0x907) ? (__uint16_t)(((__uint16_t )(0x907) & 0xffU) << 8 | ((__uint16_t)(0x907) & 0xff00U) >> 8) : __swap16md(0x907));
6447 | in->cmd_op_mod = htobe16(0)(__uint16_t)(__builtin_constant_p(0) ? (__uint16_t)(((__uint16_t )(0) & 0xffU) << 8 | ((__uint16_t)(0) & 0xff00U ) >> 8) : __swap16md(0));
6448 | in->cmd_sqn = htobe32(tx->tx_sqn)(__uint32_t)(__builtin_constant_p(tx->tx_sqn) ? (__uint32_t )(((__uint32_t)(tx->tx_sqn) & 0xff) << 24 | ((__uint32_t )(tx->tx_sqn) & 0xff00) << 8 | ((__uint32_t)(tx-> tx_sqn) & 0xff0000) >> 8 | ((__uint32_t)(tx->tx_sqn ) & 0xff000000) >> 24) : __swap32md(tx->tx_sqn));
6449 | 
/* output may span two mailboxes, but only the first one is consumed */
6450 | CTASSERT(sizeof(*mbout) <= MCX_CMDQ_MAILBOX_DATASIZE*2)extern char _ctassert[(sizeof(*mbout) <= 512*2) ? 1 : -1 ] __attribute__((__unused__));
6451 | if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
6452 | &cqe->cq_output_ptr, token) != 0) {
6453 | printf("%s: unable to allocate query sq mailboxes\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
6454 | return (-1);
6455 | }
6456 | 
6457 | mcx_cmdq_mboxes_sign(&mxm, 1);
6458 | 
6459 | mcx_cmdq_post(sc, cqe, 0);
6460 | error = mcx_cmdq_poll(sc, cqe, 1000);
6461 | if (error != 0) {
6462 | printf("%s: query sq timeout\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
6463 | goto free;
6464 | }
6465 | error = mcx_cmdq_verify(cqe);
6466 | if (error != 0) {
6467 | printf("%s: query sq reply corrupt\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
6468 | goto free;
6469 | }
6470 | 
6471 | out = mcx_cmdq_out(cqe);
6472 | switch (out->cmd_status) {
6473 | case MCX_CQ_STATUS_OK(0x00 << 1):
6474 | break;
6475 | default:
6476 | printf("%s: query sq failed (%x/%x)\n", DEVNAME(sc)((sc)->sc_dev.dv_xname),
6477 | out->cmd_status, betoh32(out->cmd_syndrome)(__uint32_t)(__builtin_constant_p(out->cmd_syndrome) ? (__uint32_t )(((__uint32_t)(out->cmd_syndrome) & 0xff) << 24 | ((__uint32_t)(out->cmd_syndrome) & 0xff00) << 8 | ((__uint32_t)(out->cmd_syndrome) & 0xff0000) >> 8 | ((__uint32_t)(out->cmd_syndrome) & 0xff000000) >> 24) : __swap32md(out->cmd_syndrome)));
6478 | error = -1;
6479 | goto free;
6480 | }
6481 | 
/* copy the SQ context out of the mailbox before freeing it */
6482 | mbout = (struct mcx_cmd_query_sq_mb_out *)
6483 | (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
6484 | memcpy(sq_ctx, &mbout->cmd_ctx, sizeof(*sq_ctx))__builtin_memcpy((sq_ctx), (&mbout->cmd_ctx), (sizeof( *sq_ctx)));
6485 | 
6486 | free:
6487 | mcx_cq_mboxes_free(sc, &mxm);
6488 | return (error);
6489 | }
6490 | |
/*
 * Read back the device context of a completion queue with the
 * QUERY_CQ firmware command.  The context arrives in a command
 * mailbox and is copied into *cq_ctx.  Returns 0 on success, -1 on
 * failure.
 */
int
mcx_query_cq(struct mcx_softc *sc, struct mcx_cq *cq, struct mcx_cq_ctx *cq_ctx)
{
	struct mcx_dmamem mxm;
	struct mcx_cmdq_entry *cqe;
	struct mcx_cmd_query_cq_in *in;
	struct mcx_cmd_query_cq_out *out;
	struct mcx_cq_ctx *ctx;
	uint8_t token = mcx_cmdq_token(sc);
	int error;

	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + sizeof(*ctx) + 16,
	    token);

	in = mcx_cmdq_in(cqe);
	in->cmd_opcode = htobe16(MCX_CMD_QUERY_CQ);
	in->cmd_op_mod = htobe16(0);
	in->cmd_cqn = htobe32(cq->cq_n);

	/* the mailbox output must fit in the two mailboxes allocated below */
	CTASSERT(sizeof(*ctx) <= MCX_CMDQ_MAILBOX_DATASIZE*2);
	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
	    &cqe->cq_output_ptr, token) != 0) {
		printf("%s: unable to allocate query cq mailboxes\n", DEVNAME(sc));
		return (-1);
	}

	mcx_cmdq_mboxes_sign(&mxm, 1);

	/* post the command and busy-wait up to 1000ms for completion */
	mcx_cmdq_post(sc, cqe, 0);
	error = mcx_cmdq_poll(sc, cqe, 1000);
	if (error != 0) {
		printf("%s: query cq timeout\n", DEVNAME(sc));
		goto free;
	}
	error = mcx_cmdq_verify(cqe);
	if (error != 0) {
		printf("%s: query cq reply corrupt\n", DEVNAME(sc));
		goto free;
	}

	out = mcx_cmdq_out(cqe);
	switch (out->cmd_status) {
	case MCX_CQ_STATUS_OK:
		break;
	default:
		printf("%s: query cq failed (%x/%x)\n", DEVNAME(sc),
		    out->cmd_status, betoh32(out->cmd_syndrome));
		error = -1;
		goto free;
	}

	/* the queue context lives in the first mailbox */
	ctx = (struct mcx_cq_ctx *)(mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
	memcpy(cq_ctx, ctx, sizeof(*cq_ctx));
free:
	mcx_cq_mboxes_free(sc, &mxm);
	return (error);
}
6549 | |
/*
 * Read back the device context of an event queue with the QUERY_EQ
 * firmware command.  The context arrives in a command mailbox and is
 * copied into *eq_ctx.  Returns 0 on success, -1 on failure.
 */
int
mcx_query_eq(struct mcx_softc *sc, struct mcx_eq *eq, struct mcx_eq_ctx *eq_ctx)
{
	struct mcx_dmamem mxm;
	struct mcx_cmdq_entry *cqe;
	struct mcx_cmd_query_eq_in *in;
	struct mcx_cmd_query_eq_out *out;
	struct mcx_eq_ctx *ctx;
	uint8_t token = mcx_cmdq_token(sc);
	int error;

	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + sizeof(*ctx) + 16,
	    token);

	in = mcx_cmdq_in(cqe);
	in->cmd_opcode = htobe16(MCX_CMD_QUERY_EQ);
	in->cmd_op_mod = htobe16(0);
	in->cmd_eqn = htobe32(eq->eq_n);

	/* the mailbox output must fit in the two mailboxes allocated below */
	CTASSERT(sizeof(*ctx) <= MCX_CMDQ_MAILBOX_DATASIZE*2);
	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
	    &cqe->cq_output_ptr, token) != 0) {
		printf("%s: unable to allocate query eq mailboxes\n", DEVNAME(sc));
		return (-1);
	}

	mcx_cmdq_mboxes_sign(&mxm, 1);

	/* post the command and busy-wait up to 1000ms for completion */
	mcx_cmdq_post(sc, cqe, 0);
	error = mcx_cmdq_poll(sc, cqe, 1000);
	if (error != 0) {
		printf("%s: query eq timeout\n", DEVNAME(sc));
		goto free;
	}
	error = mcx_cmdq_verify(cqe);
	if (error != 0) {
		printf("%s: query eq reply corrupt\n", DEVNAME(sc));
		goto free;
	}

	out = mcx_cmdq_out(cqe);
	switch (out->cmd_status) {
	case MCX_CQ_STATUS_OK:
		break;
	default:
		printf("%s: query eq failed (%x/%x)\n", DEVNAME(sc),
		    out->cmd_status, betoh32(out->cmd_syndrome));
		error = -1;
		goto free;
	}

	/* the queue context lives in the first mailbox */
	ctx = (struct mcx_eq_ctx *)(mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
	memcpy(eq_ctx, ctx, sizeof(*eq_ctx));
free:
	mcx_cq_mboxes_free(sc, &mxm);
	return (error);
}
6608 | |
6609 | #endif /* NKSTAT > 0 */ |
6610 | |
/*
 * Fill up to nslots receive queue entries with freshly allocated
 * mbuf clusters, advance the producer index, and ring the RQ
 * doorbell so the hardware sees the new buffers.  Returns the number
 * of slots that could NOT be filled (0 means everything was posted).
 */
static inline unsigned int
mcx_rx_fill_slots(struct mcx_softc *sc, struct mcx_rx *rx, uint nslots)
{
	struct mcx_rq_entry *ring, *rqe;
	struct mcx_slot *ms;
	struct mbuf *m;
	uint slot, p, fills;

	ring = MCX_DMA_KVA(&rx->rx_rq_mem);
	p = rx->rx_prod;

	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&rx->rx_rq_mem),
	    0, MCX_DMA_LEN(&rx->rx_rq_mem), BUS_DMASYNC_POSTWRITE);

	for (fills = 0; fills < nslots; fills++) {
		/* the producer index wraps around the power-of-two ring */
		slot = p % (1 << MCX_LOG_RQ_SIZE);

		ms = &rx->rx_slots[slot];
		rqe = &ring[slot];

		m = MCLGETL(NULL, M_DONTWAIT, sc->sc_rxbufsz);
		if (m == NULL)
			break;

		/*
		 * position the payload at the end of the cluster and
		 * align the ethernet header for the stack.
		 */
		m->m_data += (m->m_ext.ext_size - sc->sc_rxbufsz);
		m->m_data += ETHER_ALIGN;
		m->m_len = m->m_pkthdr.len = sc->sc_hardmtu;

		if (bus_dmamap_load_mbuf(sc->sc_dmat, ms->ms_map, m,
		    BUS_DMA_NOWAIT) != 0) {
			m_freem(m);
			break;
		}
		ms->ms_m = m;

		/* describe the buffer to the hardware, big-endian */
		htobem32(&rqe->rqe_byte_count, ms->ms_map->dm_segs[0].ds_len);
		htobem64(&rqe->rqe_addr, ms->ms_map->dm_segs[0].ds_addr);
		htobem32(&rqe->rqe_lkey, sc->sc_lkey);

		p++;
	}

	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&rx->rx_rq_mem),
	    0, MCX_DMA_LEN(&rx->rx_rq_mem), BUS_DMASYNC_PREWRITE);

	rx->rx_prod = p;

	/* update the doorbell record with the new producer index */
	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
	    rx->rx_doorbell, sizeof(uint32_t), BUS_DMASYNC_POSTWRITE);
	htobem32(MCX_DMA_OFF(&sc->sc_doorbell_mem, rx->rx_doorbell),
	    p & MCX_WQ_DOORBELL_MASK);
	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
	    rx->rx_doorbell, sizeof(uint32_t), BUS_DMASYNC_PREWRITE);

	return (nslots - fills);
}
6667 | |
/*
 * Take slots from the receive ring accounting and post buffers for
 * them.  Returns 1 if no slots were available (caller should retry
 * later), 0 otherwise.
 */
int
mcx_rx_fill(struct mcx_softc *sc, struct mcx_rx *rx)
{
	u_int slots;

	slots = if_rxr_get(&rx->rx_rxr, (1 << MCX_LOG_RQ_SIZE));
	if (slots == 0)
		return (1);

	/* return any slots we failed to fill back to the ring counter */
	slots = mcx_rx_fill_slots(sc, rx, slots);
	if_rxr_put(&rx->rx_rxr, slots);
	return (0);
}
6681 | |
/*
 * Timeout handler: retry filling the receive ring.  If still no
 * buffers are in use afterwards, reschedule ourselves for the next
 * tick so the ring eventually recovers from mbuf starvation.
 */
void
mcx_refill(void *xrx)
{
	struct mcx_rx *rx = xrx;
	struct mcx_softc *sc = rx->rx_softc;

	mcx_rx_fill(sc, rx);

	if (if_rxr_inuse(&rx->rx_rxr) == 0)
		timeout_add(&rx->rx_refill, 1);
}
6693 | |
/*
 * Handle a transmit completion: unload and free the mbuf for the
 * completed send queue entry.  Returns the number of SQ slots the
 * packet occupied so the caller can credit them back to the ring.
 */
static int
mcx_process_txeof(struct mcx_softc *sc, struct mcx_tx *tx,
    struct mcx_cq_entry *cqe)
{
	struct mcx_slot *ms;
	bus_dmamap_t map;
	int slot, slots;

	slot = betoh16(cqe->cq_wqe_count) % (1 << MCX_LOG_SQ_SIZE);

	ms = &tx->tx_slots[slot];
	map = ms->ms_map;
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTWRITE);

	/*
	 * multi-segment packets use extra slots; the first slot holds
	 * fewer segments than the rest, hence the +2 rounding.
	 */
	slots = 1;
	if (map->dm_nsegs > 1)
		slots += (map->dm_nsegs+2) / MCX_SQ_SEGS_PER_SLOT;

	bus_dmamap_unload(sc->sc_dmat, map);
	m_freem(ms->ms_m);
	ms->ms_m = NULL;

	return (slots);
}
6719 | |
/*
 * Return the system uptime in nanoseconds.
 */
static uint64_t
mcx_uptime(void)
{
	struct timespec ts;

	nanouptime(&ts);

	return ((uint64_t)ts.tv_sec * 1000000000 + (uint64_t)ts.tv_nsec);
}
6729 | |
/*
 * Take the initial timestamp-calibration sample: record matching
 * (uptime, device timer) base values in calibration slot 0.  The
 * ratio is left at 0 until a second sample exists.
 */
static void
mcx_calibrate_first(struct mcx_softc *sc)
{
	struct mcx_calibration *c = &sc->sc_calibration[0];
	int s;

	sc->sc_calibration_gen = 0;

	/* block interrupts so the two clock reads are taken close together */
	s = splhigh(); /* crit_enter? */
	c->c_ubase = mcx_uptime();
	c->c_tbase = mcx_timer(sc);
	splx(s);
	c->c_ratio = 0;

#ifdef notyet
	timeout_add_sec(&sc->sc_calibrate, MCX_CALIBRATE_FIRST);
#endif
}
6748 | |
6749 | #define MCX_TIMESTAMP_SHIFT24 24 |
6750 | |
/*
 * Periodic timestamp calibration: sample the system clock and the
 * device timer, compute the ratio between them, and publish the new
 * sample by bumping the generation counter.  Readers pick the
 * current slot via sc_calibration_gen, so the two-slot array lets
 * them keep using the old sample while this one is being written.
 */
static void
mcx_calibrate(void *arg)
{
	struct mcx_softc *sc = arg;
	struct mcx_calibration *nc, *pc;
	uint64_t udiff, tdiff;
	unsigned int gen;
	int s;

	if (!ISSET(sc->sc_ac.ac_if.if_flags, IFF_RUNNING))
		return;

	timeout_add_sec(&sc->sc_calibrate, MCX_CALIBRATE_NORMAL);

	gen = sc->sc_calibration_gen;
	pc = &sc->sc_calibration[gen % nitems(sc->sc_calibration)];
	gen++;
	nc = &sc->sc_calibration[gen % nitems(sc->sc_calibration)];

	/* the previous sample's base values become this sample's start */
	nc->c_uptime = pc->c_ubase;
	nc->c_timestamp = pc->c_tbase;

	/* block interrupts so the two clock reads are taken close together */
	s = splhigh(); /* crit_enter? */
	nc->c_ubase = mcx_uptime();
	nc->c_tbase = mcx_timer(sc);
	splx(s);

	udiff = nc->c_ubase - nc->c_uptime;
	tdiff = nc->c_tbase - nc->c_timestamp;

	/*
	 * udiff is the wall clock time between calibration ticks,
	 * which should be 32 seconds or 32 billion nanoseconds. if
	 * we squint, 1 billion nanoseconds is kind of like a 32 bit
	 * number, so 32 billion should still have a lot of high bits
	 * spare. we use this space by shifting the nanoseconds up
	 * 24 bits so we have a nice big number to divide by the
	 * number of mcx timer ticks.
	 */
	nc->c_ratio = (udiff << MCX_TIMESTAMP_SHIFT) / tdiff;

	/* make the sample visible before publishing the new generation */
	membar_producer();
	sc->sc_calibration_gen = gen;
}
6795 | |
/*
 * Handle a receive completion: detach the mbuf from its slot, set
 * packet metadata (length, flow id, checksum status, VLAN tag) from
 * the completion entry, and queue it on ml for input to the stack.
 * Returns 1, the number of RQ slots consumed.
 */
static int
mcx_process_rx(struct mcx_softc *sc, struct mcx_rx *rx,
    struct mcx_cq_entry *cqe, struct mbuf_list *ml,
    const struct mcx_calibration *c)
{
	struct mcx_slot *ms;
	struct mbuf *m;
	uint32_t flags, len;
	int slot;

	len = bemtoh32(&cqe->cq_byte_cnt);
	slot = betoh16(cqe->cq_wqe_count) % (1 << MCX_LOG_RQ_SIZE);

	ms = &rx->rx_slots[slot];
	bus_dmamap_sync(sc->sc_dmat, ms->ms_map, 0, len, BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->sc_dmat, ms->ms_map);

	m = ms->ms_m;
	ms->ms_m = NULL;

	m->m_pkthdr.len = m->m_len = len;

	/* propagate the hardware RSS hash as the packet's flow id */
	if (cqe->cq_rx_hash_type) {
		m->m_pkthdr.ph_flowid = betoh32(cqe->cq_rx_hash);
		m->m_pkthdr.csum_flags |= M_FLOWID;
	}

	/* translate hardware checksum verification results */
	flags = bemtoh32(&cqe->cq_flags);
	if (flags & MCX_CQ_ENTRY_FLAGS_L3_OK)
		m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
	if (flags & MCX_CQ_ENTRY_FLAGS_L4_OK)
		m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK |
		    M_UDP_CSUM_IN_OK;
#if NVLAN > 0
	/* hardware-stripped VLAN tag */
	if (flags & MCX_CQ_ENTRY_FLAGS_CV) {
		m->m_pkthdr.ether_vtag = (flags &
		    MCX_CQ_ENTRY_FLAGS_VLAN_MASK);
		m->m_flags |= M_VLANTAG;
	}
#endif

#ifdef notyet
	/*
	 * convert the device timestamp to uptime using the current
	 * calibration sample (see mcx_calibrate()).
	 */
	if (ISSET(sc->sc_ac.ac_if.if_flags, IFF_LINK0) && c->c_ratio) {
		uint64_t t = bemtoh64(&cqe->cq_timestamp);
		t -= c->c_timestamp;
		t *= c->c_ratio;
		t >>= MCX_TIMESTAMP_SHIFT;
		t += c->c_uptime;

		m->m_pkthdr.ph_timestamp = t;
		SET(m->m_pkthdr.csum_flags, M_TIMESTAMP);
	}
#endif

	ml_enqueue(ml, m);

	return (1);
}
6854 | |
/*
 * Return the next unprocessed completion queue entry, or NULL if the
 * queue is empty.  Ownership alternates each time the consumer index
 * wraps the ring, so an entry is ours when its owner bit matches the
 * wrap parity of cq_cons.
 */
static struct mcx_cq_entry *
mcx_next_cq_entry(struct mcx_softc *sc, struct mcx_cq *cq)
{
	struct mcx_cq_entry *cqe;
	int next;

	cqe = (struct mcx_cq_entry *)MCX_DMA_KVA(&cq->cq_mem);
	next = cq->cq_cons % (1 << MCX_LOG_CQ_SIZE);

	if ((cqe[next].cq_opcode_owner & MCX_CQ_ENTRY_FLAG_OWNER) ==
	    ((cq->cq_cons >> MCX_LOG_CQ_SIZE) & 1)) {
		return (&cqe[next]);
	}

	return (NULL);
}
6871 | |
/*
 * Re-arm a completion queue: update the CQ doorbell record in host
 * memory with the consumer index and arm sequence number, then write
 * the same value plus the CQ number to the UAR doorbell register so
 * the device generates an interrupt for the next completion.
 */
static void
mcx_arm_cq(struct mcx_softc *sc, struct mcx_cq *cq, int uar)
{
	struct mcx_cq_doorbell *db;
	bus_size_t offset;
	uint32_t val;
	uint64_t uval;

	val = ((cq->cq_count) & 3) << MCX_CQ_DOORBELL_ARM_CMD_SN_SHIFT;
	val |= (cq->cq_cons & MCX_CQ_DOORBELL_ARM_CI_MASK);

	db = MCX_DMA_OFF(&sc->sc_doorbell_mem, cq->cq_doorbell);

	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
	    cq->cq_doorbell, sizeof(*db), BUS_DMASYNC_POSTWRITE);

	htobem32(&db->db_update_ci, cq->cq_cons & MCX_CQ_DOORBELL_ARM_CI_MASK);
	htobem32(&db->db_arm_ci, val);

	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
	    cq->cq_doorbell, sizeof(*db), BUS_DMASYNC_PREWRITE);

	offset = (MCX_PAGE_SIZE * uar) + MCX_UAR_CQ_DOORBELL;

	/* the UAR doorbell takes the arm value and the CQ number together */
	uval = (uint64_t)val << 32;
	uval |= cq->cq_n;

	bus_space_write_raw_8(sc->sc_memt, sc->sc_memh, offset, htobe64(uval));
	mcx_bar(sc, offset, sizeof(uval), BUS_SPACE_BARRIER_WRITE);
}
6902 | |
6903 | void |
6904 | mcx_process_cq(struct mcx_softc *sc, struct mcx_queues *q, struct mcx_cq *cq) |
6905 | { |
6906 | struct mcx_rx *rx = &q->q_rx; |
6907 | struct mcx_tx *tx = &q->q_tx; |
6908 | const struct mcx_calibration *c; |
6909 | unsigned int gen; |
6910 | struct mcx_cq_entry *cqe; |
6911 | uint8_t *cqp; |
6912 | struct mbuf_list ml = MBUF_LIST_INITIALIZER(){ ((void *)0), ((void *)0), 0 }; |
6913 | int rxfree, txfree; |
6914 | |
6915 | gen = sc->sc_calibration_gen; |
6916 | membar_consumer()do { __asm volatile("" ::: "memory"); } while (0); |
6917 | c = &sc->sc_calibration[gen % nitems(sc->sc_calibration)(sizeof((sc->sc_calibration)) / sizeof((sc->sc_calibration )[0]))]; |
6918 | |
6919 | bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&cq->cq_mem),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& cq->cq_mem)->mxm_map)), (0), (((&cq->cq_mem)-> mxm_size)), (0x02)) |
6920 | 0, MCX_DMA_LEN(&cq->cq_mem), BUS_DMASYNC_POSTREAD)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& cq->cq_mem)->mxm_map)), (0), (((&cq->cq_mem)-> mxm_size)), (0x02)); |
6921 | |
6922 | rxfree = 0; |
6923 | txfree = 0; |
6924 | while ((cqe = mcx_next_cq_entry(sc, cq))) { |
6925 | uint8_t opcode; |
6926 | opcode = (cqe->cq_opcode_owner >> MCX_CQ_ENTRY_OPCODE_SHIFT4); |
6927 | switch (opcode) { |
6928 | case MCX_CQ_ENTRY_OPCODE_REQ0: |
6929 | txfree += mcx_process_txeof(sc, tx, cqe); |
6930 | break; |
6931 | case MCX_CQ_ENTRY_OPCODE_SEND2: |
6932 | rxfree += mcx_process_rx(sc, rx, cqe, &ml, c); |
6933 | break; |
6934 | case MCX_CQ_ENTRY_OPCODE_REQ_ERR13: |
6935 | case MCX_CQ_ENTRY_OPCODE_SEND_ERR14: |
6936 | cqp = (uint8_t *)cqe; |
Value stored to 'cqp' is never read | |
6937 | /* printf("%s: cq completion error: %x\n", |
6938 | DEVNAME(sc), cqp[0x37]); */ |
6939 | break; |
6940 | |
6941 | default: |
6942 | /* printf("%s: cq completion opcode %x??\n", |
6943 | DEVNAME(sc), opcode); */ |
6944 | break; |
6945 | } |
6946 | |
6947 | cq->cq_cons++; |
6948 | } |
6949 | |
6950 | bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&cq->cq_mem),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& cq->cq_mem)->mxm_map)), (0), (((&cq->cq_mem)-> mxm_size)), (0x01)) |
6951 | 0, MCX_DMA_LEN(&cq->cq_mem), BUS_DMASYNC_PREREAD)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& cq->cq_mem)->mxm_map)), (0), (((&cq->cq_mem)-> mxm_size)), (0x01)); |
6952 | |
6953 | if (rxfree > 0) { |
6954 | if_rxr_put(&rx->rx_rxr, rxfree)do { (&rx->rx_rxr)->rxr_alive -= (rxfree); } while ( 0); |
6955 | if (ifiq_input(rx->rx_ifiq, &ml)) |
6956 | if_rxr_livelocked(&rx->rx_rxr); |
6957 | |
6958 | mcx_rx_fill(sc, rx); |
6959 | if (if_rxr_inuse(&rx->rx_rxr)((&rx->rx_rxr)->rxr_alive) == 0) |
6960 | timeout_add(&rx->rx_refill, 1); |
6961 | } |
6962 | |
6963 | cq->cq_count++; |
6964 | mcx_arm_cq(sc, cq, q->q_uar); |
6965 | |
6966 | if (txfree > 0) { |
6967 | tx->tx_cons += txfree; |
6968 | if (ifq_is_oactive(tx->tx_ifq)) |
6969 | ifq_restart(tx->tx_ifq); |
6970 | } |
6971 | } |
6972 | |
6973 | |
/*
 * Re-arm an event queue by writing its number and consumer index to
 * the UAR EQ doorbell register, allowing the device to raise the
 * next interrupt for this EQ.
 */
static void
mcx_arm_eq(struct mcx_softc *sc, struct mcx_eq *eq, int uar)
{
	bus_size_t offset;
	uint32_t val;

	offset = (MCX_PAGE_SIZE * uar) + MCX_UAR_EQ_DOORBELL_ARM;
	val = (eq->eq_n << 24) | (eq->eq_cons & 0xffffff);

	mcx_wr(sc, offset, val);
	mcx_bar(sc, offset, sizeof(val), BUS_SPACE_BARRIER_WRITE);
}
6986 | |
/*
 * Return the next unprocessed event queue entry and advance the
 * consumer index, or return NULL if the queue is empty.  Like the CQ,
 * ownership is determined by comparing the entry's owner bit against
 * the wrap parity of eq_cons.
 */
static struct mcx_eq_entry *
mcx_next_eq_entry(struct mcx_softc *sc, struct mcx_eq *eq)
{
	struct mcx_eq_entry *eqe;
	int next;

	eqe = (struct mcx_eq_entry *)MCX_DMA_KVA(&eq->eq_mem);
	next = eq->eq_cons % (1 << MCX_LOG_EQ_SIZE);
	if ((eqe[next].eq_owner & 1) ==
	    ((eq->eq_cons >> MCX_LOG_EQ_SIZE) & 1)) {
		eq->eq_cons++;
		return (&eqe[next]);
	}
	return (NULL);
}
7002 | |
/*
 * Interrupt handler for the admin event queue.  Drains the EQ,
 * scheduling the port change task when a PORT_CHANGE event arrives;
 * other event types are currently acknowledged but otherwise
 * ignored.  Always claims the interrupt (returns 1).
 */
int
mcx_admin_intr(void *xsc)
{
	struct mcx_softc *sc = (struct mcx_softc *)xsc;
	struct mcx_eq *eq = &sc->sc_admin_eq;
	struct mcx_eq_entry *eqe;

	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&eq->eq_mem),
	    0, MCX_DMA_LEN(&eq->eq_mem), BUS_DMASYNC_POSTREAD);

	while ((eqe = mcx_next_eq_entry(sc, eq)) != NULL) {
		switch (eqe->eq_event_type) {
		case MCX_EVENT_TYPE_LAST_WQE:
			/* printf("%s: last wqe reached?\n", DEVNAME(sc)); */
			break;

		case MCX_EVENT_TYPE_CQ_ERROR:
			/* printf("%s: cq error\n", DEVNAME(sc)); */
			break;

		case MCX_EVENT_TYPE_CMD_COMPLETION:
			/* wakeup probably */
			break;

		case MCX_EVENT_TYPE_PORT_CHANGE:
			/* link state updates are handled in process context */
			task_add(systq, &sc->sc_port_change);
			break;

		default:
			/* printf("%s: something happened\n", DEVNAME(sc)); */
			break;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&eq->eq_mem),
	    0, MCX_DMA_LEN(&eq->eq_mem), BUS_DMASYNC_PREREAD);

	mcx_arm_eq(sc, eq, sc->sc_uar);

	return (1);
}
7044 | |
/*
 * Interrupt handler for a queue pair's event queue.  Drains the EQ
 * and processes the completion queue for each COMPLETION event that
 * names our CQ, then re-arms the EQ.  Always claims the interrupt
 * (returns 1).
 */
int
mcx_cq_intr(void *xq)
{
	struct mcx_queues *q = (struct mcx_queues *)xq;
	struct mcx_softc *sc = q->q_sc;
	struct mcx_eq *eq = &q->q_eq;
	struct mcx_eq_entry *eqe;
	int cqn;

	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&eq->eq_mem),
	    0, MCX_DMA_LEN(&eq->eq_mem), BUS_DMASYNC_POSTREAD);

	while ((eqe = mcx_next_eq_entry(sc, eq)) != NULL) {
		switch (eqe->eq_event_type) {
		case MCX_EVENT_TYPE_COMPLETION:
			/* the CQ number is carried in event data word 6 */
			cqn = betoh32(eqe->eq_event_data[6]);
			if (cqn == q->q_cq.cq_n)
				mcx_process_cq(sc, q, &q->q_cq);
			break;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&eq->eq_mem),
	    0, MCX_DMA_LEN(&eq->eq_mem), BUS_DMASYNC_PREREAD);

	mcx_arm_eq(sc, eq, q->q_uar);

	return (1);
}
7074 | |
7075 | static void |
7076 | mcx_free_slots(struct mcx_softc *sc, struct mcx_slot *slots, int allocated, |
7077 | int total) |
7078 | { |
7079 | struct mcx_slot *ms; |
7080 | |
7081 | int i = allocated; |
7082 | while (i-- > 0) { |
7083 | ms = &slots[i]; |
7084 | bus_dmamap_destroy(sc->sc_dmat, ms->ms_map)(*(sc->sc_dmat)->_dmamap_destroy)((sc->sc_dmat), (ms ->ms_map)); |
7085 | if (ms->ms_m != NULL((void *)0)) |
7086 | m_freem(ms->ms_m); |
7087 | } |
7088 | free(slots, M_DEVBUF2, total * sizeof(*ms)); |
7089 | } |
7090 | |
/*
 * Bring up one rx/tx queue pair: allocate slot arrays and DMA maps for
 * both rings, then create the completion queue, send queue and receive
 * queue on the device.  On failure everything created so far is unwound
 * via the labels at the bottom.  Returns 0 or ENOMEM.
 */
static int
mcx_queue_up(struct mcx_softc *sc, struct mcx_queues *q)
{
	struct mcx_rx *rx;
	struct mcx_tx *tx;
	struct mcx_slot *ms;
	int i;

	rx = &q->q_rx;
	rx->rx_slots = mallocarray(sizeof(*ms), (1 << MCX_LOG_RQ_SIZE),
	    M_DEVBUF, M_WAITOK | M_ZERO);
	if (rx->rx_slots == NULL) {
		printf("%s: failed to allocate rx slots\n", DEVNAME(sc));
		return ENOMEM;
	}

	/* one single-segment map per rx descriptor, sized for a full frame */
	for (i = 0; i < (1 << MCX_LOG_RQ_SIZE); i++) {
		ms = &rx->rx_slots[i];
		if (bus_dmamap_create(sc->sc_dmat, sc->sc_hardmtu, 1,
		    sc->sc_hardmtu, 0,
		    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
		    &ms->ms_map) != 0) {
			printf("%s: failed to allocate rx dma maps\n",
			    DEVNAME(sc));
			/* 'i' maps were created; free exactly that many */
			goto destroy_rx_slots;
		}
	}

	tx = &q->q_tx;
	tx->tx_slots = mallocarray(sizeof(*ms), (1 << MCX_LOG_SQ_SIZE),
	    M_DEVBUF, M_WAITOK | M_ZERO);
	if (tx->tx_slots == NULL) {
		printf("%s: failed to allocate tx slots\n", DEVNAME(sc));
		goto destroy_rx_slots;
	}

	/* tx maps may scatter across up to MCX_SQ_MAX_SEGMENTS segments */
	for (i = 0; i < (1 << MCX_LOG_SQ_SIZE); i++) {
		ms = &tx->tx_slots[i];
		if (bus_dmamap_create(sc->sc_dmat, sc->sc_hardmtu,
		    MCX_SQ_MAX_SEGMENTS, sc->sc_hardmtu, 0,
		    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
		    &ms->ms_map) != 0) {
			printf("%s: failed to allocate tx dma maps\n",
			    DEVNAME(sc));
			goto destroy_tx_slots;
		}
	}

	/* device objects: CQ first, then SQ and RQ which reference it */
	if (mcx_create_cq(sc, &q->q_cq, q->q_uar, q->q_index,
	    q->q_eq.eq_n) != 0)
		goto destroy_tx_slots;

	if (mcx_create_sq(sc, tx, q->q_uar, q->q_index, q->q_cq.cq_n)
	    != 0)
		goto destroy_cq;

	if (mcx_create_rq(sc, rx, q->q_index, q->q_cq.cq_n) != 0)
		goto destroy_sq;

	return 0;

destroy_sq:
	mcx_destroy_sq(sc, tx);
destroy_cq:
	mcx_destroy_cq(sc, &q->q_cq);
destroy_tx_slots:
	/* 'i' holds the number of tx maps successfully created */
	mcx_free_slots(sc, tx->tx_slots, i, (1 << MCX_LOG_SQ_SIZE));
	tx->tx_slots = NULL;

	/* all rx maps exist at this point; reset 'i' before falling through */
	i = (1 << MCX_LOG_RQ_SIZE);
destroy_rx_slots:
	mcx_free_slots(sc, rx->rx_slots, i, (1 << MCX_LOG_RQ_SIZE));
	rx->rx_slots = NULL;
	return ENOMEM;
}
7166 | |
7167 | static int |
7168 | mcx_rss_group_entry_count(struct mcx_softc *sc, int group) |
7169 | { |
7170 | int i; |
7171 | int count; |
7172 | |
7173 | count = 0; |
7174 | for (i = 0; i < nitems(mcx_rss_config)(sizeof((mcx_rss_config)) / sizeof((mcx_rss_config)[0])); i++) { |
7175 | if (mcx_rss_config[i].flow_group == group) |
7176 | count++; |
7177 | } |
7178 | |
7179 | return count; |
7180 | } |
7181 | |
/*
 * Bring the interface up: create the transport interface send object,
 * all queue pairs, the RSS and MAC flow tables/groups/entries, the RQT
 * and TIRs, then start the queues.  The order matters: flow tables must
 * exist before entries point at them, and the root flow table is only
 * installed once the MAC table is fully populated.  On any failure,
 * mcx_down() unwinds whatever was created.  Returns ENETRESET on
 * success (caller mcx_ioctl() treats that as "re-run mcx_iff()"),
 * ENOMEM on failure.
 */
static int
mcx_up(struct mcx_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct mcx_rx *rx;
	struct mcx_tx *tx;
	int i, start, count, flow_group, flow_index;
	struct mcx_flow_match match_crit;
	struct mcx_rss_rule *rss;
	uint32_t dest;
	int rqns[MCX_MAX_QUEUES];

	if (mcx_create_tis(sc, &sc->sc_tis) != 0)
		goto down;

	for (i = 0; i < intrmap_count(sc->sc_intrmap); i++) {
		if (mcx_queue_up(sc, &sc->sc_queues[i]) != 0) {
			goto down;
		}
	}

	/* RSS flow table and flow groups */
	if (mcx_create_flow_table(sc, MCX_LOG_FLOW_TABLE_SIZE, 1,
	    &sc->sc_rss_flow_table_id) != 0)
		goto down;

	/* MAC table entries steer matched packets into the RSS table */
	dest = MCX_FLOW_CONTEXT_DEST_TYPE_TABLE |
	    sc->sc_rss_flow_table_id;

	/* L4 RSS flow group (v4/v6 tcp/udp, no fragments) */
	memset(&match_crit, 0, sizeof(match_crit));
	match_crit.mc_ethertype = 0xffff;
	match_crit.mc_ip_proto = 0xff;
	match_crit.mc_vlan_flags = MCX_FLOW_MATCH_IP_FRAG;
	start = 0;
	count = mcx_rss_group_entry_count(sc, MCX_FLOW_GROUP_RSS_L4);
	if (count != 0) {
		if (mcx_create_flow_group(sc, sc->sc_rss_flow_table_id,
		    MCX_FLOW_GROUP_RSS_L4, start, count,
		    MCX_CREATE_FLOW_GROUP_CRIT_OUTER, &match_crit) != 0)
			goto down;
		start += count;
	}

	/* L3 RSS flow group (v4/v6, including fragments) */
	memset(&match_crit, 0, sizeof(match_crit));
	match_crit.mc_ethertype = 0xffff;
	count = mcx_rss_group_entry_count(sc, MCX_FLOW_GROUP_RSS_L3);
	if (mcx_create_flow_group(sc, sc->sc_rss_flow_table_id,
	    MCX_FLOW_GROUP_RSS_L3, start, count,
	    MCX_CREATE_FLOW_GROUP_CRIT_OUTER, &match_crit) != 0)
		goto down;
	start += count;

	/* non-RSS flow group: matches everything else */
	count = mcx_rss_group_entry_count(sc, MCX_FLOW_GROUP_RSS_NONE);
	memset(&match_crit, 0, sizeof(match_crit));
	if (mcx_create_flow_group(sc, sc->sc_rss_flow_table_id,
	    MCX_FLOW_GROUP_RSS_NONE, start, count, 0, &match_crit) != 0)
		goto down;

	/* Root flow table, matching packets based on mac address */
	if (mcx_create_flow_table(sc, MCX_LOG_FLOW_TABLE_SIZE, 0,
	    &sc->sc_mac_flow_table_id) != 0)
		goto down;

	/* promisc flow group: empty match criteria, matches everything */
	start = 0;
	memset(&match_crit, 0, sizeof(match_crit));
	if (mcx_create_flow_group(sc, sc->sc_mac_flow_table_id,
	    MCX_FLOW_GROUP_PROMISC, start, 1, 0, &match_crit) != 0)
		goto down;
	sc->sc_promisc_flow_enabled = 0;
	start++;

	/* all multicast flow group: match only the multicast bit */
	match_crit.mc_dest_mac[0] = 0x01;
	if (mcx_create_flow_group(sc, sc->sc_mac_flow_table_id,
	    MCX_FLOW_GROUP_ALLMULTI, start, 1,
	    MCX_CREATE_FLOW_GROUP_CRIT_OUTER, &match_crit) != 0)
		goto down;
	sc->sc_allmulti_flow_enabled = 0;
	start++;

	/* mac address matching flow group: match the full destination mac */
	memset(&match_crit.mc_dest_mac, 0xff, sizeof(match_crit.mc_dest_mac));
	if (mcx_create_flow_group(sc, sc->sc_mac_flow_table_id,
	    MCX_FLOW_GROUP_MAC, start, (1 << MCX_LOG_FLOW_TABLE_SIZE) - start,
	    MCX_CREATE_FLOW_GROUP_CRIT_OUTER, &match_crit) != 0)
		goto down;

	/* flow table entries for unicast and broadcast */
	start = 0;
	if (mcx_set_flow_table_entry_mac(sc, MCX_FLOW_GROUP_MAC, start,
	    sc->sc_ac.ac_enaddr, dest) != 0)
		goto down;
	start++;

	if (mcx_set_flow_table_entry_mac(sc, MCX_FLOW_GROUP_MAC, start,
	    etherbroadcastaddr, dest) != 0)
		goto down;
	start++;

	/* multicast entries go after that */
	sc->sc_mcast_flow_base = start;

	/* re-add any existing multicast flows (kept across down/up) */
	for (i = 0; i < MCX_NUM_MCAST_FLOWS; i++) {
		if (sc->sc_mcast_flows[i][0] != 0) {
			mcx_set_flow_table_entry_mac(sc, MCX_FLOW_GROUP_MAC,
			    sc->sc_mcast_flow_base + i,
			    sc->sc_mcast_flows[i], dest);
		}
	}

	if (mcx_set_flow_table_root(sc, sc->sc_mac_flow_table_id) != 0)
		goto down;

	/*
	 * the RQT can be any size as long as it's a power of two.
	 * since we also restrict the number of queues to a power of two,
	 * we can just put each rx queue in once.
	 */
	for (i = 0; i < intrmap_count(sc->sc_intrmap); i++)
		rqns[i] = sc->sc_queues[i].q_rx.rx_rqn;

	if (mcx_create_rqt(sc, intrmap_count(sc->sc_intrmap), rqns,
	    &sc->sc_rqt) != 0)
		goto down;

	/* one TIR + RSS-table entry per rule; flow_index restarts per group */
	start = 0;
	flow_index = 0;
	flow_group = -1;
	for (i = 0; i < nitems(mcx_rss_config); i++) {
		rss = &mcx_rss_config[i];
		if (rss->flow_group != flow_group) {
			flow_group = rss->flow_group;
			flow_index = 0;
		}

		if (rss->hash_sel == 0) {
			/* no hashing: send straight to the first rx queue */
			if (mcx_create_tir_direct(sc, &sc->sc_queues[0].q_rx,
			    &sc->sc_tir[i]) != 0)
				goto down;
		} else {
			if (mcx_create_tir_indirect(sc, sc->sc_rqt,
			    rss->hash_sel, &sc->sc_tir[i]) != 0)
				goto down;
		}

		if (mcx_set_flow_table_entry_proto(sc, flow_group,
		    flow_index, rss->ethertype, rss->ip_proto,
		    MCX_FLOW_CONTEXT_DEST_TYPE_TIR | sc->sc_tir[i]) != 0)
			goto down;
		flow_index++;
	}

	for (i = 0; i < intrmap_count(sc->sc_intrmap); i++) {
		struct mcx_queues *q = &sc->sc_queues[i];
		rx = &q->q_rx;
		tx = &q->q_tx;

		/* start the queues */
		if (mcx_ready_sq(sc, tx) != 0)
			goto down;

		if (mcx_ready_rq(sc, rx) != 0)
			goto down;

		if_rxr_init(&rx->rx_rxr, 1, (1 << MCX_LOG_RQ_SIZE));
		rx->rx_prod = 0;
		mcx_rx_fill(sc, rx);

		tx->tx_cons = 0;
		tx->tx_prod = 0;
		ifq_clr_oactive(tx->tx_ifq);
	}

	mcx_calibrate_first(sc);

	SET(ifp->if_flags, IFF_RUNNING);

	return ENETRESET;
down:
	mcx_down(sc);
	return ENOMEM;
}
7369 | |
/*
 * Bring the interface down, in the reverse order of mcx_up(): delete
 * flow table entries first so no new packets can be steered to us, wait
 * for interrupts/ifqs/timeouts to drain behind the barriers, then
 * destroy flow groups, flow tables, the RQT, the queue objects and
 * finally the TIS.  Also serves as the error-unwind path for mcx_up(),
 * so every destroy is guarded by an "was it created?" check.
 */
static void
mcx_down(struct mcx_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct mcx_rss_rule *rss;
	int group, i, flow_group, flow_index;

	CLR(ifp->if_flags, IFF_RUNNING);

	/*
	 * delete flow table entries first, so no packets can arrive
	 * after the barriers
	 */
	if (sc->sc_promisc_flow_enabled)
		mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_PROMISC, 0);
	if (sc->sc_allmulti_flow_enabled)
		mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_ALLMULTI, 0);
	/* entries 0 and 1 are our unicast address and broadcast */
	mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_MAC, 0);
	mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_MAC, 1);
	for (i = 0; i < MCX_NUM_MCAST_FLOWS; i++) {
		if (sc->sc_mcast_flows[i][0] != 0) {
			mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_MAC,
			    sc->sc_mcast_flow_base + i);
		}
	}

	/* mirror of the RSS entry creation loop in mcx_up() */
	flow_group = -1;
	flow_index = 0;
	for (i = 0; i < nitems(mcx_rss_config); i++) {
		rss = &mcx_rss_config[i];
		if (rss->flow_group != flow_group) {
			flow_group = rss->flow_group;
			flow_index = 0;
		}

		mcx_delete_flow_table_entry(sc, flow_group, flow_index);

		mcx_destroy_tir(sc, sc->sc_tir[i]);
		sc->sc_tir[i] = 0;

		flow_index++;
	}
	/* wait out the admin interrupt and every per-queue consumer */
	intr_barrier(sc->sc_ihc);
	for (i = 0; i < intrmap_count(sc->sc_intrmap); i++) {
		struct ifqueue *ifq = sc->sc_queues[i].q_tx.tx_ifq;
		ifq_barrier(ifq);

		timeout_del_barrier(&sc->sc_queues[i].q_rx.rx_refill);

		intr_barrier(sc->sc_queues[i].q_ihc);
	}

	timeout_del_barrier(&sc->sc_calibrate);

	for (group = 0; group < MCX_NUM_FLOW_GROUPS; group++) {
		if (sc->sc_flow_group[group].g_id != -1)
			mcx_destroy_flow_group(sc, group);
	}

	if (sc->sc_mac_flow_table_id != -1) {
		mcx_destroy_flow_table(sc, sc->sc_mac_flow_table_id);
		sc->sc_mac_flow_table_id = -1;
	}
	if (sc->sc_rss_flow_table_id != -1) {
		mcx_destroy_flow_table(sc, sc->sc_rss_flow_table_id);
		sc->sc_rss_flow_table_id = -1;
	}
	if (sc->sc_rqt != -1) {
		mcx_destroy_rqt(sc, sc->sc_rqt);
		sc->sc_rqt = -1;
	}

	for (i = 0; i < intrmap_count(sc->sc_intrmap); i++) {
		struct mcx_queues *q = &sc->sc_queues[i];
		struct mcx_rx *rx = &q->q_rx;
		struct mcx_tx *tx = &q->q_tx;
		struct mcx_cq *cq = &q->q_cq;

		if (rx->rx_rqn != 0)
			mcx_destroy_rq(sc, rx);

		if (tx->tx_sqn != 0)
			mcx_destroy_sq(sc, tx);

		if (tx->tx_slots != NULL) {
			mcx_free_slots(sc, tx->tx_slots,
			    (1 << MCX_LOG_SQ_SIZE), (1 << MCX_LOG_SQ_SIZE));
			tx->tx_slots = NULL;
		}
		if (rx->rx_slots != NULL) {
			mcx_free_slots(sc, rx->rx_slots,
			    (1 << MCX_LOG_RQ_SIZE), (1 << MCX_LOG_RQ_SIZE));
			rx->rx_slots = NULL;
		}

		if (cq->cq_n != 0)
			mcx_destroy_cq(sc, cq);
	}
	if (sc->sc_tis != 0) {
		mcx_destroy_tis(sc, sc->sc_tis);
		sc->sc_tis = 0;
	}
}
7473 | |
7474 | static int |
7475 | mcx_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) |
7476 | { |
7477 | struct mcx_softc *sc = (struct mcx_softc *)ifp->if_softc; |
7478 | struct ifreq *ifr = (struct ifreq *)data; |
7479 | uint8_t addrhi[ETHER_ADDR_LEN6], addrlo[ETHER_ADDR_LEN6]; |
7480 | int s, i, error = 0; |
7481 | uint32_t dest; |
7482 | |
7483 | s = splnet()splraise(0x7); |
7484 | switch (cmd) { |
7485 | case SIOCSIFADDR((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff ) << 16) | ((('i')) << 8) | ((12))): |
7486 | ifp->if_flags |= IFF_UP0x1; |
7487 | /* FALLTHROUGH */ |
7488 | |
7489 | case SIOCSIFFLAGS((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff ) << 16) | ((('i')) << 8) | ((16))): |
7490 | if (ISSET(ifp->if_flags, IFF_UP)((ifp->if_flags) & (0x1))) { |
7491 | if (ISSET(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) & (0x40))) |
7492 | error = ENETRESET52; |
7493 | else |
7494 | error = mcx_up(sc); |
7495 | } else { |
7496 | if (ISSET(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) & (0x40))) |
7497 | mcx_down(sc); |
7498 | } |
7499 | break; |
7500 | |
7501 | case SIOCGIFMEDIA(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof (struct ifmediareq) & 0x1fff) << 16) | ((('i')) << 8) | ((56))): |
7502 | case SIOCSIFMEDIA(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof (struct ifreq) & 0x1fff) << 16) | ((('i')) << 8) | ((55))): |
7503 | error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd); |
7504 | break; |
7505 | |
7506 | case SIOCGIFSFFPAGE(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof (struct if_sffpage) & 0x1fff) << 16) | ((('i')) << 8) | ((57))): |
7507 | error = mcx_get_sffpage(ifp, (struct if_sffpage *)data); |
7508 | break; |
7509 | |
7510 | case SIOCGIFRXR((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff ) << 16) | ((('i')) << 8) | ((170))): |
7511 | error = mcx_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_dataifr_ifru.ifru_data); |
7512 | break; |
7513 | |
7514 | case SIOCADDMULTI((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff ) << 16) | ((('i')) << 8) | ((49))): |
7515 | if (ether_addmulti(ifr, &sc->sc_ac) == ENETRESET52) { |
7516 | error = ether_multiaddr(&ifr->ifr_addrifr_ifru.ifru_addr, addrlo, addrhi); |
7517 | if (error != 0) |
7518 | return (error); |
7519 | |
7520 | dest = MCX_FLOW_CONTEXT_DEST_TYPE_TABLE(1 << 24) | |
7521 | sc->sc_rss_flow_table_id; |
7522 | |
7523 | for (i = 0; i < MCX_NUM_MCAST_FLOWS((1 << 5) - 4); i++) { |
7524 | if (sc->sc_mcast_flows[i][0] == 0) { |
7525 | memcpy(sc->sc_mcast_flows[i], addrlo,__builtin_memcpy((sc->sc_mcast_flows[i]), (addrlo), (6)) |
7526 | ETHER_ADDR_LEN)__builtin_memcpy((sc->sc_mcast_flows[i]), (addrlo), (6)); |
7527 | if (ISSET(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) & (0x40))) { |
7528 | mcx_set_flow_table_entry_mac(sc, |
7529 | MCX_FLOW_GROUP_MAC2, |
7530 | sc->sc_mcast_flow_base + i, |
7531 | sc->sc_mcast_flows[i], dest); |
7532 | } |
7533 | break; |
7534 | } |
7535 | } |
7536 | |
7537 | if (!ISSET(ifp->if_flags, IFF_ALLMULTI)((ifp->if_flags) & (0x200))) { |
7538 | if (i == MCX_NUM_MCAST_FLOWS((1 << 5) - 4)) { |
7539 | SET(ifp->if_flags, IFF_ALLMULTI)((ifp->if_flags) |= (0x200)); |
7540 | sc->sc_extra_mcast++; |
7541 | error = ENETRESET52; |
7542 | } |
7543 | |
7544 | if (sc->sc_ac.ac_multirangecnt > 0) { |
7545 | SET(ifp->if_flags, IFF_ALLMULTI)((ifp->if_flags) |= (0x200)); |
7546 | error = ENETRESET52; |
7547 | } |
7548 | } |
7549 | } |
7550 | break; |
7551 | |
7552 | case SIOCDELMULTI((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff ) << 16) | ((('i')) << 8) | ((50))): |
7553 | if (ether_delmulti(ifr, &sc->sc_ac) == ENETRESET52) { |
7554 | error = ether_multiaddr(&ifr->ifr_addrifr_ifru.ifru_addr, addrlo, addrhi); |
7555 | if (error != 0) |
7556 | return (error); |
7557 | |
7558 | for (i = 0; i < MCX_NUM_MCAST_FLOWS((1 << 5) - 4); i++) { |
7559 | if (memcmp(sc->sc_mcast_flows[i], addrlo,__builtin_memcmp((sc->sc_mcast_flows[i]), (addrlo), (6)) |
7560 | ETHER_ADDR_LEN)__builtin_memcmp((sc->sc_mcast_flows[i]), (addrlo), (6)) == 0) { |
7561 | if (ISSET(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) & (0x40))) { |
7562 | mcx_delete_flow_table_entry(sc, |
7563 | MCX_FLOW_GROUP_MAC2, |
7564 | sc->sc_mcast_flow_base + i); |
7565 | } |
7566 | sc->sc_mcast_flows[i][0] = 0; |
7567 | break; |
7568 | } |
7569 | } |
7570 | |
7571 | if (i == MCX_NUM_MCAST_FLOWS((1 << 5) - 4)) |
7572 | sc->sc_extra_mcast--; |
7573 | |
7574 | if (ISSET(ifp->if_flags, IFF_ALLMULTI)((ifp->if_flags) & (0x200)) && |
7575 | (sc->sc_extra_mcast == 0) && |
7576 | (sc->sc_ac.ac_multirangecnt == 0)) { |
7577 | CLR(ifp->if_flags, IFF_ALLMULTI)((ifp->if_flags) &= ~(0x200)); |
7578 | error = ENETRESET52; |
7579 | } |
7580 | } |
7581 | break; |
7582 | |
7583 | default: |
7584 | error = ether_ioctl(ifp, &sc->sc_ac, cmd, data); |
7585 | } |
7586 | |
7587 | if (error == ENETRESET52) { |
7588 | if ((ifp->if_flags & (IFF_UP0x1 | IFF_RUNNING0x40)) == |
7589 | (IFF_UP0x1 | IFF_RUNNING0x40)) |
7590 | mcx_iff(sc); |
7591 | error = 0; |
7592 | } |
7593 | splx(s)spllower(s); |
7594 | |
7595 | return (error); |
7596 | } |
7597 | |
7598 | static int |
7599 | mcx_get_sffpage(struct ifnet *ifp, struct if_sffpage *sff) |
7600 | { |
7601 | struct mcx_softc *sc = (struct mcx_softc *)ifp->if_softc; |
7602 | struct mcx_reg_mcia mcia; |
7603 | struct mcx_reg_pmlp pmlp; |
7604 | int offset, error; |
7605 | |
7606 | /* get module number */ |
7607 | memset(&pmlp, 0, sizeof(pmlp))__builtin_memset((&pmlp), (0), (sizeof(pmlp))); |
7608 | pmlp.rp_local_port = 1; |
7609 | error = mcx_access_hca_reg(sc, MCX_REG_PMLP0x5002, MCX_REG_OP_READ1, &pmlp, |
7610 | sizeof(pmlp)); |
7611 | if (error != 0) { |
7612 | printf("%s: unable to get eeprom module number\n", |
7613 | DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
7614 | return error; |
7615 | } |
7616 | |
7617 | for (offset = 0; offset < 256; offset += MCX_MCIA_EEPROM_BYTES32) { |
7618 | memset(&mcia, 0, sizeof(mcia))__builtin_memset((&mcia), (0), (sizeof(mcia))); |
7619 | mcia.rm_l = 0; |
7620 | mcia.rm_module = betoh32(pmlp.rp_lane0_mapping)(__uint32_t)(__builtin_constant_p(pmlp.rp_lane0_mapping) ? (__uint32_t )(((__uint32_t)(pmlp.rp_lane0_mapping) & 0xff) << 24 | ((__uint32_t)(pmlp.rp_lane0_mapping) & 0xff00) << 8 | ((__uint32_t)(pmlp.rp_lane0_mapping) & 0xff0000) >> 8 | ((__uint32_t)(pmlp.rp_lane0_mapping) & 0xff000000) >> 24) : __swap32md(pmlp.rp_lane0_mapping)) & |
7621 | MCX_PMLP_MODULE_NUM_MASK0xff; |
7622 | mcia.rm_i2c_addr = sff->sff_addr / 2; /* apparently */ |
7623 | mcia.rm_page_num = sff->sff_page; |
7624 | mcia.rm_dev_addr = htobe16(offset)(__uint16_t)(__builtin_constant_p(offset) ? (__uint16_t)(((__uint16_t )(offset) & 0xffU) << 8 | ((__uint16_t)(offset) & 0xff00U) >> 8) : __swap16md(offset)); |
7625 | mcia.rm_size = htobe16(MCX_MCIA_EEPROM_BYTES)(__uint16_t)(__builtin_constant_p(32) ? (__uint16_t)(((__uint16_t )(32) & 0xffU) << 8 | ((__uint16_t)(32) & 0xff00U ) >> 8) : __swap16md(32)); |
7626 | |
7627 | error = mcx_access_hca_reg(sc, MCX_REG_MCIA0x9014, MCX_REG_OP_READ1, |
7628 | &mcia, sizeof(mcia)); |
7629 | if (error != 0) { |
7630 | printf("%s: unable to read eeprom at %x\n", |
7631 | DEVNAME(sc)((sc)->sc_dev.dv_xname), offset); |
7632 | return error; |
7633 | } |
7634 | |
7635 | memcpy(sff->sff_data + offset, mcia.rm_data,__builtin_memcpy((sff->sff_data + offset), (mcia.rm_data), (32)) |
7636 | MCX_MCIA_EEPROM_BYTES)__builtin_memcpy((sff->sff_data + offset), (mcia.rm_data), (32)); |
7637 | } |
7638 | |
7639 | return 0; |
7640 | } |
7641 | |
7642 | static int |
7643 | mcx_rxrinfo(struct mcx_softc *sc, struct if_rxrinfo *ifri) |
7644 | { |
7645 | struct if_rxring_info *ifrs; |
7646 | unsigned int i; |
7647 | int error; |
7648 | |
7649 | ifrs = mallocarray(intrmap_count(sc->sc_intrmap), sizeof(*ifrs), |
7650 | M_TEMP127, M_WAITOK0x0001|M_ZERO0x0008|M_CANFAIL0x0004); |
7651 | if (ifrs == NULL((void *)0)) |
7652 | return (ENOMEM12); |
7653 | |
7654 | for (i = 0; i < intrmap_count(sc->sc_intrmap); i++) { |
7655 | struct mcx_rx *rx = &sc->sc_queues[i].q_rx; |
7656 | struct if_rxring_info *ifr = &ifrs[i]; |
7657 | |
7658 | snprintf(ifr->ifr_name, sizeof(ifr->ifr_name), "%u", i); |
7659 | ifr->ifr_size = sc->sc_hardmtu; |
7660 | ifr->ifr_info = rx->rx_rxr; |
7661 | } |
7662 | |
7663 | error = if_rxr_info_ioctl(ifri, i, ifrs); |
7664 | free(ifrs, M_TEMP127, i * sizeof(*ifrs)); |
7665 | |
7666 | return (error); |
7667 | } |
7668 | |
7669 | int |
7670 | mcx_load_mbuf(struct mcx_softc *sc, struct mcx_slot *ms, struct mbuf *m) |
7671 | { |
7672 | switch (bus_dmamap_load_mbuf(sc->sc_dmat, ms->ms_map, m,(*(sc->sc_dmat)->_dmamap_load_mbuf)((sc->sc_dmat), ( ms->ms_map), (m), (0x0100 | 0x0001)) |
7673 | BUS_DMA_STREAMING | BUS_DMA_NOWAIT)(*(sc->sc_dmat)->_dmamap_load_mbuf)((sc->sc_dmat), ( ms->ms_map), (m), (0x0100 | 0x0001))) { |
7674 | case 0: |
7675 | break; |
7676 | |
7677 | case EFBIG27: |
7678 | if (m_defrag(m, M_DONTWAIT0x0002) == 0 && |
7679 | bus_dmamap_load_mbuf(sc->sc_dmat, ms->ms_map, m,(*(sc->sc_dmat)->_dmamap_load_mbuf)((sc->sc_dmat), ( ms->ms_map), (m), (0x0100 | 0x0001)) |
7680 | BUS_DMA_STREAMING | BUS_DMA_NOWAIT)(*(sc->sc_dmat)->_dmamap_load_mbuf)((sc->sc_dmat), ( ms->ms_map), (m), (0x0100 | 0x0001)) == 0) |
7681 | break; |
7682 | |
7683 | default: |
7684 | return (1); |
7685 | } |
7686 | |
7687 | ms->ms_m = m; |
7688 | return (0); |
7689 | } |
7690 | |
7691 | static void |
7692 | mcx_start(struct ifqueue *ifq) |
7693 | { |
7694 | struct mcx_tx *tx = ifq->ifq_softc_ifq_ptr._ifq_softc; |
7695 | struct ifnet *ifp = ifq->ifq_if; |
7696 | struct mcx_softc *sc = ifp->if_softc; |
7697 | struct mcx_sq_entry *sq, *sqe; |
7698 | struct mcx_sq_entry_seg *sqs; |
7699 | struct mcx_slot *ms; |
7700 | bus_dmamap_t map; |
7701 | struct mbuf *m; |
7702 | u_int idx, free, used; |
7703 | uint64_t *bf; |
7704 | uint32_t csum; |
7705 | size_t bf_base; |
7706 | int i, seg, nseg; |
7707 | |
7708 | bf_base = (tx->tx_uar * MCX_PAGE_SIZE(1 << 12)) + MCX_UAR_BF0x800; |
7709 | |
7710 | idx = tx->tx_prod % (1 << MCX_LOG_SQ_SIZE11); |
7711 | free = (tx->tx_cons + (1 << MCX_LOG_SQ_SIZE11)) - tx->tx_prod; |
7712 | |
7713 | used = 0; |
7714 | bf = NULL((void *)0); |
7715 | |
7716 | bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&tx->tx_sq_mem),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& tx->tx_sq_mem)->mxm_map)), (0), (((&tx->tx_sq_mem )->mxm_size)), (0x08)) |
7717 | 0, MCX_DMA_LEN(&tx->tx_sq_mem), BUS_DMASYNC_POSTWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& tx->tx_sq_mem)->mxm_map)), (0), (((&tx->tx_sq_mem )->mxm_size)), (0x08)); |
7718 | |
7719 | sq = (struct mcx_sq_entry *)MCX_DMA_KVA(&tx->tx_sq_mem)((void *)(&tx->tx_sq_mem)->mxm_kva); |
7720 | |
7721 | for (;;) { |
7722 | if (used + MCX_SQ_ENTRY_MAX_SLOTS4 >= free) { |
7723 | ifq_set_oactive(ifq); |
7724 | break; |
7725 | } |
7726 | |
7727 | m = ifq_dequeue(ifq); |
7728 | if (m == NULL((void *)0)) { |
7729 | break; |
7730 | } |
7731 | |
7732 | sqe = sq + idx; |
7733 | ms = &tx->tx_slots[idx]; |
7734 | memset(sqe, 0, sizeof(*sqe))__builtin_memset((sqe), (0), (sizeof(*sqe))); |
7735 | |
7736 | /* ctrl segment */ |
7737 | sqe->sqe_opcode_index = htobe32(MCX_SQE_WQE_OPCODE_SEND |(__uint32_t)(__builtin_constant_p(0x0a | ((tx->tx_prod & 0xffff) << 8)) ? (__uint32_t)(((__uint32_t)(0x0a | ((tx ->tx_prod & 0xffff) << 8)) & 0xff) << 24 | ((__uint32_t)(0x0a | ((tx->tx_prod & 0xffff) << 8)) & 0xff00) << 8 | ((__uint32_t)(0x0a | ((tx-> tx_prod & 0xffff) << 8)) & 0xff0000) >> 8 | ((__uint32_t)(0x0a | ((tx->tx_prod & 0xffff) << 8)) & 0xff000000) >> 24) : __swap32md(0x0a | ((tx-> tx_prod & 0xffff) << 8))) |
7738 | ((tx->tx_prod & 0xffff) << MCX_SQE_WQE_INDEX_SHIFT))(__uint32_t)(__builtin_constant_p(0x0a | ((tx->tx_prod & 0xffff) << 8)) ? (__uint32_t)(((__uint32_t)(0x0a | ((tx ->tx_prod & 0xffff) << 8)) & 0xff) << 24 | ((__uint32_t)(0x0a | ((tx->tx_prod & 0xffff) << 8)) & 0xff00) << 8 | ((__uint32_t)(0x0a | ((tx-> tx_prod & 0xffff) << 8)) & 0xff0000) >> 8 | ((__uint32_t)(0x0a | ((tx->tx_prod & 0xffff) << 8)) & 0xff000000) >> 24) : __swap32md(0x0a | ((tx-> tx_prod & 0xffff) << 8))); |
7739 | /* always generate a completion event */ |
7740 | sqe->sqe_signature = htobe32(MCX_SQE_CE_CQE_ALWAYS)(__uint32_t)(__builtin_constant_p(0x08) ? (__uint32_t)(((__uint32_t )(0x08) & 0xff) << 24 | ((__uint32_t)(0x08) & 0xff00 ) << 8 | ((__uint32_t)(0x08) & 0xff0000) >> 8 | ((__uint32_t)(0x08) & 0xff000000) >> 24) : __swap32md (0x08)); |
7741 | |
7742 | /* eth segment */ |
7743 | csum = 0; |
7744 | if (m->m_pkthdrM_dat.MH.MH_pkthdr.csum_flags & M_IPV4_CSUM_OUT0x0001) |
7745 | csum |= MCX_SQE_L3_CSUM(1 << 30); |
7746 | if (m->m_pkthdrM_dat.MH.MH_pkthdr.csum_flags & (M_TCP_CSUM_OUT0x0002 | M_UDP_CSUM_OUT0x0004)) |
7747 | csum |= MCX_SQE_L4_CSUM(1 << 31); |
7748 | sqe->sqe_mss_csum = htobe32(csum)(__uint32_t)(__builtin_constant_p(csum) ? (__uint32_t)(((__uint32_t )(csum) & 0xff) << 24 | ((__uint32_t)(csum) & 0xff00 ) << 8 | ((__uint32_t)(csum) & 0xff0000) >> 8 | ((__uint32_t)(csum) & 0xff000000) >> 24) : __swap32md (csum)); |
7749 | sqe->sqe_inline_header_size = htobe16(MCX_SQ_INLINE_SIZE)(__uint16_t)(__builtin_constant_p(18) ? (__uint16_t)(((__uint16_t )(18) & 0xffU) << 8 | ((__uint16_t)(18) & 0xff00U ) >> 8) : __swap16md(18)); |
7750 | #if NVLAN1 > 0 |
7751 | if (m->m_flagsm_hdr.mh_flags & M_VLANTAG0x0020) { |
7752 | struct ether_vlan_header *evh; |
7753 | evh = (struct ether_vlan_header *) |
7754 | &sqe->sqe_inline_headers; |
7755 | |
7756 | /* slightly cheaper vlan_inject() */ |
7757 | m_copydata(m, 0, ETHER_HDR_LEN((6 * 2) + 2), evh); |
7758 | evh->evl_proto = evh->evl_encap_proto; |
7759 | evh->evl_encap_proto = htons(ETHERTYPE_VLAN)(__uint16_t)(__builtin_constant_p(0x8100) ? (__uint16_t)(((__uint16_t )(0x8100) & 0xffU) << 8 | ((__uint16_t)(0x8100) & 0xff00U) >> 8) : __swap16md(0x8100)); |
7760 | evh->evl_tag = htons(m->m_pkthdr.ether_vtag)(__uint16_t)(__builtin_constant_p(m->M_dat.MH.MH_pkthdr.ether_vtag ) ? (__uint16_t)(((__uint16_t)(m->M_dat.MH.MH_pkthdr.ether_vtag ) & 0xffU) << 8 | ((__uint16_t)(m->M_dat.MH.MH_pkthdr .ether_vtag) & 0xff00U) >> 8) : __swap16md(m->M_dat .MH.MH_pkthdr.ether_vtag)); |
7761 | |
7762 | m_adj(m, ETHER_HDR_LEN((6 * 2) + 2)); |
7763 | } else |
7764 | #endif |
7765 | { |
7766 | m_copydata(m, 0, MCX_SQ_INLINE_SIZE18, |
7767 | sqe->sqe_inline_headers); |
7768 | m_adj(m, MCX_SQ_INLINE_SIZE18); |
7769 | } |
7770 | |
7771 | if (mcx_load_mbuf(sc, ms, m) != 0) { |
7772 | m_freem(m); |
7773 | ifp->if_oerrorsif_data.ifi_oerrors++; |
7774 | continue; |
7775 | } |
7776 | bf = (uint64_t *)sqe; |
7777 | |
7778 | #if NBPFILTER1 > 0 |
7779 | if (ifp->if_bpf) |
7780 | bpf_mtap_hdr(ifp->if_bpf, |
7781 | (caddr_t)sqe->sqe_inline_headers, |
7782 | MCX_SQ_INLINE_SIZE18, m, BPF_DIRECTION_OUT(1 << 1)); |
7783 | #endif |
7784 | map = ms->ms_map; |
7785 | bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (map), (0), (map->dm_mapsize), (0x04)) |
7786 | BUS_DMASYNC_PREWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (map), (0), (map->dm_mapsize), (0x04)); |
7787 | |
7788 | sqe->sqe_ds_sq_num = |
7789 | htobe32((tx->tx_sqn << MCX_SQE_SQ_NUM_SHIFT) |(__uint32_t)(__builtin_constant_p((tx->tx_sqn << 8) | (map->dm_nsegs + 3)) ? (__uint32_t)(((__uint32_t)((tx-> tx_sqn << 8) | (map->dm_nsegs + 3)) & 0xff) << 24 | ((__uint32_t)((tx->tx_sqn << 8) | (map->dm_nsegs + 3)) & 0xff00) << 8 | ((__uint32_t)((tx->tx_sqn << 8) | (map->dm_nsegs + 3)) & 0xff0000) >> 8 | ((__uint32_t)((tx->tx_sqn << 8) | (map->dm_nsegs + 3)) & 0xff000000) >> 24) : __swap32md((tx->tx_sqn << 8) | (map->dm_nsegs + 3))) |
7790 | (map->dm_nsegs + 3))(__uint32_t)(__builtin_constant_p((tx->tx_sqn << 8) | (map->dm_nsegs + 3)) ? (__uint32_t)(((__uint32_t)((tx-> tx_sqn << 8) | (map->dm_nsegs + 3)) & 0xff) << 24 | ((__uint32_t)((tx->tx_sqn << 8) | (map->dm_nsegs + 3)) & 0xff00) << 8 | ((__uint32_t)((tx->tx_sqn << 8) | (map->dm_nsegs + 3)) & 0xff0000) >> 8 | ((__uint32_t)((tx->tx_sqn << 8) | (map->dm_nsegs + 3)) & 0xff000000) >> 24) : __swap32md((tx->tx_sqn << 8) | (map->dm_nsegs + 3))); |
7791 | |
7792 | /* data segment - first wqe has one segment */ |
7793 | sqs = sqe->sqe_segs; |
7794 | seg = 0; |
7795 | nseg = 1; |
7796 | for (i = 0; i < map->dm_nsegs; i++) { |
7797 | if (seg == nseg) { |
7798 | /* next slot */ |
7799 | idx++; |
7800 | if (idx == (1 << MCX_LOG_SQ_SIZE11)) |
7801 | idx = 0; |
7802 | tx->tx_prod++; |
7803 | used++; |
7804 | |
7805 | sqs = (struct mcx_sq_entry_seg *)(sq + idx); |
7806 | seg = 0; |
7807 | nseg = MCX_SQ_SEGS_PER_SLOT(sizeof(struct mcx_sq_entry) / sizeof(struct mcx_sq_entry_seg )); |
7808 | } |
7809 | sqs[seg].sqs_byte_count = |
7810 | htobe32(map->dm_segs[i].ds_len)(__uint32_t)(__builtin_constant_p(map->dm_segs[i].ds_len) ? (__uint32_t)(((__uint32_t)(map->dm_segs[i].ds_len) & 0xff ) << 24 | ((__uint32_t)(map->dm_segs[i].ds_len) & 0xff00) << 8 | ((__uint32_t)(map->dm_segs[i].ds_len ) & 0xff0000) >> 8 | ((__uint32_t)(map->dm_segs[ i].ds_len) & 0xff000000) >> 24) : __swap32md(map-> dm_segs[i].ds_len)); |
7811 | sqs[seg].sqs_lkey = htobe32(sc->sc_lkey)(__uint32_t)(__builtin_constant_p(sc->sc_lkey) ? (__uint32_t )(((__uint32_t)(sc->sc_lkey) & 0xff) << 24 | ((__uint32_t )(sc->sc_lkey) & 0xff00) << 8 | ((__uint32_t)(sc ->sc_lkey) & 0xff0000) >> 8 | ((__uint32_t)(sc-> sc_lkey) & 0xff000000) >> 24) : __swap32md(sc->sc_lkey )); |
7812 | sqs[seg].sqs_addr = htobe64(map->dm_segs[i].ds_addr)(__uint64_t)(__builtin_constant_p(map->dm_segs[i].ds_addr) ? (__uint64_t)((((__uint64_t)(map->dm_segs[i].ds_addr) & 0xff) << 56) | ((__uint64_t)(map->dm_segs[i].ds_addr ) & 0xff00ULL) << 40 | ((__uint64_t)(map->dm_segs [i].ds_addr) & 0xff0000ULL) << 24 | ((__uint64_t)(map ->dm_segs[i].ds_addr) & 0xff000000ULL) << 8 | (( __uint64_t)(map->dm_segs[i].ds_addr) & 0xff00000000ULL ) >> 8 | ((__uint64_t)(map->dm_segs[i].ds_addr) & 0xff0000000000ULL) >> 24 | ((__uint64_t)(map->dm_segs [i].ds_addr) & 0xff000000000000ULL) >> 40 | ((__uint64_t )(map->dm_segs[i].ds_addr) & 0xff00000000000000ULL) >> 56) : __swap64md(map->dm_segs[i].ds_addr)); |
7813 | seg++; |
7814 | } |
7815 | |
7816 | idx++; |
7817 | if (idx == (1 << MCX_LOG_SQ_SIZE11)) |
7818 | idx = 0; |
7819 | tx->tx_prod++; |
7820 | used++; |
7821 | } |
7822 | |
7823 | bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&tx->tx_sq_mem),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& tx->tx_sq_mem)->mxm_map)), (0), (((&tx->tx_sq_mem )->mxm_size)), (0x04)) |
7824 | 0, MCX_DMA_LEN(&tx->tx_sq_mem), BUS_DMASYNC_PREWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& tx->tx_sq_mem)->mxm_map)), (0), (((&tx->tx_sq_mem )->mxm_size)), (0x04)); |
7825 | |
7826 | if (used) { |
7827 | bus_size_t blueflame; |
7828 | |
7829 | bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& sc->sc_doorbell_mem)->mxm_map)), (tx->tx_doorbell), ( sizeof(uint32_t)), (0x08)) |
7830 | tx->tx_doorbell, sizeof(uint32_t), BUS_DMASYNC_POSTWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& sc->sc_doorbell_mem)->mxm_map)), (tx->tx_doorbell), ( sizeof(uint32_t)), (0x08)); |
7831 | htobem32(MCX_DMA_OFF(&sc->sc_doorbell_mem, tx->tx_doorbell),(*(__uint32_t *)(((void *)((&sc->sc_doorbell_mem)-> mxm_kva + (tx->tx_doorbell)))) = (__uint32_t)(__builtin_constant_p (tx->tx_prod & 0xffff) ? (__uint32_t)(((__uint32_t)(tx ->tx_prod & 0xffff) & 0xff) << 24 | ((__uint32_t )(tx->tx_prod & 0xffff) & 0xff00) << 8 | ((__uint32_t )(tx->tx_prod & 0xffff) & 0xff0000) >> 8 | ( (__uint32_t)(tx->tx_prod & 0xffff) & 0xff000000) >> 24) : __swap32md(tx->tx_prod & 0xffff))) |
7832 | tx->tx_prod & MCX_WQ_DOORBELL_MASK)(*(__uint32_t *)(((void *)((&sc->sc_doorbell_mem)-> mxm_kva + (tx->tx_doorbell)))) = (__uint32_t)(__builtin_constant_p (tx->tx_prod & 0xffff) ? (__uint32_t)(((__uint32_t)(tx ->tx_prod & 0xffff) & 0xff) << 24 | ((__uint32_t )(tx->tx_prod & 0xffff) & 0xff00) << 8 | ((__uint32_t )(tx->tx_prod & 0xffff) & 0xff0000) >> 8 | ( (__uint32_t)(tx->tx_prod & 0xffff) & 0xff000000) >> 24) : __swap32md(tx->tx_prod & 0xffff))); |
7833 | bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& sc->sc_doorbell_mem)->mxm_map)), (tx->tx_doorbell), ( sizeof(uint32_t)), (0x04)) |
7834 | tx->tx_doorbell, sizeof(uint32_t), BUS_DMASYNC_PREWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((& sc->sc_doorbell_mem)->mxm_map)), (tx->tx_doorbell), ( sizeof(uint32_t)), (0x04)); |
7835 | |
7836 | /* |
7837 | * write the first 64 bits of the last sqe we produced |
7838 | * to the blue flame buffer |
7839 | */ |
7840 | |
7841 | blueflame = bf_base + tx->tx_bf_offset; |
7842 | bus_space_write_raw_8(sc->sc_memt, sc->sc_memh,((sc->sc_memt)->write_8((sc->sc_memh), (blueflame), ( *bf))) |
7843 | blueflame, *bf)((sc->sc_memt)->write_8((sc->sc_memh), (blueflame), ( *bf))); |
7844 | mcx_bar(sc, blueflame, sizeof(*bf), BUS_SPACE_BARRIER_WRITE0x02); |
7845 | |
7846 | /* next write goes to the other buffer */ |
7847 | tx->tx_bf_offset ^= sc->sc_bf_size; |
7848 | } |
7849 | } |
7850 | |
/*
 * Interface watchdog hook.  Intentionally a no-op for this driver.
 */
static void
mcx_watchdog(struct ifnet *ifp)
{
}
7855 | |
7856 | static void |
7857 | mcx_media_add_types(struct mcx_softc *sc) |
7858 | { |
7859 | struct mcx_reg_ptys ptys; |
7860 | int i; |
7861 | uint32_t proto_cap; |
7862 | |
7863 | memset(&ptys, 0, sizeof(ptys))__builtin_memset((&ptys), (0), (sizeof(ptys))); |
7864 | ptys.rp_local_port = 1; |
7865 | ptys.rp_proto_mask = MCX_REG_PTYS_PROTO_MASK_ETH(1 << 2); |
7866 | if (mcx_access_hca_reg(sc, MCX_REG_PTYS0x5004, MCX_REG_OP_READ1, &ptys, |
7867 | sizeof(ptys)) != 0) { |
7868 | printf("%s: unable to read port type/speed\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
7869 | return; |
7870 | } |
7871 | |
7872 | proto_cap = betoh32(ptys.rp_eth_proto_cap)(__uint32_t)(__builtin_constant_p(ptys.rp_eth_proto_cap) ? (__uint32_t )(((__uint32_t)(ptys.rp_eth_proto_cap) & 0xff) << 24 | ((__uint32_t)(ptys.rp_eth_proto_cap) & 0xff00) << 8 | ((__uint32_t)(ptys.rp_eth_proto_cap) & 0xff0000) >> 8 | ((__uint32_t)(ptys.rp_eth_proto_cap) & 0xff000000) >> 24) : __swap32md(ptys.rp_eth_proto_cap)); |
7873 | for (i = 0; i < nitems(mcx_eth_cap_map)(sizeof((mcx_eth_cap_map)) / sizeof((mcx_eth_cap_map)[0])); i++) { |
7874 | const struct mcx_eth_proto_capability *cap; |
7875 | if (!ISSET(proto_cap, 1 << i)((proto_cap) & (1 << i))) |
7876 | continue; |
7877 | |
7878 | cap = &mcx_eth_cap_map[i]; |
7879 | if (cap->cap_media == 0) |
7880 | continue; |
7881 | |
7882 | ifmedia_add(&sc->sc_media, IFM_ETHER0x0000000000000100ULL | cap->cap_media, 0, NULL((void *)0)); |
7883 | } |
7884 | } |
7885 | |
7886 | static void |
7887 | mcx_media_status(struct ifnet *ifp, struct ifmediareq *ifmr) |
7888 | { |
7889 | struct mcx_softc *sc = (struct mcx_softc *)ifp->if_softc; |
7890 | struct mcx_reg_ptys ptys; |
7891 | int i; |
7892 | uint32_t proto_oper; |
7893 | uint64_t media_oper; |
7894 | |
7895 | memset(&ptys, 0, sizeof(ptys))__builtin_memset((&ptys), (0), (sizeof(ptys))); |
7896 | ptys.rp_local_port = 1; |
7897 | ptys.rp_proto_mask = MCX_REG_PTYS_PROTO_MASK_ETH(1 << 2); |
7898 | |
7899 | if (mcx_access_hca_reg(sc, MCX_REG_PTYS0x5004, MCX_REG_OP_READ1, &ptys, |
7900 | sizeof(ptys)) != 0) { |
7901 | printf("%s: unable to read port type/speed\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
7902 | return; |
7903 | } |
7904 | |
7905 | proto_oper = betoh32(ptys.rp_eth_proto_oper)(__uint32_t)(__builtin_constant_p(ptys.rp_eth_proto_oper) ? ( __uint32_t)(((__uint32_t)(ptys.rp_eth_proto_oper) & 0xff) << 24 | ((__uint32_t)(ptys.rp_eth_proto_oper) & 0xff00 ) << 8 | ((__uint32_t)(ptys.rp_eth_proto_oper) & 0xff0000 ) >> 8 | ((__uint32_t)(ptys.rp_eth_proto_oper) & 0xff000000 ) >> 24) : __swap32md(ptys.rp_eth_proto_oper)); |
7906 | |
7907 | media_oper = 0; |
7908 | |
7909 | for (i = 0; i < nitems(mcx_eth_cap_map)(sizeof((mcx_eth_cap_map)) / sizeof((mcx_eth_cap_map)[0])); i++) { |
7910 | const struct mcx_eth_proto_capability *cap; |
7911 | if (!ISSET(proto_oper, 1 << i)((proto_oper) & (1 << i))) |
7912 | continue; |
7913 | |
7914 | cap = &mcx_eth_cap_map[i]; |
7915 | |
7916 | if (cap->cap_media != 0) |
7917 | media_oper = cap->cap_media; |
7918 | } |
7919 | |
7920 | ifmr->ifm_status = IFM_AVALID0x0000000000000001ULL; |
7921 | if (proto_oper != 0) { |
7922 | ifmr->ifm_status |= IFM_ACTIVE0x0000000000000002ULL; |
7923 | ifmr->ifm_active = IFM_ETHER0x0000000000000100ULL | IFM_AUTO0ULL | media_oper; |
7924 | /* txpause, rxpause, duplex? */ |
7925 | } |
7926 | } |
7927 | |
7928 | static int |
7929 | mcx_media_change(struct ifnet *ifp) |
7930 | { |
7931 | struct mcx_softc *sc = (struct mcx_softc *)ifp->if_softc; |
7932 | struct mcx_reg_ptys ptys; |
7933 | struct mcx_reg_paos paos; |
7934 | uint32_t media; |
7935 | int i, error; |
7936 | |
7937 | if (IFM_TYPE(sc->sc_media.ifm_media)((sc->sc_media.ifm_media) & 0x000000000000ff00ULL) != IFM_ETHER0x0000000000000100ULL) |
7938 | return EINVAL22; |
7939 | |
7940 | error = 0; |
7941 | |
7942 | if (IFM_SUBTYPE(sc->sc_media.ifm_media)((sc->sc_media.ifm_media) & 0x00000000000000ffULL) == IFM_AUTO0ULL) { |
7943 | /* read ptys to get supported media */ |
7944 | memset(&ptys, 0, sizeof(ptys))__builtin_memset((&ptys), (0), (sizeof(ptys))); |
7945 | ptys.rp_local_port = 1; |
7946 | ptys.rp_proto_mask = MCX_REG_PTYS_PROTO_MASK_ETH(1 << 2); |
7947 | if (mcx_access_hca_reg(sc, MCX_REG_PTYS0x5004, MCX_REG_OP_READ1, |
7948 | &ptys, sizeof(ptys)) != 0) { |
7949 | printf("%s: unable to read port type/speed\n", |
7950 | DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
7951 | return EIO5; |
7952 | } |
7953 | |
7954 | media = betoh32(ptys.rp_eth_proto_cap)(__uint32_t)(__builtin_constant_p(ptys.rp_eth_proto_cap) ? (__uint32_t )(((__uint32_t)(ptys.rp_eth_proto_cap) & 0xff) << 24 | ((__uint32_t)(ptys.rp_eth_proto_cap) & 0xff00) << 8 | ((__uint32_t)(ptys.rp_eth_proto_cap) & 0xff0000) >> 8 | ((__uint32_t)(ptys.rp_eth_proto_cap) & 0xff000000) >> 24) : __swap32md(ptys.rp_eth_proto_cap)); |
7955 | } else { |
7956 | /* map media type */ |
7957 | media = 0; |
7958 | for (i = 0; i < nitems(mcx_eth_cap_map)(sizeof((mcx_eth_cap_map)) / sizeof((mcx_eth_cap_map)[0])); i++) { |
7959 | const struct mcx_eth_proto_capability *cap; |
7960 | |
7961 | cap = &mcx_eth_cap_map[i]; |
7962 | if (cap->cap_media == |
7963 | IFM_SUBTYPE(sc->sc_media.ifm_media)((sc->sc_media.ifm_media) & 0x00000000000000ffULL)) { |
7964 | media = (1 << i); |
7965 | break; |
7966 | } |
7967 | } |
7968 | } |
7969 | |
7970 | /* disable the port */ |
7971 | memset(&paos, 0, sizeof(paos))__builtin_memset((&paos), (0), (sizeof(paos))); |
7972 | paos.rp_local_port = 1; |
7973 | paos.rp_admin_status = MCX_REG_PAOS_ADMIN_STATUS_DOWN2; |
7974 | paos.rp_admin_state_update = MCX_REG_PAOS_ADMIN_STATE_UPDATE_EN(1 << 7); |
7975 | if (mcx_access_hca_reg(sc, MCX_REG_PAOS0x5006, MCX_REG_OP_WRITE0, &paos, |
7976 | sizeof(paos)) != 0) { |
7977 | printf("%s: unable to set port state to down\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
7978 | return EIO5; |
7979 | } |
7980 | |
7981 | memset(&ptys, 0, sizeof(ptys))__builtin_memset((&ptys), (0), (sizeof(ptys))); |
7982 | ptys.rp_local_port = 1; |
7983 | ptys.rp_proto_mask = MCX_REG_PTYS_PROTO_MASK_ETH(1 << 2); |
7984 | ptys.rp_eth_proto_admin = htobe32(media)(__uint32_t)(__builtin_constant_p(media) ? (__uint32_t)(((__uint32_t )(media) & 0xff) << 24 | ((__uint32_t)(media) & 0xff00) << 8 | ((__uint32_t)(media) & 0xff0000) >> 8 | ((__uint32_t)(media) & 0xff000000) >> 24) : __swap32md (media)); |
7985 | if (mcx_access_hca_reg(sc, MCX_REG_PTYS0x5004, MCX_REG_OP_WRITE0, &ptys, |
7986 | sizeof(ptys)) != 0) { |
7987 | printf("%s: unable to set port media type/speed\n", |
7988 | DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
7989 | error = EIO5; |
7990 | } |
7991 | |
7992 | /* re-enable the port to start negotiation */ |
7993 | memset(&paos, 0, sizeof(paos))__builtin_memset((&paos), (0), (sizeof(paos))); |
7994 | paos.rp_local_port = 1; |
7995 | paos.rp_admin_status = MCX_REG_PAOS_ADMIN_STATUS_UP1; |
7996 | paos.rp_admin_state_update = MCX_REG_PAOS_ADMIN_STATE_UPDATE_EN(1 << 7); |
7997 | if (mcx_access_hca_reg(sc, MCX_REG_PAOS0x5006, MCX_REG_OP_WRITE0, &paos, |
7998 | sizeof(paos)) != 0) { |
7999 | printf("%s: unable to set port state to up\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
8000 | error = EIO5; |
8001 | } |
8002 | |
8003 | return error; |
8004 | } |
8005 | |
8006 | static void |
8007 | mcx_port_change(void *xsc) |
8008 | { |
8009 | struct mcx_softc *sc = xsc; |
8010 | struct ifnet *ifp = &sc->sc_ac.ac_if; |
8011 | struct mcx_reg_ptys ptys = { |
8012 | .rp_local_port = 1, |
8013 | .rp_proto_mask = MCX_REG_PTYS_PROTO_MASK_ETH(1 << 2), |
8014 | }; |
8015 | int link_state = LINK_STATE_DOWN2; |
8016 | |
8017 | if (mcx_access_hca_reg(sc, MCX_REG_PTYS0x5004, MCX_REG_OP_READ1, &ptys, |
8018 | sizeof(ptys)) == 0) { |
8019 | uint32_t proto_oper = betoh32(ptys.rp_eth_proto_oper)(__uint32_t)(__builtin_constant_p(ptys.rp_eth_proto_oper) ? ( __uint32_t)(((__uint32_t)(ptys.rp_eth_proto_oper) & 0xff) << 24 | ((__uint32_t)(ptys.rp_eth_proto_oper) & 0xff00 ) << 8 | ((__uint32_t)(ptys.rp_eth_proto_oper) & 0xff0000 ) >> 8 | ((__uint32_t)(ptys.rp_eth_proto_oper) & 0xff000000 ) >> 24) : __swap32md(ptys.rp_eth_proto_oper)); |
8020 | uint64_t baudrate = 0; |
8021 | unsigned int i; |
8022 | |
8023 | if (proto_oper != 0) |
8024 | link_state = LINK_STATE_FULL_DUPLEX6; |
8025 | |
8026 | for (i = 0; i < nitems(mcx_eth_cap_map)(sizeof((mcx_eth_cap_map)) / sizeof((mcx_eth_cap_map)[0])); i++) { |
8027 | const struct mcx_eth_proto_capability *cap; |
8028 | if (!ISSET(proto_oper, 1 << i)((proto_oper) & (1 << i))) |
8029 | continue; |
8030 | |
8031 | cap = &mcx_eth_cap_map[i]; |
8032 | if (cap->cap_baudrate == 0) |
8033 | continue; |
8034 | |
8035 | baudrate = cap->cap_baudrate; |
8036 | break; |
8037 | } |
8038 | |
8039 | ifp->if_baudrateif_data.ifi_baudrate = baudrate; |
8040 | } |
8041 | |
8042 | if (link_state != ifp->if_link_stateif_data.ifi_link_state) { |
8043 | ifp->if_link_stateif_data.ifi_link_state = link_state; |
8044 | if_link_state_change(ifp); |
8045 | } |
8046 | } |
8047 | |
8048 | static inline uint32_t |
8049 | mcx_rd(struct mcx_softc *sc, bus_size_t r) |
8050 | { |
8051 | uint32_t word; |
8052 | |
8053 | word = bus_space_read_raw_4(sc->sc_memt, sc->sc_memh, r)((sc->sc_memt)->read_4((sc->sc_memh), (r))); |
8054 | |
8055 | return (betoh32(word)(__uint32_t)(__builtin_constant_p(word) ? (__uint32_t)(((__uint32_t )(word) & 0xff) << 24 | ((__uint32_t)(word) & 0xff00 ) << 8 | ((__uint32_t)(word) & 0xff0000) >> 8 | ((__uint32_t)(word) & 0xff000000) >> 24) : __swap32md (word))); |
8056 | } |
8057 | |
8058 | static inline void |
8059 | mcx_wr(struct mcx_softc *sc, bus_size_t r, uint32_t v) |
8060 | { |
8061 | bus_space_write_raw_4(sc->sc_memt, sc->sc_memh, r, htobe32(v))((sc->sc_memt)->write_4((sc->sc_memh), (r), ((__uint32_t )(__builtin_constant_p(v) ? (__uint32_t)(((__uint32_t)(v) & 0xff) << 24 | ((__uint32_t)(v) & 0xff00) << 8 | ((__uint32_t)(v) & 0xff0000) >> 8 | ((__uint32_t )(v) & 0xff000000) >> 24) : __swap32md(v))))); |
8062 | } |
8063 | |
/*
 * Issue a bus space barrier of type `f' covering `l' bytes at offset
 * `r' of the device BAR.
 */
static inline void
mcx_bar(struct mcx_softc *sc, bus_size_t r, bus_size_t l, int f)
{
	bus_space_barrier(sc->sc_memt, sc->sc_memh, r, l, f);
}
8069 | |
/*
 * Read the device's 64-bit internal timer, which is exposed as two
 * 32-bit registers.  The high word is re-read after the low word; if
 * it changed, a carry occurred between the two reads and the pair is
 * retried, so the returned value is never torn.
 */
static uint64_t
mcx_timer(struct mcx_softc *sc)
{
	uint32_t hi, lo, ni;

	hi = mcx_rd(sc, MCX_INTERNAL_TIMER_H);
	for (;;) {
		lo = mcx_rd(sc, MCX_INTERNAL_TIMER_L);
		/* order the low-word read before the second high read */
		mcx_bar(sc, MCX_INTERNAL_TIMER_L, 8, BUS_SPACE_BARRIER_READ);
		ni = mcx_rd(sc, MCX_INTERNAL_TIMER_H);

		/* high word stable: lo belongs with hi */
		if (ni == hi)
			break;

		hi = ni;
	}

	return (((uint64_t)hi << 32) | (uint64_t)lo);
}
8089 | |
/*
 * Allocate a single physically contiguous DMA buffer of `size' bytes
 * aligned to `align', zeroed, and mapped into kernel virtual address
 * space.  On success all resources are recorded in `mxm' for a later
 * mcx_dmamem_free().  Returns 0 on success, 1 on failure; on failure
 * everything acquired so far is released via the goto ladder below.
 */
static int
mcx_dmamem_alloc(struct mcx_softc *sc, struct mcx_dmamem *mxm,
    bus_size_t size, u_int align)
{
	mxm->mxm_size = size;

	/* one segment only: the device needs a contiguous region */
	if (bus_dmamap_create(sc->sc_dmat, mxm->mxm_size, 1,
	    mxm->mxm_size, 0,
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
	    &mxm->mxm_map) != 0)
		return (1);
	if (bus_dmamem_alloc(sc->sc_dmat, mxm->mxm_size,
	    align, 0, &mxm->mxm_seg, 1, &mxm->mxm_nsegs,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO) != 0)
		goto destroy;
	if (bus_dmamem_map(sc->sc_dmat, &mxm->mxm_seg, mxm->mxm_nsegs,
	    mxm->mxm_size, &mxm->mxm_kva, BUS_DMA_WAITOK) != 0)
		goto free;
	if (bus_dmamap_load(sc->sc_dmat, mxm->mxm_map, mxm->mxm_kva,
	    mxm->mxm_size, NULL, BUS_DMA_WAITOK) != 0)
		goto unmap;

	return (0);
unmap:
	bus_dmamem_unmap(sc->sc_dmat, mxm->mxm_kva, mxm->mxm_size);
free:
	bus_dmamem_free(sc->sc_dmat, &mxm->mxm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, mxm->mxm_map);
	return (1);
}
8121 | |
/*
 * Zero the entire kernel-visible mapping of a DMA buffer.
 */
static void
mcx_dmamem_zero(struct mcx_dmamem *mxm)
{
	memset(MCX_DMA_KVA(mxm), 0, MCX_DMA_LEN(mxm));
}
8127 | |
/*
 * Release a DMA buffer set up by mcx_dmamem_alloc().  Teardown is the
 * exact reverse of allocation: unload, unmap, free pages, destroy map.
 */
static void
mcx_dmamem_free(struct mcx_softc *sc, struct mcx_dmamem *mxm)
{
	bus_dmamap_unload(sc->sc_dmat, mxm->mxm_map);
	bus_dmamem_unmap(sc->sc_dmat, mxm->mxm_kva, mxm->mxm_size);
	bus_dmamem_free(sc->sc_dmat, &mxm->mxm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, mxm->mxm_map);
}
8136 | |
/*
 * Allocate `pages' pages of host memory for the device (never mapped
 * into kernel va; the device accesses it directly through the dmamap).
 * The memory may come back in fewer than `pages' segments, in which
 * case the oversized segment array is replaced by a right-sized copy.
 * Returns 0 on success, -1 on failure.
 */
static int
mcx_hwmem_alloc(struct mcx_softc *sc, struct mcx_hwmem *mhm, unsigned int pages)
{
	bus_dma_segment_t *segs;
	bus_size_t len = pages * MCX_PAGE_SIZE;
	size_t seglen;

	/* worst case: one segment per page */
	segs = mallocarray(sizeof(*segs), pages, M_DEVBUF, M_WAITOK|M_CANFAIL);
	if (segs == NULL)
		return (-1);

	seglen = sizeof(*segs) * pages;

	if (bus_dmamem_alloc(sc->sc_dmat, len, MCX_PAGE_SIZE, 0,
	    segs, pages, &mhm->mhm_seg_count, BUS_DMA_NOWAIT) != 0)
		goto free_segs;

	if (mhm->mhm_seg_count < pages) {
		size_t nseglen;

		/* shrink the segment array to the count actually used */
		mhm->mhm_segs = mallocarray(sizeof(*mhm->mhm_segs),
		    mhm->mhm_seg_count, M_DEVBUF, M_WAITOK|M_CANFAIL);
		if (mhm->mhm_segs == NULL)
			goto free_dmamem;

		nseglen = sizeof(*mhm->mhm_segs) * mhm->mhm_seg_count;

		memcpy(mhm->mhm_segs, segs, nseglen);

		free(segs, M_DEVBUF, seglen);

		/* segs/seglen now track the right-sized array for cleanup */
		segs = mhm->mhm_segs;
		seglen = nseglen;
	} else
		mhm->mhm_segs = segs;

	if (bus_dmamap_create(sc->sc_dmat, len, pages, MCX_PAGE_SIZE,
	    MCX_PAGE_SIZE, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW /*|BUS_DMA_64BIT*/,
	    &mhm->mhm_map) != 0)
		goto free_dmamem;

	if (bus_dmamap_load_raw(sc->sc_dmat, mhm->mhm_map,
	    mhm->mhm_segs, mhm->mhm_seg_count, len, BUS_DMA_NOWAIT) != 0)
		goto destroy;

	bus_dmamap_sync(sc->sc_dmat, mhm->mhm_map,
	    0, mhm->mhm_map->dm_mapsize, BUS_DMASYNC_PRERW);

	mhm->mhm_npages = pages;

	return (0);

destroy:
	bus_dmamap_destroy(sc->sc_dmat, mhm->mhm_map);
free_dmamem:
	bus_dmamem_free(sc->sc_dmat, mhm->mhm_segs, mhm->mhm_seg_count);
free_segs:
	free(segs, M_DEVBUF, seglen);
	mhm->mhm_segs = NULL;

	return (-1);
}
8199 | |
/*
 * Release device host memory set up by mcx_hwmem_alloc().  A zero
 * page count marks an unallocated (or already freed) mhm, making this
 * safe to call on one.
 */
static void
mcx_hwmem_free(struct mcx_softc *sc, struct mcx_hwmem *mhm)
{
	if (mhm->mhm_npages == 0)
		return;

	bus_dmamap_sync(sc->sc_dmat, mhm->mhm_map,
	    0, mhm->mhm_map->dm_mapsize, BUS_DMASYNC_POSTRW);

	bus_dmamap_unload(sc->sc_dmat, mhm->mhm_map);
	bus_dmamap_destroy(sc->sc_dmat, mhm->mhm_map);
	bus_dmamem_free(sc->sc_dmat, mhm->mhm_segs, mhm->mhm_seg_count);
	free(mhm->mhm_segs, M_DEVBUF,
	    sizeof(*mhm->mhm_segs) * mhm->mhm_seg_count);

	/* mark as freed so a second call is a no-op */
	mhm->mhm_npages = 0;
}
8217 | |
8218 | #if NKSTAT0 > 0 |
/*
 * Template for one counter in a PPCNT register counter group:
 * the kstat name it is exported under and its reporting unit.
 */
struct mcx_ppcnt {
	char			name[KSTAT_KV_NAMELEN];
	enum kstat_kv_unit	unit;
};
8223 | |
/*
 * IEEE 802.3 PPCNT counter group template.  Entry order must follow
 * the device's counter set layout, since mcx_kstat_ppcnt_read()
 * copies the counters by index; the CTASSERT pins the entry count.
 */
static const struct mcx_ppcnt mcx_ppcnt_ieee8023_tpl[] = {
	{ "Good Tx",		KSTAT_KV_U_PACKETS, },
	{ "Good Rx",		KSTAT_KV_U_PACKETS, },
	{ "FCS errs",		KSTAT_KV_U_PACKETS, },
	{ "Alignment Errs",	KSTAT_KV_U_PACKETS, },
	{ "Good Tx",		KSTAT_KV_U_BYTES, },
	{ "Good Rx",		KSTAT_KV_U_BYTES, },
	{ "Multicast Tx",	KSTAT_KV_U_PACKETS, },
	{ "Broadcast Tx",	KSTAT_KV_U_PACKETS, },
	{ "Multicast Rx",	KSTAT_KV_U_PACKETS, },
	{ "Broadcast Rx",	KSTAT_KV_U_PACKETS, },
	{ "In Range Len",	KSTAT_KV_U_PACKETS, },
	{ "Out Of Range Len",	KSTAT_KV_U_PACKETS, },
	{ "Frame Too Long",	KSTAT_KV_U_PACKETS, },
	{ "Symbol Errs",	KSTAT_KV_U_PACKETS, },
	{ "MAC Ctrl Tx",	KSTAT_KV_U_PACKETS, },
	{ "MAC Ctrl Rx",	KSTAT_KV_U_PACKETS, },
	{ "MAC Ctrl Unsup",	KSTAT_KV_U_PACKETS, },
	{ "Pause Rx",		KSTAT_KV_U_PACKETS, },
	{ "Pause Tx",		KSTAT_KV_U_PACKETS, },
};
CTASSERT(nitems(mcx_ppcnt_ieee8023_tpl) == mcx_ppcnt_ieee8023_count);
8246 | |
/*
 * RFC 2863 (interfaces MIB) PPCNT counter group template.  Entry
 * order matches the device counter set; count pinned by CTASSERT.
 */
static const struct mcx_ppcnt mcx_ppcnt_rfc2863_tpl[] = {
	{ "Rx Bytes",		KSTAT_KV_U_BYTES, },
	{ "Rx Unicast",		KSTAT_KV_U_PACKETS, },
	{ "Rx Discards",	KSTAT_KV_U_PACKETS, },
	{ "Rx Errors",		KSTAT_KV_U_PACKETS, },
	{ "Rx Unknown Proto",	KSTAT_KV_U_PACKETS, },
	{ "Tx Bytes",		KSTAT_KV_U_BYTES, },
	{ "Tx Unicast",		KSTAT_KV_U_PACKETS, },
	{ "Tx Discards",	KSTAT_KV_U_PACKETS, },
	{ "Tx Errors",		KSTAT_KV_U_PACKETS, },
	{ "Rx Multicast",	KSTAT_KV_U_PACKETS, },
	{ "Rx Broadcast",	KSTAT_KV_U_PACKETS, },
	{ "Tx Multicast",	KSTAT_KV_U_PACKETS, },
	{ "Tx Broadcast",	KSTAT_KV_U_PACKETS, },
};
CTASSERT(nitems(mcx_ppcnt_rfc2863_tpl) == mcx_ppcnt_rfc2863_count);
8263 | |
/*
 * RFC 2819 (RMON etherStats) PPCNT counter group template, including
 * the size-bucket histogram.  Entry order matches the device counter
 * set; count pinned by CTASSERT.
 */
static const struct mcx_ppcnt mcx_ppcnt_rfc2819_tpl[] = {
	{ "Drop Events",	KSTAT_KV_U_PACKETS, },
	{ "Octets",		KSTAT_KV_U_BYTES, },
	{ "Packets",		KSTAT_KV_U_PACKETS, },
	{ "Broadcasts",		KSTAT_KV_U_PACKETS, },
	{ "Multicasts",		KSTAT_KV_U_PACKETS, },
	{ "CRC Align Errs",	KSTAT_KV_U_PACKETS, },
	{ "Undersize",		KSTAT_KV_U_PACKETS, },
	{ "Oversize",		KSTAT_KV_U_PACKETS, },
	{ "Fragments",		KSTAT_KV_U_PACKETS, },
	{ "Jabbers",		KSTAT_KV_U_PACKETS, },
	{ "Collisions",		KSTAT_KV_U_NONE, },
	{ "64B",		KSTAT_KV_U_PACKETS, },
	{ "65-127B",		KSTAT_KV_U_PACKETS, },
	{ "128-255B",		KSTAT_KV_U_PACKETS, },
	{ "256-511B",		KSTAT_KV_U_PACKETS, },
	{ "512-1023B",		KSTAT_KV_U_PACKETS, },
	{ "1024-1518B",		KSTAT_KV_U_PACKETS, },
	{ "1519-2047B",		KSTAT_KV_U_PACKETS, },
	{ "2048-4095B",		KSTAT_KV_U_PACKETS, },
	{ "4096-8191B",		KSTAT_KV_U_PACKETS, },
	{ "8192-10239B",	KSTAT_KV_U_PACKETS, },
};
CTASSERT(nitems(mcx_ppcnt_rfc2819_tpl) == mcx_ppcnt_rfc2819_count);
8288 | |
/*
 * RFC 3635 (EtherLike MIB) PPCNT counter group template.  Entry order
 * matches the device counter set; count pinned by CTASSERT.
 */
static const struct mcx_ppcnt mcx_ppcnt_rfc3635_tpl[] = {
	{ "Alignment Errs",	KSTAT_KV_U_PACKETS, },
	{ "FCS Errs",		KSTAT_KV_U_PACKETS, },
	{ "Single Colls",	KSTAT_KV_U_PACKETS, },
	{ "Multiple Colls",	KSTAT_KV_U_PACKETS, },
	{ "SQE Test Errs",	KSTAT_KV_U_NONE, },
	{ "Deferred Tx",	KSTAT_KV_U_PACKETS, },
	{ "Late Colls",		KSTAT_KV_U_NONE, },
	{ "Exess Colls",	KSTAT_KV_U_NONE, },	/* sic: name is ABI */
	{ "Int MAC Tx Errs",	KSTAT_KV_U_PACKETS, },
	{ "CSM Sense Errs",	KSTAT_KV_U_NONE, },
	{ "Too Long",		KSTAT_KV_U_PACKETS, },
	{ "Int MAC Rx Errs",	KSTAT_KV_U_PACKETS, },
	{ "Symbol Errs",	KSTAT_KV_U_NONE, },
	{ "Unknown Control",	KSTAT_KV_U_PACKETS, },
	{ "Pause Rx",		KSTAT_KV_U_PACKETS, },
	{ "Pause Tx",		KSTAT_KV_U_PACKETS, },
};
CTASSERT(nitems(mcx_ppcnt_rfc3635_tpl) == mcx_ppcnt_rfc3635_count);
8308 | |
/*
 * Descriptor binding a PPCNT counter group to its kstat: the kstat
 * name, the counter template array and its length, and the group id
 * written into ppcnt_grp when reading the register.
 */
struct mcx_kstat_ppcnt {
	const char		*ksp_name;
	const struct mcx_ppcnt	*ksp_tpl;
	unsigned int		 ksp_n;
	uint8_t			 ksp_grp;
};
8315 | |
/* one descriptor per exported PPCNT counter group */

static const struct mcx_kstat_ppcnt mcx_kstat_ppcnt_ieee8023 = {
	.ksp_name =		"ieee802.3",
	.ksp_tpl =		mcx_ppcnt_ieee8023_tpl,
	.ksp_n =		nitems(mcx_ppcnt_ieee8023_tpl),
	.ksp_grp =		MCX_REG_PPCNT_GRP_IEEE8023,
};

static const struct mcx_kstat_ppcnt mcx_kstat_ppcnt_rfc2863 = {
	.ksp_name =		"rfc2863",
	.ksp_tpl =		mcx_ppcnt_rfc2863_tpl,
	.ksp_n =		nitems(mcx_ppcnt_rfc2863_tpl),
	.ksp_grp =		MCX_REG_PPCNT_GRP_RFC2863,
};

static const struct mcx_kstat_ppcnt mcx_kstat_ppcnt_rfc2819 = {
	.ksp_name =		"rfc2819",
	.ksp_tpl =		mcx_ppcnt_rfc2819_tpl,
	.ksp_n =		nitems(mcx_ppcnt_rfc2819_tpl),
	.ksp_grp =		MCX_REG_PPCNT_GRP_RFC2819,
};

static const struct mcx_kstat_ppcnt mcx_kstat_ppcnt_rfc3635 = {
	.ksp_name =		"rfc3635",
	.ksp_tpl =		mcx_ppcnt_rfc3635_tpl,
	.ksp_n =		nitems(mcx_ppcnt_rfc3635_tpl),
	.ksp_grp =		MCX_REG_PPCNT_GRP_RFC3635,
};
8343 | |
8344 | static int mcx_kstat_ppcnt_read(struct kstat *); |
8345 | |
8346 | static void mcx_kstat_attach_tmps(struct mcx_softc *sc); |
8347 | static void mcx_kstat_attach_queues(struct mcx_softc *sc); |
8348 | |
8349 | static struct kstat * |
8350 | mcx_kstat_attach_ppcnt(struct mcx_softc *sc, |
8351 | const struct mcx_kstat_ppcnt *ksp) |
8352 | { |
8353 | struct kstat *ks; |
8354 | struct kstat_kv *kvs; |
8355 | unsigned int i; |
8356 | |
8357 | ks = kstat_create(DEVNAME(sc)((sc)->sc_dev.dv_xname), 0, ksp->ksp_name, 0, KSTAT_T_KV, 0); |
8358 | if (ks == NULL((void *)0)) |
8359 | return (NULL((void *)0)); |
8360 | |
8361 | kvs = mallocarray(ksp->ksp_n, sizeof(*kvs), |
8362 | M_DEVBUF2, M_WAITOK0x0001); |
8363 | |
8364 | for (i = 0; i < ksp->ksp_n; i++) { |
8365 | const struct mcx_ppcnt *tpl = &ksp->ksp_tpl[i]; |
8366 | |
8367 | kstat_kv_unit_init(&kvs[i], tpl->name, |
8368 | KSTAT_KV_T_COUNTER64, tpl->unit); |
8369 | } |
8370 | |
8371 | ks->ks_softc = sc; |
8372 | ks->ks_ptr = (void *)ksp; |
8373 | ks->ks_data = kvs; |
8374 | ks->ks_datalen = ksp->ksp_n * sizeof(*kvs); |
8375 | ks->ks_read = mcx_kstat_ppcnt_read; |
8376 | |
8377 | kstat_install(ks); |
8378 | |
8379 | return (ks); |
8380 | } |
8381 | |
/*
 * Attach all of the driver's kstats: one per PPCNT counter group,
 * plus the temperature and per-queue kstats.
 */
static void
mcx_kstat_attach(struct mcx_softc *sc)
{
	sc->sc_kstat_ieee8023 = mcx_kstat_attach_ppcnt(sc,
	    &mcx_kstat_ppcnt_ieee8023);
	sc->sc_kstat_rfc2863 = mcx_kstat_attach_ppcnt(sc,
	    &mcx_kstat_ppcnt_rfc2863);
	sc->sc_kstat_rfc2819 = mcx_kstat_attach_ppcnt(sc,
	    &mcx_kstat_ppcnt_rfc2819);
	sc->sc_kstat_rfc3635 = mcx_kstat_attach_ppcnt(sc,
	    &mcx_kstat_ppcnt_rfc3635);

	mcx_kstat_attach_tmps(sc);
	mcx_kstat_attach_queues(sc);
}
8397 | |
8398 | static int |
8399 | mcx_kstat_ppcnt_read(struct kstat *ks) |
8400 | { |
8401 | struct mcx_softc *sc = ks->ks_softc; |
8402 | struct mcx_kstat_ppcnt *ksp = ks->ks_ptr; |
8403 | struct mcx_reg_ppcnt ppcnt = { |
8404 | .ppcnt_grp = ksp->ksp_grp, |
8405 | .ppcnt_local_port = 1, |
8406 | }; |
8407 | struct kstat_kv *kvs = ks->ks_data; |
8408 | uint64_t *vs = (uint64_t *)&ppcnt.ppcnt_counter_set; |
8409 | unsigned int i; |
8410 | int rv; |
8411 | |
8412 | KERNEL_LOCK()_kernel_lock(); /* XXX */ |
8413 | rv = mcx_access_hca_reg(sc, MCX_REG_PPCNT0x5008, MCX_REG_OP_READ1, |
8414 | &ppcnt, sizeof(ppcnt)); |
8415 | KERNEL_UNLOCK()_kernel_unlock(); |
8416 | if (rv != 0) |
8417 | return (EIO5); |
8418 | |
8419 | nanouptime(&ks->ks_updated); |
8420 | |
8421 | for (i = 0; i < ksp->ksp_n; i++) |
8422 | kstat_kv_u64(&kvs[i]) = bemtoh64(&vs[i])(__uint64_t)(__builtin_constant_p(*(__uint64_t *)(&vs[i]) ) ? (__uint64_t)((((__uint64_t)(*(__uint64_t *)(&vs[i])) & 0xff) << 56) | ((__uint64_t)(*(__uint64_t *)(&vs[i ])) & 0xff00ULL) << 40 | ((__uint64_t)(*(__uint64_t *)(&vs[i])) & 0xff0000ULL) << 24 | ((__uint64_t )(*(__uint64_t *)(&vs[i])) & 0xff000000ULL) << 8 | ((__uint64_t)(*(__uint64_t *)(&vs[i])) & 0xff00000000ULL ) >> 8 | ((__uint64_t)(*(__uint64_t *)(&vs[i])) & 0xff0000000000ULL) >> 24 | ((__uint64_t)(*(__uint64_t * )(&vs[i])) & 0xff000000000000ULL) >> 40 | ((__uint64_t )(*(__uint64_t *)(&vs[i])) & 0xff00000000000000ULL) >> 56) : __swap64md(*(__uint64_t *)(&vs[i]))); |
8423 | |
8424 | return (0); |
8425 | } |
8426 | |
8427 | struct mcx_kstat_mtmp { |
8428 | struct kstat_kv ktmp_name; |
8429 | struct kstat_kv ktmp_temperature; |
8430 | struct kstat_kv ktmp_threshold_lo; |
8431 | struct kstat_kv ktmp_threshold_hi; |
8432 | }; |
8433 | |
8434 | static const struct mcx_kstat_mtmp mcx_kstat_mtmp_tpl = { |
8435 | KSTAT_KV_INITIALIZER("name", KSTAT_KV_T_ISTR), |
8436 | KSTAT_KV_INITIALIZER("temperature", KSTAT_KV_T_TEMP), |
8437 | KSTAT_KV_INITIALIZER("lo threshold", KSTAT_KV_T_TEMP), |
8438 | KSTAT_KV_INITIALIZER("hi threshold", KSTAT_KV_T_TEMP), |
8439 | }; |
8440 | |
8441 | static const struct timeval mcx_kstat_mtmp_rate = { 1, 0 }; |
8442 | |
8443 | static int mcx_kstat_mtmp_read(struct kstat *); |
8444 | |
8445 | static void |
8446 | mcx_kstat_attach_tmps(struct mcx_softc *sc) |
8447 | { |
8448 | struct kstat *ks; |
8449 | struct mcx_reg_mcam mcam; |
8450 | struct mcx_reg_mtcap mtcap; |
8451 | struct mcx_kstat_mtmp *ktmp; |
8452 | uint64_t map; |
8453 | unsigned int i, n; |
8454 | |
8455 | memset(&mtcap, 0, sizeof(mtcap))__builtin_memset((&mtcap), (0), (sizeof(mtcap))); |
8456 | memset(&mcam, 0, sizeof(mcam))__builtin_memset((&mcam), (0), (sizeof(mcam))); |
8457 | |
8458 | if (sc->sc_mcam_reg == 0) { |
8459 | /* no management capabilities */ |
8460 | return; |
8461 | } |
8462 | |
8463 | if (mcx_access_hca_reg(sc, MCX_REG_MCAM0x907f, MCX_REG_OP_READ1, |
8464 | &mcam, sizeof(mcam)) != 0) { |
8465 | /* unable to check management capabilities? */ |
8466 | return; |
8467 | } |
8468 | |
8469 | if (MCX_BITFIELD_BIT(mcam.mcam_feature_cap_mask,(mcam.mcam_feature_cap_mask[(sizeof mcam.mcam_feature_cap_mask - 1) - (6 / 8)] & (6 % 8)) |
8470 | MCX_MCAM_FEATURE_CAP_SENSOR_MAP)(mcam.mcam_feature_cap_mask[(sizeof mcam.mcam_feature_cap_mask - 1) - (6 / 8)] & (6 % 8)) == 0) { |
8471 | /* no sensor map */ |
8472 | return; |
8473 | } |
8474 | |
8475 | if (mcx_access_hca_reg(sc, MCX_REG_MTCAP0x9009, MCX_REG_OP_READ1, |
8476 | &mtcap, sizeof(mtcap)) != 0) { |
8477 | /* unable to find temperature sensors */ |
8478 | return; |
8479 | } |
8480 | |
8481 | sc->sc_kstat_mtmp_count = mtcap.mtcap_sensor_count; |
8482 | sc->sc_kstat_mtmp = mallocarray(sc->sc_kstat_mtmp_count, |
8483 | sizeof(*sc->sc_kstat_mtmp), M_DEVBUF2, M_WAITOK0x0001); |
8484 | |
8485 | n = 0; |
8486 | map = bemtoh64(&mtcap.mtcap_sensor_map)(__uint64_t)(__builtin_constant_p(*(__uint64_t *)(&mtcap. mtcap_sensor_map)) ? (__uint64_t)((((__uint64_t)(*(__uint64_t *)(&mtcap.mtcap_sensor_map)) & 0xff) << 56) | ( (__uint64_t)(*(__uint64_t *)(&mtcap.mtcap_sensor_map)) & 0xff00ULL) << 40 | ((__uint64_t)(*(__uint64_t *)(& mtcap.mtcap_sensor_map)) & 0xff0000ULL) << 24 | ((__uint64_t )(*(__uint64_t *)(&mtcap.mtcap_sensor_map)) & 0xff000000ULL ) << 8 | ((__uint64_t)(*(__uint64_t *)(&mtcap.mtcap_sensor_map )) & 0xff00000000ULL) >> 8 | ((__uint64_t)(*(__uint64_t *)(&mtcap.mtcap_sensor_map)) & 0xff0000000000ULL) >> 24 | ((__uint64_t)(*(__uint64_t *)(&mtcap.mtcap_sensor_map )) & 0xff000000000000ULL) >> 40 | ((__uint64_t)(*(__uint64_t *)(&mtcap.mtcap_sensor_map)) & 0xff00000000000000ULL ) >> 56) : __swap64md(*(__uint64_t *)(&mtcap.mtcap_sensor_map ))); |
8487 | for (i = 0; i < sizeof(map) * NBBY8; i++) { |
8488 | if (!ISSET(map, (1ULL << i))((map) & ((1ULL << i)))) |
8489 | continue; |
8490 | |
8491 | ks = kstat_create(DEVNAME(sc)((sc)->sc_dev.dv_xname), 0, "temperature", i, |
8492 | KSTAT_T_KV, 0); |
8493 | if (ks == NULL((void *)0)) { |
8494 | /* unable to attach temperature sensor %u, i */ |
8495 | continue; |
8496 | } |
8497 | |
8498 | ktmp = malloc(sizeof(*ktmp), M_DEVBUF2, M_WAITOK0x0001|M_ZERO0x0008); |
8499 | *ktmp = mcx_kstat_mtmp_tpl; |
8500 | |
8501 | ks->ks_data = ktmp; |
8502 | ks->ks_datalen = sizeof(*ktmp); |
8503 | TIMEVAL_TO_TIMESPEC(&mcx_kstat_mtmp_rate, &ks->ks_interval)do { (&ks->ks_interval)->tv_sec = (&mcx_kstat_mtmp_rate )->tv_sec; (&ks->ks_interval)->tv_nsec = (&mcx_kstat_mtmp_rate )->tv_usec * 1000; } while (0); |
8504 | ks->ks_read = mcx_kstat_mtmp_read; |
8505 | |
8506 | ks->ks_softc = sc; |
8507 | kstat_install(ks); |
8508 | |
8509 | sc->sc_kstat_mtmp[n++] = ks; |
8510 | if (n >= sc->sc_kstat_mtmp_count) |
8511 | break; |
8512 | } |
8513 | } |
8514 | |
8515 | static uint64_t |
8516 | mcx_tmp_to_uK(uint16_t *t) |
8517 | { |
8518 | int64_t mt = (int16_t)bemtoh16(t)(__uint16_t)(__builtin_constant_p(*(__uint16_t *)(t)) ? (__uint16_t )(((__uint16_t)(*(__uint16_t *)(t)) & 0xffU) << 8 | ((__uint16_t)(*(__uint16_t *)(t)) & 0xff00U) >> 8) : __swap16md(*(__uint16_t *)(t))); /* 0.125 C units */ |
8519 | mt *= 1000000 / 8; /* convert to uC */ |
8520 | mt += 273150000; /* convert to uK */ |
8521 | |
8522 | return (mt); |
8523 | } |
8524 | |
8525 | static int |
8526 | mcx_kstat_mtmp_read(struct kstat *ks) |
8527 | { |
8528 | struct mcx_softc *sc = ks->ks_softc; |
8529 | struct mcx_kstat_mtmp *ktmp = ks->ks_data; |
8530 | struct mcx_reg_mtmp mtmp; |
8531 | int rv; |
8532 | struct timeval updated; |
8533 | |
8534 | TIMESPEC_TO_TIMEVAL(&updated, &ks->ks_updated)do { (&updated)->tv_sec = (&ks->ks_updated)-> tv_sec; (&updated)->tv_usec = (&ks->ks_updated) ->tv_nsec / 1000; } while (0); |
8535 | |
8536 | if (!ratecheck(&updated, &mcx_kstat_mtmp_rate)) |
8537 | return (0); |
8538 | |
8539 | memset(&mtmp, 0, sizeof(mtmp))__builtin_memset((&mtmp), (0), (sizeof(mtmp))); |
8540 | htobem16(&mtmp.mtmp_sensor_index, ks->ks_unit)(*(__uint16_t *)(&mtmp.mtmp_sensor_index) = (__uint16_t)( __builtin_constant_p(ks->ks_unit) ? (__uint16_t)(((__uint16_t )(ks->ks_unit) & 0xffU) << 8 | ((__uint16_t)(ks-> ks_unit) & 0xff00U) >> 8) : __swap16md(ks->ks_unit ))); |
8541 | |
8542 | KERNEL_LOCK()_kernel_lock(); /* XXX */ |
8543 | rv = mcx_access_hca_reg(sc, MCX_REG_MTMP0x900a, MCX_REG_OP_READ1, |
8544 | &mtmp, sizeof(mtmp)); |
8545 | KERNEL_UNLOCK()_kernel_unlock(); |
8546 | if (rv != 0) |
8547 | return (EIO5); |
8548 | |
8549 | memset(kstat_kv_istr(&ktmp->ktmp_name), 0,__builtin_memset((kstat_kv_istr(&ktmp->ktmp_name)), (0 ), (sizeof(kstat_kv_istr(&ktmp->ktmp_name)))) |
8550 | sizeof(kstat_kv_istr(&ktmp->ktmp_name)))__builtin_memset((kstat_kv_istr(&ktmp->ktmp_name)), (0 ), (sizeof(kstat_kv_istr(&ktmp->ktmp_name)))); |
8551 | memcpy(kstat_kv_istr(&ktmp->ktmp_name),__builtin_memcpy((kstat_kv_istr(&ktmp->ktmp_name)), (mtmp .mtmp_sensor_name), (sizeof(mtmp.mtmp_sensor_name))) |
8552 | mtmp.mtmp_sensor_name, sizeof(mtmp.mtmp_sensor_name))__builtin_memcpy((kstat_kv_istr(&ktmp->ktmp_name)), (mtmp .mtmp_sensor_name), (sizeof(mtmp.mtmp_sensor_name))); |
8553 | kstat_kv_temp(&ktmp->ktmp_temperature) = |
8554 | mcx_tmp_to_uK(&mtmp.mtmp_temperature); |
8555 | kstat_kv_temp(&ktmp->ktmp_threshold_lo) = |
8556 | mcx_tmp_to_uK(&mtmp.mtmp_temperature_threshold_lo); |
8557 | kstat_kv_temp(&ktmp->ktmp_threshold_hi) = |
8558 | mcx_tmp_to_uK(&mtmp.mtmp_temperature_threshold_hi); |
8559 | |
8560 | TIMEVAL_TO_TIMESPEC(&updated, &ks->ks_updated)do { (&ks->ks_updated)->tv_sec = (&updated)-> tv_sec; (&ks->ks_updated)->tv_nsec = (&updated) ->tv_usec * 1000; } while (0); |
8561 | |
8562 | return (0); |
8563 | } |
8564 | |
8565 | struct mcx_queuestat { |
8566 | char name[KSTAT_KV_NAMELEN]; |
8567 | enum kstat_kv_type type; |
8568 | }; |
8569 | |
8570 | static const struct mcx_queuestat mcx_queue_kstat_tpl[] = { |
8571 | { "RQ SW prod", KSTAT_KV_T_COUNTER64 }, |
8572 | { "RQ HW prod", KSTAT_KV_T_COUNTER64 }, |
8573 | { "RQ HW cons", KSTAT_KV_T_COUNTER64 }, |
8574 | { "RQ HW state", KSTAT_KV_T_ISTR }, |
8575 | |
8576 | { "SQ SW prod", KSTAT_KV_T_COUNTER64 }, |
8577 | { "SQ SW cons", KSTAT_KV_T_COUNTER64 }, |
8578 | { "SQ HW prod", KSTAT_KV_T_COUNTER64 }, |
8579 | { "SQ HW cons", KSTAT_KV_T_COUNTER64 }, |
8580 | { "SQ HW state", KSTAT_KV_T_ISTR }, |
8581 | |
8582 | { "CQ SW cons", KSTAT_KV_T_COUNTER64 }, |
8583 | { "CQ HW prod", KSTAT_KV_T_COUNTER64 }, |
8584 | { "CQ HW cons", KSTAT_KV_T_COUNTER64 }, |
8585 | { "CQ HW notify", KSTAT_KV_T_COUNTER64 }, |
8586 | { "CQ HW solicit", KSTAT_KV_T_COUNTER64 }, |
8587 | { "CQ HW status", KSTAT_KV_T_ISTR }, |
8588 | { "CQ HW state", KSTAT_KV_T_ISTR }, |
8589 | |
8590 | { "EQ SW cons", KSTAT_KV_T_COUNTER64 }, |
8591 | { "EQ HW prod", KSTAT_KV_T_COUNTER64 }, |
8592 | { "EQ HW cons", KSTAT_KV_T_COUNTER64 }, |
8593 | { "EQ HW status", KSTAT_KV_T_ISTR }, |
8594 | { "EQ HW state", KSTAT_KV_T_ISTR }, |
8595 | }; |
8596 | |
8597 | static int mcx_kstat_queue_read(struct kstat *); |
8598 | |
8599 | static void |
8600 | mcx_kstat_attach_queues(struct mcx_softc *sc) |
8601 | { |
8602 | struct kstat *ks; |
8603 | struct kstat_kv *kvs; |
8604 | int q, i; |
8605 | |
8606 | for (q = 0; q < intrmap_count(sc->sc_intrmap); q++) { |
8607 | ks = kstat_create(DEVNAME(sc)((sc)->sc_dev.dv_xname), 0, "mcx-queues", q, |
8608 | KSTAT_T_KV, 0); |
8609 | if (ks == NULL((void *)0)) { |
8610 | /* unable to attach queue stats %u, q */ |
8611 | continue; |
8612 | } |
8613 | |
8614 | kvs = mallocarray(nitems(mcx_queue_kstat_tpl)(sizeof((mcx_queue_kstat_tpl)) / sizeof((mcx_queue_kstat_tpl) [0])), |
8615 | sizeof(*kvs), M_DEVBUF2, M_WAITOK0x0001); |
8616 | |
8617 | for (i = 0; i < nitems(mcx_queue_kstat_tpl)(sizeof((mcx_queue_kstat_tpl)) / sizeof((mcx_queue_kstat_tpl) [0])); i++) { |
8618 | const struct mcx_queuestat *tpl = |
8619 | &mcx_queue_kstat_tpl[i]; |
8620 | |
8621 | kstat_kv_init(&kvs[i], tpl->name, tpl->type); |
8622 | } |
8623 | |
8624 | ks->ks_softc = &sc->sc_queues[q]; |
8625 | ks->ks_data = kvs; |
8626 | ks->ks_datalen = nitems(mcx_queue_kstat_tpl)(sizeof((mcx_queue_kstat_tpl)) / sizeof((mcx_queue_kstat_tpl) [0])) * sizeof(*kvs); |
8627 | ks->ks_read = mcx_kstat_queue_read; |
8628 | |
8629 | sc->sc_queues[q].q_kstat = ks; |
8630 | kstat_install(ks); |
8631 | } |
8632 | } |
8633 | |
8634 | static int |
8635 | mcx_kstat_queue_read(struct kstat *ks) |
8636 | { |
8637 | struct mcx_queues *q = ks->ks_softc; |
8638 | struct mcx_softc *sc = q->q_sc; |
8639 | struct kstat_kv *kvs = ks->ks_data; |
8640 | union { |
8641 | struct mcx_rq_ctx rq; |
8642 | struct mcx_sq_ctx sq; |
8643 | struct mcx_cq_ctx cq; |
8644 | struct mcx_eq_ctx eq; |
8645 | } u; |
8646 | const char *text; |
8647 | int error = 0; |
8648 | |
8649 | KERNEL_LOCK()_kernel_lock(); |
8650 | |
8651 | if (mcx_query_rq(sc, &q->q_rx, &u.rq) != 0) { |
8652 | error = EIO5; |
8653 | goto out; |
8654 | } |
8655 | |
8656 | kstat_kv_u64(kvs++) = q->q_rx.rx_prod; |
8657 | kstat_kv_u64(kvs++) = bemtoh32(&u.rq.rq_wq.wq_sw_counter)(__uint32_t)(__builtin_constant_p(*(__uint32_t *)(&u.rq.rq_wq .wq_sw_counter)) ? (__uint32_t)(((__uint32_t)(*(__uint32_t *) (&u.rq.rq_wq.wq_sw_counter)) & 0xff) << 24 | (( __uint32_t)(*(__uint32_t *)(&u.rq.rq_wq.wq_sw_counter)) & 0xff00) << 8 | ((__uint32_t)(*(__uint32_t *)(&u.rq .rq_wq.wq_sw_counter)) & 0xff0000) >> 8 | ((__uint32_t )(*(__uint32_t *)(&u.rq.rq_wq.wq_sw_counter)) & 0xff000000 ) >> 24) : __swap32md(*(__uint32_t *)(&u.rq.rq_wq.wq_sw_counter ))); |
8658 | kstat_kv_u64(kvs++) = bemtoh32(&u.rq.rq_wq.wq_hw_counter)(__uint32_t)(__builtin_constant_p(*(__uint32_t *)(&u.rq.rq_wq .wq_hw_counter)) ? (__uint32_t)(((__uint32_t)(*(__uint32_t *) (&u.rq.rq_wq.wq_hw_counter)) & 0xff) << 24 | (( __uint32_t)(*(__uint32_t *)(&u.rq.rq_wq.wq_hw_counter)) & 0xff00) << 8 | ((__uint32_t)(*(__uint32_t *)(&u.rq .rq_wq.wq_hw_counter)) & 0xff0000) >> 8 | ((__uint32_t )(*(__uint32_t *)(&u.rq.rq_wq.wq_hw_counter)) & 0xff000000 ) >> 24) : __swap32md(*(__uint32_t *)(&u.rq.rq_wq.wq_hw_counter ))); |
8659 | switch ((bemtoh32(&u.rq.rq_flags)(__uint32_t)(__builtin_constant_p(*(__uint32_t *)(&u.rq.rq_flags )) ? (__uint32_t)(((__uint32_t)(*(__uint32_t *)(&u.rq.rq_flags )) & 0xff) << 24 | ((__uint32_t)(*(__uint32_t *)(& u.rq.rq_flags)) & 0xff00) << 8 | ((__uint32_t)(*(__uint32_t *)(&u.rq.rq_flags)) & 0xff0000) >> 8 | ((__uint32_t )(*(__uint32_t *)(&u.rq.rq_flags)) & 0xff000000) >> 24) : __swap32md(*(__uint32_t *)(&u.rq.rq_flags))) & MCX_RQ_CTX_STATE_MASK(0xf << 20)) >> |
8660 | MCX_RQ_CTX_STATE_SHIFT20) { |
8661 | case MCX_RQ_CTX_STATE_RST0: |
8662 | text = "RST"; |
8663 | break; |
8664 | case MCX_RQ_CTX_STATE_RDY1: |
8665 | text = "RDY"; |
8666 | break; |
8667 | case MCX_RQ_CTX_STATE_ERR3: |
8668 | text = "ERR"; |
8669 | break; |
8670 | default: |
8671 | text = "unknown"; |
8672 | break; |
8673 | } |
8674 | strlcpy(kstat_kv_istr(kvs), text, sizeof(kstat_kv_istr(kvs))); |
8675 | kvs++; |
8676 | |
8677 | if (mcx_query_sq(sc, &q->q_tx, &u.sq) != 0) { |
8678 | error = EIO5; |
8679 | goto out; |
8680 | } |
8681 | |
8682 | kstat_kv_u64(kvs++) = q->q_tx.tx_prod; |
8683 | kstat_kv_u64(kvs++) = q->q_tx.tx_cons; |
8684 | kstat_kv_u64(kvs++) = bemtoh32(&u.sq.sq_wq.wq_sw_counter)(__uint32_t)(__builtin_constant_p(*(__uint32_t *)(&u.sq.sq_wq .wq_sw_counter)) ? (__uint32_t)(((__uint32_t)(*(__uint32_t *) (&u.sq.sq_wq.wq_sw_counter)) & 0xff) << 24 | (( __uint32_t)(*(__uint32_t *)(&u.sq.sq_wq.wq_sw_counter)) & 0xff00) << 8 | ((__uint32_t)(*(__uint32_t *)(&u.sq .sq_wq.wq_sw_counter)) & 0xff0000) >> 8 | ((__uint32_t )(*(__uint32_t *)(&u.sq.sq_wq.wq_sw_counter)) & 0xff000000 ) >> 24) : __swap32md(*(__uint32_t *)(&u.sq.sq_wq.wq_sw_counter ))); |
8685 | kstat_kv_u64(kvs++) = bemtoh32(&u.sq.sq_wq.wq_hw_counter)(__uint32_t)(__builtin_constant_p(*(__uint32_t *)(&u.sq.sq_wq .wq_hw_counter)) ? (__uint32_t)(((__uint32_t)(*(__uint32_t *) (&u.sq.sq_wq.wq_hw_counter)) & 0xff) << 24 | (( __uint32_t)(*(__uint32_t *)(&u.sq.sq_wq.wq_hw_counter)) & 0xff00) << 8 | ((__uint32_t)(*(__uint32_t *)(&u.sq .sq_wq.wq_hw_counter)) & 0xff0000) >> 8 | ((__uint32_t )(*(__uint32_t *)(&u.sq.sq_wq.wq_hw_counter)) & 0xff000000 ) >> 24) : __swap32md(*(__uint32_t *)(&u.sq.sq_wq.wq_hw_counter ))); |
8686 | switch ((bemtoh32(&u.sq.sq_flags)(__uint32_t)(__builtin_constant_p(*(__uint32_t *)(&u.sq.sq_flags )) ? (__uint32_t)(((__uint32_t)(*(__uint32_t *)(&u.sq.sq_flags )) & 0xff) << 24 | ((__uint32_t)(*(__uint32_t *)(& u.sq.sq_flags)) & 0xff00) << 8 | ((__uint32_t)(*(__uint32_t *)(&u.sq.sq_flags)) & 0xff0000) >> 8 | ((__uint32_t )(*(__uint32_t *)(&u.sq.sq_flags)) & 0xff000000) >> 24) : __swap32md(*(__uint32_t *)(&u.sq.sq_flags))) & MCX_SQ_CTX_STATE_MASK(0xf << 20)) >> |
8687 | MCX_SQ_CTX_STATE_SHIFT20) { |
8688 | case MCX_SQ_CTX_STATE_RST0: |
8689 | text = "RST"; |
8690 | break; |
8691 | case MCX_SQ_CTX_STATE_RDY1: |
8692 | text = "RDY"; |
8693 | break; |
8694 | case MCX_SQ_CTX_STATE_ERR3: |
8695 | text = "ERR"; |
8696 | break; |
8697 | default: |
8698 | text = "unknown"; |
8699 | break; |
8700 | } |
8701 | strlcpy(kstat_kv_istr(kvs), text, sizeof(kstat_kv_istr(kvs))); |
8702 | kvs++; |
8703 | |
8704 | if (mcx_query_cq(sc, &q->q_cq, &u.cq) != 0) { |
8705 | error = EIO5; |
8706 | goto out; |
8707 | } |
8708 | |
8709 | kstat_kv_u64(kvs++) = q->q_cq.cq_cons; |
8710 | kstat_kv_u64(kvs++) = bemtoh32(&u.cq.cq_producer_counter)(__uint32_t)(__builtin_constant_p(*(__uint32_t *)(&u.cq.cq_producer_counter )) ? (__uint32_t)(((__uint32_t)(*(__uint32_t *)(&u.cq.cq_producer_counter )) & 0xff) << 24 | ((__uint32_t)(*(__uint32_t *)(& u.cq.cq_producer_counter)) & 0xff00) << 8 | ((__uint32_t )(*(__uint32_t *)(&u.cq.cq_producer_counter)) & 0xff0000 ) >> 8 | ((__uint32_t)(*(__uint32_t *)(&u.cq.cq_producer_counter )) & 0xff000000) >> 24) : __swap32md(*(__uint32_t * )(&u.cq.cq_producer_counter))); |
8711 | kstat_kv_u64(kvs++) = bemtoh32(&u.cq.cq_consumer_counter)(__uint32_t)(__builtin_constant_p(*(__uint32_t *)(&u.cq.cq_consumer_counter )) ? (__uint32_t)(((__uint32_t)(*(__uint32_t *)(&u.cq.cq_consumer_counter )) & 0xff) << 24 | ((__uint32_t)(*(__uint32_t *)(& u.cq.cq_consumer_counter)) & 0xff00) << 8 | ((__uint32_t )(*(__uint32_t *)(&u.cq.cq_consumer_counter)) & 0xff0000 ) >> 8 | ((__uint32_t)(*(__uint32_t *)(&u.cq.cq_consumer_counter )) & 0xff000000) >> 24) : __swap32md(*(__uint32_t * )(&u.cq.cq_consumer_counter))); |
8712 | kstat_kv_u64(kvs++) = bemtoh32(&u.cq.cq_last_notified)(__uint32_t)(__builtin_constant_p(*(__uint32_t *)(&u.cq.cq_last_notified )) ? (__uint32_t)(((__uint32_t)(*(__uint32_t *)(&u.cq.cq_last_notified )) & 0xff) << 24 | ((__uint32_t)(*(__uint32_t *)(& u.cq.cq_last_notified)) & 0xff00) << 8 | ((__uint32_t )(*(__uint32_t *)(&u.cq.cq_last_notified)) & 0xff0000 ) >> 8 | ((__uint32_t)(*(__uint32_t *)(&u.cq.cq_last_notified )) & 0xff000000) >> 24) : __swap32md(*(__uint32_t * )(&u.cq.cq_last_notified))); |
8713 | kstat_kv_u64(kvs++) = bemtoh32(&u.cq.cq_last_solicit)(__uint32_t)(__builtin_constant_p(*(__uint32_t *)(&u.cq.cq_last_solicit )) ? (__uint32_t)(((__uint32_t)(*(__uint32_t *)(&u.cq.cq_last_solicit )) & 0xff) << 24 | ((__uint32_t)(*(__uint32_t *)(& u.cq.cq_last_solicit)) & 0xff00) << 8 | ((__uint32_t )(*(__uint32_t *)(&u.cq.cq_last_solicit)) & 0xff0000) >> 8 | ((__uint32_t)(*(__uint32_t *)(&u.cq.cq_last_solicit )) & 0xff000000) >> 24) : __swap32md(*(__uint32_t * )(&u.cq.cq_last_solicit))); |
8714 | |
8715 | switch ((bemtoh32(&u.cq.cq_status)(__uint32_t)(__builtin_constant_p(*(__uint32_t *)(&u.cq.cq_status )) ? (__uint32_t)(((__uint32_t)(*(__uint32_t *)(&u.cq.cq_status )) & 0xff) << 24 | ((__uint32_t)(*(__uint32_t *)(& u.cq.cq_status)) & 0xff00) << 8 | ((__uint32_t)(*(__uint32_t *)(&u.cq.cq_status)) & 0xff0000) >> 8 | ((__uint32_t )(*(__uint32_t *)(&u.cq.cq_status)) & 0xff000000) >> 24) : __swap32md(*(__uint32_t *)(&u.cq.cq_status))) & MCX_CQ_CTX_STATUS_MASK(0xf << 28)) >> |
8716 | MCX_CQ_CTX_STATUS_SHIFT28) { |
8717 | case MCX_CQ_CTX_STATUS_OK0x0: |
8718 | text = "OK"; |
8719 | break; |
8720 | case MCX_CQ_CTX_STATUS_OVERFLOW0x9: |
8721 | text = "overflow"; |
8722 | break; |
8723 | case MCX_CQ_CTX_STATUS_WRITE_FAIL0xa: |
8724 | text = "write fail"; |
8725 | break; |
8726 | default: |
8727 | text = "unknown"; |
8728 | break; |
8729 | } |
8730 | strlcpy(kstat_kv_istr(kvs), text, sizeof(kstat_kv_istr(kvs))); |
8731 | kvs++; |
8732 | |
8733 | switch ((bemtoh32(&u.cq.cq_status)(__uint32_t)(__builtin_constant_p(*(__uint32_t *)(&u.cq.cq_status )) ? (__uint32_t)(((__uint32_t)(*(__uint32_t *)(&u.cq.cq_status )) & 0xff) << 24 | ((__uint32_t)(*(__uint32_t *)(& u.cq.cq_status)) & 0xff00) << 8 | ((__uint32_t)(*(__uint32_t *)(&u.cq.cq_status)) & 0xff0000) >> 8 | ((__uint32_t )(*(__uint32_t *)(&u.cq.cq_status)) & 0xff000000) >> 24) : __swap32md(*(__uint32_t *)(&u.cq.cq_status))) & MCX_CQ_CTX_STATE_MASK(0xf << 8)) >> |
8734 | MCX_CQ_CTX_STATE_SHIFT8) { |
8735 | case MCX_CQ_CTX_STATE_SOLICITED0x6: |
8736 | text = "solicited"; |
8737 | break; |
8738 | case MCX_CQ_CTX_STATE_ARMED0x9: |
8739 | text = "armed"; |
8740 | break; |
8741 | case MCX_CQ_CTX_STATE_FIRED0xa: |
8742 | text = "fired"; |
8743 | break; |
8744 | default: |
8745 | text = "unknown"; |
8746 | break; |
8747 | } |
8748 | strlcpy(kstat_kv_istr(kvs), text, sizeof(kstat_kv_istr(kvs))); |
8749 | kvs++; |
8750 | |
8751 | if (mcx_query_eq(sc, &q->q_eq, &u.eq) != 0) { |
8752 | error = EIO5; |
8753 | goto out; |
8754 | } |
8755 | |
8756 | kstat_kv_u64(kvs++) = q->q_eq.eq_cons; |
8757 | kstat_kv_u64(kvs++) = bemtoh32(&u.eq.eq_producer_counter)(__uint32_t)(__builtin_constant_p(*(__uint32_t *)(&u.eq.eq_producer_counter )) ? (__uint32_t)(((__uint32_t)(*(__uint32_t *)(&u.eq.eq_producer_counter )) & 0xff) << 24 | ((__uint32_t)(*(__uint32_t *)(& u.eq.eq_producer_counter)) & 0xff00) << 8 | ((__uint32_t )(*(__uint32_t *)(&u.eq.eq_producer_counter)) & 0xff0000 ) >> 8 | ((__uint32_t)(*(__uint32_t *)(&u.eq.eq_producer_counter )) & 0xff000000) >> 24) : __swap32md(*(__uint32_t * )(&u.eq.eq_producer_counter))); |
8758 | kstat_kv_u64(kvs++) = bemtoh32(&u.eq.eq_consumer_counter)(__uint32_t)(__builtin_constant_p(*(__uint32_t *)(&u.eq.eq_consumer_counter )) ? (__uint32_t)(((__uint32_t)(*(__uint32_t *)(&u.eq.eq_consumer_counter )) & 0xff) << 24 | ((__uint32_t)(*(__uint32_t *)(& u.eq.eq_consumer_counter)) & 0xff00) << 8 | ((__uint32_t )(*(__uint32_t *)(&u.eq.eq_consumer_counter)) & 0xff0000 ) >> 8 | ((__uint32_t)(*(__uint32_t *)(&u.eq.eq_consumer_counter )) & 0xff000000) >> 24) : __swap32md(*(__uint32_t * )(&u.eq.eq_consumer_counter))); |
8759 | |
8760 | switch ((bemtoh32(&u.eq.eq_status)(__uint32_t)(__builtin_constant_p(*(__uint32_t *)(&u.eq.eq_status )) ? (__uint32_t)(((__uint32_t)(*(__uint32_t *)(&u.eq.eq_status )) & 0xff) << 24 | ((__uint32_t)(*(__uint32_t *)(& u.eq.eq_status)) & 0xff00) << 8 | ((__uint32_t)(*(__uint32_t *)(&u.eq.eq_status)) & 0xff0000) >> 8 | ((__uint32_t )(*(__uint32_t *)(&u.eq.eq_status)) & 0xff000000) >> 24) : __swap32md(*(__uint32_t *)(&u.eq.eq_status))) & MCX_EQ_CTX_STATUS_MASK(0xf << 28)) >> |
8761 | MCX_EQ_CTX_STATUS_SHIFT28) { |
8762 | case MCX_EQ_CTX_STATUS_EQ_WRITE_FAILURE0xa: |
8763 | text = "write fail"; |
8764 | break; |
8765 | case MCX_EQ_CTX_STATUS_OK0x0: |
8766 | text = "OK"; |
8767 | break; |
8768 | default: |
8769 | text = "unknown"; |
8770 | break; |
8771 | } |
8772 | strlcpy(kstat_kv_istr(kvs), text, sizeof(kstat_kv_istr(kvs))); |
8773 | kvs++; |
8774 | |
8775 | switch ((bemtoh32(&u.eq.eq_status)(__uint32_t)(__builtin_constant_p(*(__uint32_t *)(&u.eq.eq_status )) ? (__uint32_t)(((__uint32_t)(*(__uint32_t *)(&u.eq.eq_status )) & 0xff) << 24 | ((__uint32_t)(*(__uint32_t *)(& u.eq.eq_status)) & 0xff00) << 8 | ((__uint32_t)(*(__uint32_t *)(&u.eq.eq_status)) & 0xff0000) >> 8 | ((__uint32_t )(*(__uint32_t *)(&u.eq.eq_status)) & 0xff000000) >> 24) : __swap32md(*(__uint32_t *)(&u.eq.eq_status))) & MCX_EQ_CTX_STATE_MASK(0xf << 8)) >> |
8776 | MCX_EQ_CTX_STATE_SHIFT8) { |
8777 | case MCX_EQ_CTX_STATE_ARMED0x9: |
8778 | text = "armed"; |
8779 | break; |
8780 | case MCX_EQ_CTX_STATE_FIRED0xa: |
8781 | text = "fired"; |
8782 | break; |
8783 | default: |
8784 | text = "unknown"; |
8785 | break; |
8786 | } |
8787 | strlcpy(kstat_kv_istr(kvs), text, sizeof(kstat_kv_istr(kvs))); |
8788 | kvs++; |
8789 | |
8790 | nanouptime(&ks->ks_updated); |
8791 | out: |
8792 | KERNEL_UNLOCK()_kernel_unlock(); |
8793 | return (error); |
8794 | } |
8795 | |
8796 | #endif /* NKSTAT > 0 */ |
8797 | |
8798 | static unsigned int |
8799 | mcx_timecounter_read(struct timecounter *tc) |
8800 | { |
8801 | struct mcx_softc *sc = tc->tc_priv; |
8802 | |
8803 | return (mcx_rd(sc, MCX_INTERNAL_TIMER_L0x1004)); |
8804 | } |
8805 | |
8806 | static void |
8807 | mcx_timecounter_attach(struct mcx_softc *sc) |
8808 | { |
8809 | struct timecounter *tc = &sc->sc_timecounter; |
8810 | |
8811 | tc->tc_get_timecount = mcx_timecounter_read; |
8812 | tc->tc_counter_mask = ~0U; |
8813 | tc->tc_frequency = sc->sc_khz * 1000; |
8814 | tc->tc_name = sc->sc_dev.dv_xname; |
8815 | tc->tc_quality = -100; |
8816 | tc->tc_priv = sc; |
8817 | |
8818 | tc_init(tc); |
8819 | } |