Bug Summary

File: dev/pci/if_oce.c
Warning: line 3198, column 3: Value stored to 'status' is never read

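The flagged line is not part of this excerpt, but the warning comes from the deadcode.DeadStores checker enabled in the invocation below: a value is assigned to 'status' and the variable is never read again before being overwritten or going out of scope. As a hypothetical reduction of the pattern (the two helpers are real functions from this driver; the wrapper function is not the driver's code):

    int
    example(struct oce_softc *sc)
    {
            int status;

            status = oce_get_link_status(sc);  /* value stored here... */
            status = oce_update_stats(sc);     /* ...is never read before this overwrite */
            return (status);
    }

The usual fix is to either act on the first return value or drop the dead assignment.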
Annotated Source Code


clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name if_oce.c -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/lib/clang/13.0.0 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -D CONFIG_DRM_AMD_DC_DCN3_0 -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant -fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free 
-fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /usr/obj/sys/arch/amd64/compile/GENERIC.MP/scan-build/2022-01-12-131800-47421-1 -x c /usr/src/sys/dev/pci/if_oce.c
1/* $OpenBSD: if_oce.c,v 1.105 2022/01/09 05:42:54 jsg Exp $ */
2
3/*
4 * Copyright (c) 2012 Mike Belopuhov
5 *
6 * Permission to use, copy, modify, and distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18
19/*-
20 * Copyright (C) 2012 Emulex
21 * All rights reserved.
22 *
23 * Redistribution and use in source and binary forms, with or without
24 * modification, are permitted provided that the following conditions are met:
25 *
26 * 1. Redistributions of source code must retain the above copyright notice,
27 * this list of conditions and the following disclaimer.
28 *
29 * 2. Redistributions in binary form must reproduce the above copyright
30 * notice, this list of conditions and the following disclaimer in the
31 * documentation and/or other materials provided with the distribution.
32 *
33 * 3. Neither the name of the Emulex Corporation nor the names of its
34 * contributors may be used to endorse or promote products derived from
35 * this software without specific prior written permission.
36 *
37 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
38 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
39 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
40 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
41 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
42 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
43 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
44 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
45 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
46 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
47 * POSSIBILITY OF SUCH DAMAGE.
48 *
49 * Contact Information:
50 * freebsd-drivers@emulex.com
51 *
52 * Emulex
53 * 3333 Susan Street
54 * Costa Mesa, CA 92626
55 */
56
57#include "bpfilter.h"
58#include "vlan.h"
59
60#include <sys/param.h>
61#include <sys/systm.h>
62#include <sys/sockio.h>
63#include <sys/mbuf.h>
64#include <sys/malloc.h>
65#include <sys/kernel.h>
66#include <sys/device.h>
67#include <sys/socket.h>
68#include <sys/queue.h>
69#include <sys/timeout.h>
70#include <sys/pool.h>
71
72#include <net/if.h>
73#include <net/if_media.h>
74
75#include <netinet/in.h>
76#include <netinet/if_ether.h>
77
78#ifdef INET6
79#include <netinet/ip6.h>
80#endif
81
82#if NBPFILTER > 0
83#include <net/bpf.h>
84#endif
85
86#include <dev/pci/pcireg.h>
87#include <dev/pci/pcivar.h>
88#include <dev/pci/pcidevs.h>
89
90#include <dev/pci/if_ocereg.h>
91
92#ifndef TRUE
93#define TRUE 1
94#endif
95#ifndef FALSE
96#define FALSE 0
97#endif
98
99#define OCE_MBX_TIMEOUT 5
100
101#define OCE_MAX_PAYLOAD 65536
102
103#define OCE_TX_RING_SIZE 512
104#define OCE_RX_RING_SIZE 1024
105
106/* This should be powers of 2. Like 2,4,8 & 16 */
107#define OCE_MAX_RSS 4 /* TODO: 8 */
108#define OCE_MAX_RQ OCE_MAX_RSS + 1 /* one default queue */
109#define OCE_MAX_WQ 8
110
111#define OCE_MAX_EQ 32
112#define OCE_MAX_CQ OCE_MAX_RQ + OCE_MAX_WQ + 1 /* one MCC queue */
113#define OCE_MAX_CQ_EQ 8 /* Max CQ that can be attached to an EQ */
114
115#define OCE_DEFAULT_EQD 80
116
117#define OCE_MIN_MTU 256
118#define OCE_MAX_MTU 9000
119
120#define OCE_MAX_RQ_COMPL 64
121#define OCE_MAX_RQ_POSTS 255
122#define OCE_RX_BUF_SIZE 2048
123
124#define OCE_MAX_TX_ELEMENTS 29
125#define OCE_MAX_TX_DESC 1024
126#define OCE_MAX_TX_SIZE 65535
127
128#define OCE_MEM_KVA(_m) ((void *)((_m)->vaddr))
129#define OCE_MEM_DVA(_m) ((_m)->paddr)
130
131#define OCE_WQ_FOREACH(sc, wq, i) \
132 for (i = 0, wq = sc->sc_wq[0]; i < sc->sc_nwq; i++, wq = sc->sc_wq[i])
133#define OCE_RQ_FOREACH(sc, rq, i) \
134 for (i = 0, rq = sc->sc_rq[0]; i < sc->sc_nrq; i++, rq = sc->sc_rq[i])
135#define OCE_EQ_FOREACH(sc, eq, i) \
136 for (i = 0, eq = sc->sc_eq[0]; i < sc->sc_neq; i++, eq = sc->sc_eq[i])
137#define OCE_CQ_FOREACH(sc, cq, i) \
138 for (i = 0, cq = sc->sc_cq[0]; i < sc->sc_ncq; i++, cq = sc->sc_cq[i])
139#define OCE_RING_FOREACH(_r, _v, _c) \
140 for ((_v) = oce_ring_first(_r); _c; (_v) = oce_ring_next(_r))
141
142static inline int
143ilog2(unsigned int v)
144{
145 int r = 0;
146
147 while (v >>= 1)
148 r++;
149 return (r);
150}
151
152struct oce_pkt {
153 struct mbuf * mbuf;
154 bus_dmamap_t map;
155 int nsegs;
156 SIMPLEQ_ENTRY(oce_pkt) entry;
157};
158SIMPLEQ_HEAD(oce_pkt_list, oce_pkt);
159
160struct oce_dma_mem {
161 bus_dma_tag_t tag;
162 bus_dmamap_t map;
163 bus_dma_segment_t segs;
164 int nsegs;
165 bus_size_t size;
166 caddr_t vaddr;
167 bus_addr_t paddr;
168};
169
170struct oce_ring {
171 int index;
172 int nitems;
173 int nused;
174 int isize;
175 struct oce_dma_mem dma;
176};
177
178struct oce_softc;
179
180enum cq_len {
181 CQ_LEN_256 = 256,
182 CQ_LEN_512 = 512,
183 CQ_LEN_1024 = 1024
184};
185
186enum eq_len {
187 EQ_LEN_256 = 256,
188 EQ_LEN_512 = 512,
189 EQ_LEN_1024 = 1024,
190 EQ_LEN_2048 = 2048,
191 EQ_LEN_4096 = 4096
192};
193
194enum eqe_size {
195 EQE_SIZE_4 = 4,
196 EQE_SIZE_16 = 16
197};
198
199enum qtype {
200 QTYPE_EQ,
201 QTYPE_MQ,
202 QTYPE_WQ,
203 QTYPE_RQ,
204 QTYPE_CQ,
205 QTYPE_RSS
206};
207
208struct oce_eq {
209 struct oce_softc * sc;
210 struct oce_ring * ring;
211 enum qtype type;
212 int id;
213
214 struct oce_cq * cq[OCE_MAX_CQ_EQ];
215 int cq_valid;
216
217 int nitems;
218 int isize;
219 int delay;
220};
221
222struct oce_cq {
223 struct oce_softc * sc;
224 struct oce_ring * ring;
225 enum qtype type;
226 int id;
227
228 struct oce_eq * eq;
229
230 void (*cq_intr)(void *);
231 void * cb_arg;
232
233 int nitems;
234 int nodelay;
235 int eventable;
236 int ncoalesce;
237};
238
239struct oce_mq {
240 struct oce_softc * sc;
241 struct oce_ring * ring;
242 enum qtype type;
243 int id;
244
245 struct oce_cq * cq;
246
247 int nitems;
248};
249
250struct oce_wq {
251 struct oce_softc * sc;
252 struct oce_ring * ring;
253 enum qtype type;
254 int id;
255
256 struct oce_cq * cq;
257
258 struct oce_pkt_list pkt_list;
259 struct oce_pkt_list pkt_free;
260
261 int nitems;
262};
263
264struct oce_rq {
265 struct oce_softc * sc;
266 struct oce_ring * ring;
267 enum qtype type;
268 int id;
269
270 struct oce_cq * cq;
271
272 struct if_rxring rxring;
273 struct oce_pkt_list pkt_list;
274 struct oce_pkt_list pkt_free;
275
276 uint32_t rss_cpuid;
277
278#ifdef OCE_LRO
279 struct lro_ctrl lro;
280 int lro_pkts_queued;
281#endif
282
283 int nitems;
284 int fragsize;
285 int mtu;
286 int rss;
287};
288
289struct oce_softc {
290 struct device sc_dev;
291
292 uint sc_flags;
293#define OCE_F_BE2 0x00000001
294#define OCE_F_BE3 0x00000002
295#define OCE_F_XE201 0x00000008
296#define OCE_F_BE3_NATIVE 0x00000100
297#define OCE_F_RESET_RQD 0x00001000
298#define OCE_F_MBOX_ENDIAN_RQD 0x00002000
299
300 bus_dma_tag_t sc_dmat;
301
302 bus_space_tag_t sc_cfg_iot;
303 bus_space_handle_t sc_cfg_ioh;
304 bus_size_t sc_cfg_size;
305
306 bus_space_tag_t sc_csr_iot;
307 bus_space_handle_t sc_csr_ioh;
308 bus_size_t sc_csr_size;
309
310 bus_space_tag_t sc_db_iot;
311 bus_space_handle_t sc_db_ioh;
312 bus_size_t sc_db_size;
313
314 void * sc_ih;
315
316 struct arpcom sc_ac;
317 struct ifmedia sc_media;
318 ushort sc_link_up;
319 ushort sc_link_speed;
320 uint64_t sc_fc;
321
322 struct oce_dma_mem sc_mbx;
323 struct oce_dma_mem sc_pld;
324
325 uint sc_port;
326 uint sc_fmode;
327
328 struct oce_wq * sc_wq[OCE_MAX_WQ]; /* TX work queues */
329 struct oce_rq * sc_rq[OCE_MAX_RQ]; /* RX work queues */
330 struct oce_cq * sc_cq[OCE_MAX_CQ]; /* Completion queues */
331 struct oce_eq * sc_eq[OCE_MAX_EQ]; /* Event queues */
332 struct oce_mq * sc_mq; /* Mailbox queue */
333
334 ushort sc_neq;
335 ushort sc_ncq;
336 ushort sc_nrq;
337 ushort sc_nwq;
338 ushort sc_nintr;
339
340 ushort sc_tx_ring_size;
341 ushort sc_rx_ring_size;
342 ushort sc_rss_enable;
343
344 uint32_t sc_if_id; /* interface ID */
345 uint32_t sc_pmac_id; /* PMAC id */
346 char sc_macaddr[ETHER_ADDR_LEN];
347
348 uint32_t sc_pvid;
349
350 uint64_t sc_rx_errors;
351 uint64_t sc_tx_errors;
352
353 struct timeout sc_tick;
354 struct timeout sc_rxrefill;
355
356 void * sc_statcmd;
357};
358
359#define IS_BE(sc) ISSET((sc)->sc_flags, OCE_F_BE2 | OCE_F_BE3)
360#define IS_XE201(sc) ISSET((sc)->sc_flags, OCE_F_XE201)
361
362#define ADDR_HI(x) ((uint32_t)((uint64_t)(x) >> 32))
363#define ADDR_LO(x) ((uint32_t)((uint64_t)(x) & 0xffffffff))
364
365#define IF_LRO_ENABLED(ifp) ISSET((ifp)->if_capabilities, IFCAP_LRO)
366
367int oce_match(struct device *, void *, void *);
368void oce_attach(struct device *, struct device *, void *);
369int oce_pci_alloc(struct oce_softc *, struct pci_attach_args *);
370void oce_attachhook(struct device *);
371void oce_attach_ifp(struct oce_softc *);
372int oce_ioctl(struct ifnet *, u_long, caddr_t);
373int oce_rxrinfo(struct oce_softc *, struct if_rxrinfo *);
374void oce_iff(struct oce_softc *);
375void oce_link_status(struct oce_softc *);
376void oce_media_status(struct ifnet *, struct ifmediareq *);
377int oce_media_change(struct ifnet *);
378void oce_tick(void *);
379void oce_init(void *);
380void oce_stop(struct oce_softc *);
381void oce_watchdog(struct ifnet *);
382void oce_start(struct ifnet *);
383int oce_encap(struct oce_softc *, struct mbuf **, int wqidx);
384#ifdef OCE_TSO
385struct mbuf *
386 oce_tso(struct oce_softc *, struct mbuf **);
387#endif
388int oce_intr(void *);
389void oce_intr_wq(void *);
390void oce_txeof(struct oce_wq *);
391void oce_intr_rq(void *);
392void oce_rxeof(struct oce_rq *, struct oce_nic_rx_cqe *);
393void oce_rxeoc(struct oce_rq *, struct oce_nic_rx_cqe *);
394int oce_vtp_valid(struct oce_softc *, struct oce_nic_rx_cqe *);
395int oce_port_valid(struct oce_softc *, struct oce_nic_rx_cqe *);
396#ifdef OCE_LRO
397void oce_flush_lro(struct oce_rq *);
398int oce_init_lro(struct oce_softc *);
399void oce_free_lro(struct oce_softc *);
400#endif
401int oce_get_buf(struct oce_rq *);
402int oce_alloc_rx_bufs(struct oce_rq *);
403void oce_refill_rx(void *);
404void oce_free_posted_rxbuf(struct oce_rq *);
405void oce_intr_mq(void *);
406void oce_link_event(struct oce_softc *,
407 struct oce_async_cqe_link_state *);
408
409int oce_init_queues(struct oce_softc *);
410void oce_release_queues(struct oce_softc *);
411struct oce_wq *oce_create_wq(struct oce_softc *, struct oce_eq *);
412void oce_drain_wq(struct oce_wq *);
413void oce_destroy_wq(struct oce_wq *);
414struct oce_rq *
415 oce_create_rq(struct oce_softc *, struct oce_eq *, int rss);
416void oce_drain_rq(struct oce_rq *);
417void oce_destroy_rq(struct oce_rq *);
418struct oce_eq *
419 oce_create_eq(struct oce_softc *);
420static inline void
421 oce_arm_eq(struct oce_eq *, int neqe, int rearm, int clearint);
422void oce_drain_eq(struct oce_eq *);
423void oce_destroy_eq(struct oce_eq *);
424struct oce_mq *
425 oce_create_mq(struct oce_softc *, struct oce_eq *);
426void oce_drain_mq(struct oce_mq *);
427void oce_destroy_mq(struct oce_mq *);
428struct oce_cq *
429 oce_create_cq(struct oce_softc *, struct oce_eq *, int nitems,
430 int isize, int eventable, int nodelay, int ncoalesce);
431static inline void
432 oce_arm_cq(struct oce_cq *, int ncqe, int rearm);
433void oce_destroy_cq(struct oce_cq *);
434
435int oce_dma_alloc(struct oce_softc *, bus_size_t, struct oce_dma_mem *);
436void oce_dma_free(struct oce_softc *, struct oce_dma_mem *);
437#define oce_dma_sync(d, f) \
438 bus_dmamap_sync((d)->tag, (d)->map, 0, (d)->map->dm_mapsize, f)
439
440struct oce_ring *
441 oce_create_ring(struct oce_softc *, int nitems, int isize, int maxseg);
442void oce_destroy_ring(struct oce_softc *, struct oce_ring *);
443int oce_load_ring(struct oce_softc *, struct oce_ring *,
444 struct oce_pa *, int max_segs);
445static inline void *
446 oce_ring_get(struct oce_ring *);
447static inline void *
448 oce_ring_first(struct oce_ring *);
449static inline void *
450 oce_ring_next(struct oce_ring *);
451struct oce_pkt *
452 oce_pkt_alloc(struct oce_softc *, size_t size, int nsegs,
453 int maxsegsz);
454void oce_pkt_free(struct oce_softc *, struct oce_pkt *);
455static inline struct oce_pkt *
456 oce_pkt_get(struct oce_pkt_list *);
457static inline void
458 oce_pkt_put(struct oce_pkt_list *, struct oce_pkt *);
459
460int oce_init_fw(struct oce_softc *);
461int oce_mbox_init(struct oce_softc *);
462int oce_mbox_dispatch(struct oce_softc *);
463int oce_cmd(struct oce_softc *, int subsys, int opcode, int version,
464 void *payload, int length);
465void oce_first_mcc(struct oce_softc *);
466
467int oce_get_fw_config(struct oce_softc *);
468int oce_check_native_mode(struct oce_softc *);
469int oce_create_iface(struct oce_softc *, uint8_t *macaddr);
470int oce_config_vlan(struct oce_softc *, struct normal_vlan *vtags,
471 int nvtags, int untagged, int promisc);
472int oce_set_flow_control(struct oce_softc *, uint64_t);
473int oce_config_rss(struct oce_softc *, int enable);
474int oce_update_mcast(struct oce_softc *, uint8_t multi[][ETHER_ADDR_LEN],
475 int naddr);
476int oce_set_promisc(struct oce_softc *, int enable);
477int oce_get_link_status(struct oce_softc *);
478
479void oce_macaddr_set(struct oce_softc *);
480int oce_macaddr_get(struct oce_softc *, uint8_t *macaddr);
481int oce_macaddr_add(struct oce_softc *, uint8_t *macaddr, uint32_t *pmac);
482int oce_macaddr_del(struct oce_softc *, uint32_t pmac);
483
484int oce_new_rq(struct oce_softc *, struct oce_rq *);
485int oce_new_wq(struct oce_softc *, struct oce_wq *);
486int oce_new_mq(struct oce_softc *, struct oce_mq *);
487int oce_new_eq(struct oce_softc *, struct oce_eq *);
488int oce_new_cq(struct oce_softc *, struct oce_cq *);
489
490int oce_init_stats(struct oce_softc *);
491int oce_update_stats(struct oce_softc *);
492int oce_stats_be2(struct oce_softc *, uint64_t *, uint64_t *);
493int oce_stats_be3(struct oce_softc *, uint64_t *, uint64_t *);
494int oce_stats_xe(struct oce_softc *, uint64_t *, uint64_t *);
495
496struct pool *oce_pkt_pool;
497
498struct cfdriver oce_cd = {
499 NULL, "oce", DV_IFNET
500};
501
502struct cfattach oce_ca = {
503 sizeof(struct oce_softc), oce_match, oce_attach, NULL, NULL
504};
505
506const struct pci_matchid oce_devices[] = {
507 { PCI_VENDOR_SERVERENGINES, PCI_PRODUCT_SERVERENGINES_BE2 },
508 { PCI_VENDOR_SERVERENGINES, PCI_PRODUCT_SERVERENGINES_BE3 },
509 { PCI_VENDOR_SERVERENGINES, PCI_PRODUCT_SERVERENGINES_OCBE2 },
510 { PCI_VENDOR_SERVERENGINES, PCI_PRODUCT_SERVERENGINES_OCBE3 },
511 { PCI_VENDOR_EMULEX, PCI_PRODUCT_EMULEX_XE201 },
512};
513
514int
515oce_match(struct device *parent, void *match, void *aux)
516{
517 return (pci_matchbyid(aux, oce_devices, nitems(oce_devices)));
518}
519
520void
521oce_attach(struct device *parent, struct device *self, void *aux)
522{
523 struct pci_attach_args *pa = (struct pci_attach_args *)aux;
524 struct oce_softc *sc = (struct oce_softc *)self;
525 const char *intrstr = NULL;
526 pci_intr_handle_t ih;
527
528 switch (PCI_PRODUCT(pa->pa_id)) {
529 case PCI_PRODUCT_SERVERENGINES_BE2:
530 case PCI_PRODUCT_SERVERENGINES_OCBE2:
531 SET(sc->sc_flags, OCE_F_BE2);
532 break;
533 case PCI_PRODUCT_SERVERENGINES_BE3:
534 case PCI_PRODUCT_SERVERENGINES_OCBE3:
535 SET(sc->sc_flags, OCE_F_BE3);
536 break;
537 case PCI_PRODUCT_EMULEX_XE201:
538 SET(sc->sc_flags, OCE_F_XE201);
539 break;
540 }
541
542 sc->sc_dmat = pa->pa_dmat;
543 if (oce_pci_alloc(sc, pa))
544 return;
545
546 sc->sc_tx_ring_size = OCE_TX_RING_SIZE;
547 sc->sc_rx_ring_size = OCE_RX_RING_SIZE;
548
549 /* create the bootstrap mailbox */
550 if (oce_dma_alloc(sc, sizeof(struct oce_bmbx), &sc->sc_mbx)) {
551 printf(": failed to allocate mailbox memory\n");
552 return;
553 }
554 if (oce_dma_alloc(sc, OCE_MAX_PAYLOAD, &sc->sc_pld)) {
555 printf(": failed to allocate payload memory\n");
556 goto fail_1;
557 }
558
559 if (oce_init_fw(sc))
560 goto fail_2;
561
562 if (oce_mbox_init(sc)) {
563 printf(": failed to initialize mailbox\n");
564 goto fail_2;
565 }
566
567 if (oce_get_fw_config(sc)) {
568 printf(": failed to get firmware configuration\n");
569 goto fail_2;
570 }
571
572 if (ISSET(sc->sc_flags, OCE_F_BE3)) {
573 if (oce_check_native_mode(sc))
574 goto fail_2;
575 }
576
577 if (oce_macaddr_get(sc, sc->sc_macaddr)) {
578 printf(": failed to fetch MAC address\n");
579 goto fail_2;
580 }
581 memcpy(sc->sc_ac.ac_enaddr, sc->sc_macaddr, ETHER_ADDR_LEN);
582
583 if (oce_pkt_pool == NULL) {
584 oce_pkt_pool = malloc(sizeof(struct pool), M_DEVBUF, M_NOWAIT);
585 if (oce_pkt_pool == NULL) {
586 printf(": unable to allocate descriptor pool\n");
587 goto fail_2;
588 }
589 pool_init(oce_pkt_pool, sizeof(struct oce_pkt), 0, IPL_NET,
590 0, "ocepkts", NULL);
591 }
592
593 /* We allocate a single interrupt resource */
594 sc->sc_nintr = 1;
595 if (pci_intr_map_msi(pa, &ih) != 0 &&
596 pci_intr_map(pa, &ih) != 0) {
597 printf(": couldn't map interrupt\n");
598 goto fail_2;
599 }
600
601 intrstr = pci_intr_string(pa->pa_pc, ih);
602 sc->sc_ih = pci_intr_establish(pa->pa_pc, ih, IPL_NET, oce_intr, sc,
603 sc->sc_dev.dv_xname);
604 if (sc->sc_ih == NULL) {
605 printf(": couldn't establish interrupt\n");
606 if (intrstr != NULL)
607 printf(" at %s", intrstr);
608 printf("\n");
609 goto fail_2;
610 }
611 printf(": %s", intrstr);
612
613 if (oce_init_stats(sc))
614 goto fail_3;
615
616 if (oce_init_queues(sc))
617 goto fail_3;
618
619 oce_attach_ifp(sc);
620
621#ifdef OCE_LRO
622 if (oce_init_lro(sc))
623 goto fail_4;
624#endif
625
626 timeout_set(&sc->sc_tick, oce_tick, sc);
627 timeout_set(&sc->sc_rxrefill, oce_refill_rx, sc);
628
629 config_mountroot(self, oce_attachhook);
630
631 printf(", address %s\n", ether_sprintf(sc->sc_ac.ac_enaddr));
632
633 return;
634
635#ifdef OCE_LRO
636fail_4:
637 oce_free_lro(sc);
638 ether_ifdetach(&sc->sc_ac.ac_if);
639 if_detach(&sc->sc_ac.ac_if);
640 oce_release_queues(sc);
641#endif
642fail_3:
643 pci_intr_disestablish(pa->pa_pc, sc->sc_ih);
644fail_2:
645 oce_dma_free(sc, &sc->sc_pld);
646fail_1:
647 oce_dma_free(sc, &sc->sc_mbx);
648}
649
650int
651oce_pci_alloc(struct oce_softc *sc, struct pci_attach_args *pa)
652{
653 pcireg_t memtype, reg;
654
655 /* setup the device config region */
656 if (ISSET(sc->sc_flags, OCE_F_BE2))
657 reg = OCE_BAR_CFG_BE2;
658 else
659 reg = OCE_BAR_CFG;
660
661 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, reg);
662 if (pci_mapreg_map(pa, reg, memtype, 0, &sc->sc_cfg_iot,
663 &sc->sc_cfg_ioh, NULL, &sc->sc_cfg_size,
664 IS_BE(sc) ? 0 : 32768)) {
665 printf(": can't find cfg mem space\n");
666 return (ENXIO);
667 }
668
669 /*
670 * Read the SLI_INTF register and determine whether we
671 * can use this port and its features
672 */
673 reg = pci_conf_read(pa->pa_pc, pa->pa_tag, OCE_INTF_REG_OFFSET);
674 if (OCE_SLI_SIGNATURE(reg) != OCE_INTF_VALID_SIG) {
675 printf(": invalid signature\n");
676 goto fail_1;
677 }
678 if (OCE_SLI_REVISION(reg) != OCE_INTF_SLI_REV4) {
679 printf(": unsupported SLI revision\n");
680 goto fail_1;
681 }
682 if (OCE_SLI_IFTYPE(reg) == OCE_INTF_IF_TYPE_1)
683 SET(sc->sc_flags, OCE_F_MBOX_ENDIAN_RQD);
684 if (OCE_SLI_HINT1(reg) == OCE_INTF_FUNC_RESET_REQD)
685 SET(sc->sc_flags, OCE_F_RESET_RQD);
686
687 /* Lancer has one BAR (CFG) but BE3 has three (CFG, CSR, DB) */
688 if (IS_BE(sc)) {
689 /* set up CSR region */
690 reg = OCE_BAR_CSR;
691 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, reg);
692 if (pci_mapreg_map(pa, reg, memtype, 0, &sc->sc_csr_iot,
693 &sc->sc_csr_ioh, NULL, &sc->sc_csr_size, 0)) {
694 printf(": can't find csr mem space\n");
695 goto fail_1;
696 }
697
698 /* set up DB doorbell region */
699 reg = OCE_BAR_DB;
700 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, reg);
701 if (pci_mapreg_map(pa, reg, memtype, 0, &sc->sc_db_iot,
702 &sc->sc_db_ioh, NULL, &sc->sc_db_size, 0)) {
703 printf(": can't find db mem space\n");
704 goto fail_2;
705 }
706 } else {
707 sc->sc_csr_iot = sc->sc_db_iot = sc->sc_cfg_iot;
708 sc->sc_csr_ioh = sc->sc_db_ioh = sc->sc_cfg_ioh;
709 }
710
711 return (0);
712
713fail_2:
714 bus_space_unmap(sc->sc_csr_iot, sc->sc_csr_ioh, sc->sc_csr_size);
715fail_1:
716 bus_space_unmap(sc->sc_cfg_iot, sc->sc_cfg_ioh, sc->sc_cfg_size);
717 return (ENXIO);
718}
719
720static inline uint32_t
721oce_read_cfg(struct oce_softc *sc, bus_size_t off)
722{
723 bus_space_barrier(sc->sc_cfg_iot, sc->sc_cfg_ioh, off, 4,
724 BUS_SPACE_BARRIER_READ);
725 return (bus_space_read_4(sc->sc_cfg_iot, sc->sc_cfg_ioh, off));
726}
727
728static inline uint32_t
729oce_read_csr(struct oce_softc *sc, bus_size_t off)
730{
731 bus_space_barrier(sc->sc_csr_iot, sc->sc_csr_ioh, off, 4,
732 BUS_SPACE_BARRIER_READ);
733 return (bus_space_read_4(sc->sc_csr_iot, sc->sc_csr_ioh, off));
734}
735
736static inline uint32_t
737oce_read_db(struct oce_softc *sc, bus_size_t off)
738{
739 bus_space_barrier(sc->sc_db_iot, sc->sc_db_ioh, off, 4,
740 BUS_SPACE_BARRIER_READ);
741 return (bus_space_read_4(sc->sc_db_iot, sc->sc_db_ioh, off));
742}
743
744static inline void
745oce_write_cfg(struct oce_softc *sc, bus_size_t off, uint32_t val)
746{
747 bus_space_write_4(sc->sc_cfg_iot, sc->sc_cfg_ioh, off, val);
748 bus_space_barrier(sc->sc_cfg_iot, sc->sc_cfg_ioh, off, 4,
749 BUS_SPACE_BARRIER_WRITE);
750}
751
752static inline void
753oce_write_csr(struct oce_softc *sc, bus_size_t off, uint32_t val)
754{
755 bus_space_write_4(sc->sc_csr_iot, sc->sc_csr_ioh, off, val);
756 bus_space_barrier(sc->sc_csr_iot, sc->sc_csr_ioh, off, 4,
757 BUS_SPACE_BARRIER_WRITE);
758}
759
760static inline void
761oce_write_db(struct oce_softc *sc, bus_size_t off, uint32_t val)
762{
763 bus_space_write_4(sc->sc_db_iot, sc->sc_db_ioh, off, val);
764 bus_space_barrier(sc->sc_db_iot, sc->sc_db_ioh, off, 4,
765 BUS_SPACE_BARRIER_WRITE);
766}
767
768static inline void
769oce_intr_enable(struct oce_softc *sc)
770{
771 uint32_t reg;
772
773 reg = oce_read_cfg(sc, PCI_INTR_CTRL);
774 oce_write_cfg(sc, PCI_INTR_CTRL, reg | HOSTINTR_MASK);
775}
776
777static inline void
778oce_intr_disable(struct oce_softc *sc)
779{
780 uint32_t reg;
781
782 reg = oce_read_cfg(sc, PCI_INTR_CTRL);
783 oce_write_cfg(sc, PCI_INTR_CTRL, reg & ~HOSTINTR_MASK);
784}
785
786void
787oce_attachhook(struct device *self)
788{
789 struct oce_softc *sc = (struct oce_softc *)self;
790
791 oce_get_link_status(sc);
792
793 oce_arm_cq(sc->sc_mq->cq, 0, TRUE);
794
795 /*
796 * We need to get MCC async events. So enable intrs and arm
797 * first EQ, Other EQs will be armed after interface is UP
798 */
799 oce_intr_enable(sc);
800 oce_arm_eq(sc->sc_eq[0], 0, TRUE, FALSE);
801
802 /*
803 * Send first mcc cmd and after that we get gracious
804 * MCC notifications from FW
805 */
806 oce_first_mcc(sc);
807}
808
809void
810oce_attach_ifp(struct oce_softc *sc)
811{
812 struct ifnet *ifp = &sc->sc_ac.ac_if;
813
814 ifmedia_init(&sc->sc_media, IFM_IMASK, oce_media_change,
815 oce_media_status);
816 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
817 ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
818
819 strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);
820 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
821 ifp->if_ioctl = oce_ioctl;
822 ifp->if_start = oce_start;
823 ifp->if_watchdog = oce_watchdog;
824 ifp->if_hardmtu = OCE_MAX_MTU;
825 ifp->if_softc = sc;
826 ifq_set_maxlen(&ifp->if_snd, sc->sc_tx_ring_size - 1);
827
828 ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_CSUM_IPv4 |
829 IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;
830
831#if NVLAN > 0
832 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
833#endif
834
835#ifdef OCE_TSO
836 ifp->if_capabilities |= IFCAP_TSO;
837 ifp->if_capabilities |= IFCAP_VLAN_HWTSO;
838#endif
839#ifdef OCE_LRO
840 ifp->if_capabilities |= IFCAP_LRO;
841#endif
842
843 if_attach(ifp);
844 ether_ifattach(ifp);
845}
846
847int
848oce_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
849{
850 struct oce_softc *sc = ifp->if_softc;
851 struct ifreq *ifr = (struct ifreq *)data;
852 int s, error = 0;
853
854 s = splnet();
855
856 switch (command) {
857 case SIOCSIFADDR:
858 ifp->if_flags |= IFF_UP;
859 if (!(ifp->if_flags & IFF_RUNNING))
860 oce_init(sc);
861 break;
862 case SIOCSIFFLAGS:
863 if (ifp->if_flags & IFF_UP) {
864 if (ifp->if_flags & IFF_RUNNING)
865 error = ENETRESET;
866 else
867 oce_init(sc);
868 } else {
869 if (ifp->if_flags & IFF_RUNNING)
870 oce_stop(sc);
871 }
872 break;
873 case SIOCGIFMEDIA:
874 case SIOCSIFMEDIA:
875 error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, command);
876 break;
877 case SIOCGIFRXR:
878 error = oce_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_data);
879 break;
880 default:
881 error = ether_ioctl(ifp, &sc->sc_ac, command, data);
882 break;
883 }
884
885 if (error == ENETRESET) {
886 if (ifp->if_flags & IFF_RUNNING)
887 oce_iff(sc);
888 error = 0;
889 }
890
891 splx(s);
892
893 return (error);
894}
895
896int
897oce_rxrinfo(struct oce_softc *sc, struct if_rxrinfo *ifri)
898{
899 struct if_rxring_info *ifr, ifr1;
900 struct oce_rq *rq;
901 int error, i;
902 u_int n = 0;
903
904 if (sc->sc_nrq > 1) {
905 if ((ifr = mallocarray(sc->sc_nrq, sizeof(*ifr), M_DEVBUF,
906 M_WAITOK | M_ZERO)) == NULL)
907 return (ENOMEM);
908 } else
909 ifr = &ifr1;
910
911 OCE_RQ_FOREACH(sc, rq, i) {
912 ifr[n].ifr_size = MCLBYTES;
913 snprintf(ifr[n].ifr_name, sizeof(ifr[n].ifr_name), "/%d", i);
914 ifr[n].ifr_info = rq->rxring;
915 n++;
916 }
917
918 error = if_rxr_info_ioctl(ifri, sc->sc_nrq, ifr);
919
920 if (sc->sc_nrq > 1)
921 free(ifr, M_DEVBUF, sc->sc_nrq * sizeof(*ifr));
922 return (error);
923}
924
925
926void
927oce_iff(struct oce_softc *sc)
928{
929 uint8_t multi[OCE_MAX_MC_FILTER_SIZE][ETHER_ADDR_LEN];
930 struct arpcom *ac = &sc->sc_ac;
931 struct ifnet *ifp = &ac->ac_if;
932 struct ether_multi *enm;
933 struct ether_multistep step;
934 int naddr = 0, promisc = 0;
935
936 ifp->if_flags &= ~IFF_ALLMULTI;
937
938 if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0 ||
939 ac->ac_multicnt >= OCE_MAX_MC_FILTER_SIZE) {
940 ifp->if_flags |= IFF_ALLMULTI;
941 promisc = 1;
942 } else {
943 ETHER_FIRST_MULTI(step, &sc->sc_ac, enm);
944 while (enm != NULL) {
945 memcpy(multi[naddr++], enm->enm_addrlo, ETHER_ADDR_LEN);
946 ETHER_NEXT_MULTI(step, enm);
947 }
948 oce_update_mcast(sc, multi, naddr);
949 }
950
951 oce_set_promisc(sc, promisc);
952}
953
954void
955oce_link_status(struct oce_softc *sc)
956{
957 struct ifnet *ifp = &sc->sc_ac.ac_if;
958 int link_state = LINK_STATE_DOWN;
959
960 ifp->if_baudrate = 0;
961 if (sc->sc_link_up) {
962 link_state = LINK_STATE_FULL_DUPLEX;
963
964 switch (sc->sc_link_speed) {
965 case 1:
966 ifp->if_baudrate = IF_Mbps(10);
967 break;
968 case 2:
969 ifp->if_baudrate = IF_Mbps(100);
970 break;
971 case 3:
972 ifp->if_baudrate = IF_Gbps(1);
973 break;
974 case 4:
975 ifp->if_baudrate = IF_Gbps(10);
976 break;
977 }
978 }
979 if (ifp->if_link_state != link_state) {
980 ifp->if_link_state = link_state;
981 if_link_state_change(ifp);
982 }
983}
984
985void
986oce_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
987{
988 struct oce_softc *sc = ifp->if_softc;
989
990 ifmr->ifm_status = IFM_AVALID;
991 ifmr->ifm_active = IFM_ETHER;
992
993 if (oce_get_link_status(sc) == 0)
994 oce_link_status(sc);
995
996 if (!sc->sc_link_up) {
997 ifmr->ifm_active |= IFM_NONE;
998 return;
999 }
1000
1001 ifmr->ifm_status |= IFM_ACTIVE;
1002
1003 switch (sc->sc_link_speed) {
1004 case 1: /* 10 Mbps */
1005 ifmr->ifm_active |= IFM_10_T | IFM_FDX;
1006 break;
1007 case 2: /* 100 Mbps */
1008 ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
1009 break;
1010 case 3: /* 1 Gbps */
1011 ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
1012 break;
1013 case 4: /* 10 Gbps */
1014 ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
1015 break;
1016 }
1017
1018 if (sc->sc_fc & IFM_ETH_RXPAUSE)
1019 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
1020 if (sc->sc_fc & IFM_ETH_TXPAUSE)
1021 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
1022}
1023
1024int
1025oce_media_change(struct ifnet *ifp)
1026{
1027 return (0);
1028}
1029
1030void
1031oce_tick(void *arg)
1032{
1033 struct oce_softc *sc = arg;
1034 int s;
1035
1036 s = splnet();
1037
1038 if (oce_update_stats(sc) == 0)
1039 timeout_add_sec(&sc->sc_tick, 1);
1040
1041 splx(s);
1042}
1043
1044void
1045oce_init(void *arg)
1046{
1047 struct oce_softc *sc = arg;
1048 struct ifnet *ifp = &sc->sc_ac.ac_if;
1049 struct oce_eq *eq;
1050 struct oce_rq *rq;
1051 struct oce_wq *wq;
1052 int i;
1053
1054 oce_stop(sc);
1055
1056 DELAY(10);
1057
1058 oce_macaddr_set(sc);
1059
1060 oce_iff(sc);
1061
1062 /* Enable VLAN promiscuous mode */
1063 if (oce_config_vlan(sc, NULL, 0, 1, 1))
1064 goto error;
1065
1066 if (oce_set_flow_control(sc, IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE))
1067 goto error;
1068
1069 OCE_RQ_FOREACH(sc, rq, i) {
1070 rq->mtu = ifp->if_hardmtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
1071 ETHER_VLAN_ENCAP_LEN;
1072 if (oce_new_rq(sc, rq)) {
1073 printf("%s: failed to create rq\n",
1074 sc->sc_dev.dv_xname);
1075 goto error;
1076 }
1077 rq->ring->index = 0;
1078
1079 /* oce splits jumbos into 2k chunks... */
1080 if_rxr_init(&rq->rxring, 8, rq->nitems);
1081
1082 if (!oce_alloc_rx_bufs(rq)) {
1083 printf("%s: failed to allocate rx buffers\n",
1084 sc->sc_dev.dv_xname);
1085 goto error;
1086 }
1087 }
1088
1089#ifdef OCE_RSS
1090 /* RSS config */
1091 if (sc->sc_rss_enable) {
1092 if (oce_config_rss(sc, (uint8_t)sc->sc_if_id, 1)) {
1093 printf("%s: failed to configure RSS\n",
1094 sc->sc_dev.dv_xname);
1095 goto error;
1096 }
1097 }
1098#endif
1099
1100 OCE_RQ_FOREACH(sc, rq, i)
1101 oce_arm_cq(rq->cq, 0, TRUE);
1102
1103 OCE_WQ_FOREACH(sc, wq, i)
1104 oce_arm_cq(wq->cq, 0, TRUE);
1105
1106 oce_arm_cq(sc->sc_mq->cq, 0, TRUE);
1107
1108 OCE_EQ_FOREACH(sc, eq, i)
1109 oce_arm_eq(eq, 0, TRUE, FALSE);
1110
1111 if (oce_get_link_status(sc) == 0)
1112 oce_link_status(sc);
1113
1114 ifp->if_flags |= IFF_RUNNING;
1115 ifq_clr_oactive(&ifp->if_snd);
1116
1117 timeout_add_sec(&sc->sc_tick, 1);
1118
1119 oce_intr_enable(sc);
1120
1121 return;
1122error:
1123 oce_stop(sc);
1124}
1125
1126void
1127oce_stop(struct oce_softc *sc)
1128{
1129 struct mbx_delete_nic_rq cmd;
1130 struct ifnet *ifp = &sc->sc_ac.ac_if;
1131 struct oce_rq *rq;
1132 struct oce_wq *wq;
1133 struct oce_eq *eq;
1134 int i;
1135
1136 timeout_del(&sc->sc_tick);
1137 timeout_del(&sc->sc_rxrefill);
1138
1139 ifp->if_flags &= ~IFF_RUNNING;
1140 ifq_clr_oactive(&ifp->if_snd);
1141
1142 /* Stop intrs and finish any bottom halves pending */
1143 oce_intr_disable(sc);
1144
1145 /* Invalidate any pending cq and eq entries */
1146 OCE_EQ_FOREACH(sc, eq, i)
1147 oce_drain_eq(eq);
1148 OCE_RQ_FOREACH(sc, rq, i) {
1149 /* destroy the work queue in the firmware */
1150 memset(&cmd, 0, sizeof(cmd));
1151 cmd.params.req.rq_id = htole16(rq->id);
1152 oce_cmd(sc, SUBSYS_NIC, OPCODE_NIC_DELETE_RQ,
1153 OCE_MBX_VER_V0, &cmd, sizeof(cmd));
1154 DELAY(1000);
1155 oce_drain_rq(rq);
1156 oce_free_posted_rxbuf(rq);
1157 }
1158 OCE_WQ_FOREACH(sc, wq, i)
1159 oce_drain_wq(wq);
1160}
1161
1162void
1163oce_watchdog(struct ifnet *ifp)
1164{
1165 printf("%s: watchdog timeout -- resetting\n", ifp->if_xname);
1166
1167 oce_init(ifp->if_softc);
1168
1169 ifp->if_oerrors++;
1170}
1171
1172void
1173oce_start(struct ifnet *ifp)
1174{
1175 struct oce_softc *sc = ifp->if_softc;
1176 struct mbuf *m;
1177 int pkts = 0;
1178
1179 if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
1180 return;
1181
1182 for (;;) {
1183 m = ifq_dequeue(&ifp->if_snd);
1184 if (m == NULL)
1185 break;
1186
1187 if (oce_encap(sc, &m, 0)) {
1188 ifq_set_oactive(&ifp->if_snd);
1189 break;
1190 }
1191
1192#if NBPFILTER > 0
1193 if (ifp->if_bpf)
1194 bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT);
1195#endif
1196 pkts++;
1197 }
1198
1199 /* Set a timeout in case the chip goes out to lunch */
1200 if (pkts)
1201 ifp->if_timer = 5;
1202}
1203
1204int
1205oce_encap(struct oce_softc *sc, struct mbuf **mpp, int wqidx)
1206{
1207 struct mbuf *m = *mpp;
1208 struct oce_wq *wq = sc->sc_wq[wqidx];
1209 struct oce_pkt *pkt = NULL;
1210 struct oce_nic_hdr_wqe *nhe;
1211 struct oce_nic_frag_wqe *nfe;
1212 int i, nwqe, err;
1213
1214#ifdef OCE_TSO
1215 if (m->m_pkthdr.csum_flags & CSUM_TSO) {
1216 /* consolidate packet buffers for TSO/LSO segment offload */
1217 m = oce_tso(sc, mpp);
1218 if (m == NULL)
1219 goto error;
1220 }
1221#endif
1222
1223 if ((pkt = oce_pkt_get(&wq->pkt_free)) == NULL)
1224 goto error;
1225
1226 err = bus_dmamap_load_mbuf(sc->sc_dmat, pkt->map, m, BUS_DMA_NOWAIT);
1227 if (err == EFBIG) {
1228 if (m_defrag(m, M_DONTWAIT) ||
1229 bus_dmamap_load_mbuf(sc->sc_dmat, pkt->map, m,
1230 BUS_DMA_NOWAIT))
1231 goto error;
1232 *mpp = m;
1233 } else if (err != 0)
1234 goto error;
1235
1236 pkt->nsegs = pkt->map->dm_nsegs;
1237
1238 nwqe = pkt->nsegs + 1;
1239 if (IS_BE(sc)) {
1240 /* BE2 and BE3 require even number of WQEs */
1241 if (nwqe & 1)
1242 nwqe++;
1243 }
1244
1245 /* Fail if there's not enough free WQEs */
1246 if (nwqe >= wq->ring->nitems - wq->ring->nused) {
1247 bus_dmamap_unload(sc->sc_dmat, pkt->map);
1248 goto error;
1249 }
1250
1251 bus_dmamap_sync(sc->sc_dmat, pkt->map, 0, pkt->map->dm_mapsize,
1252 BUS_DMASYNC_PREWRITE);
1253 pkt->mbuf = m;
1254
1255 /* TX work queue entry for the header */
1256 nhe = oce_ring_get(wq->ring);
1257 memset(nhe, 0, sizeof(*nhe));
1258
1259 nhe->u0.s.complete = 1;
1260 nhe->u0.s.event = 1;
1261 nhe->u0.s.crc = 1;
1262 nhe->u0.s.forward = 0;
1263 nhe->u0.s.ipcs = (m->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT) ? 1 : 0;
1264 nhe->u0.s.udpcs = (m->m_pkthdr.csum_flags & M_UDP_CSUM_OUT) ? 1 : 0;
1265 nhe->u0.s.tcpcs = (m->m_pkthdr.csum_flags & M_TCP_CSUM_OUT) ? 1 : 0;
1266 nhe->u0.s.num_wqe = nwqe;
1267 nhe->u0.s.total_length = m->m_pkthdr.len;
1268
1269#if NVLAN > 0
1270 if (m->m_flags & M_VLANTAG) {
1271 nhe->u0.s.vlan = 1; /* Vlan present */
1272 nhe->u0.s.vlan_tag = m->m_pkthdr.ether_vtag;
1273 }
1274#endif
1275
1276#ifdef OCE_TSO
1277 if (m->m_pkthdr.csum_flags & CSUM_TSO) {
1278 if (m->m_pkthdr.tso_segsz) {
1279 nhe->u0.s.lso = 1;
1280 nhe->u0.s.lso_mss = m->m_pkthdr.tso_segsz;
1281 }
1282 if (!IS_BE(sc))
1283 nhe->u0.s.ipcs = 1;
1284 }
1285#endif
1286
1287 oce_dma_sync(&wq->ring->dma, BUS_DMASYNC_PREREAD |
1288 BUS_DMASYNC_PREWRITE);
1289
1290 wq->ring->nused++;
1291
1292 /* TX work queue entries for data chunks */
1293 for (i = 0; i < pkt->nsegs; i++) {
1294 nfe = oce_ring_get(wq->ring);
1295 memset(nfe, 0, sizeof(*nfe));
1296 nfe->u0.s.frag_pa_hi = ADDR_HI(pkt->map->dm_segs[i].ds_addr);
1297 nfe->u0.s.frag_pa_lo = ADDR_LO(pkt->map->dm_segs[i].ds_addr);
1298 nfe->u0.s.frag_len = pkt->map->dm_segs[i].ds_len;
1299 wq->ring->nused++;
1300 }
1301 if (nwqe > (pkt->nsegs + 1)) {
1302 nfe = oce_ring_get(wq->ring);
1303 memset(nfe, 0, sizeof(*nfe));
1304 wq->ring->nused++;
1305 pkt->nsegs++;
1306 }
1307
1308 oce_pkt_put(&wq->pkt_list, pkt);
1309
1310 oce_dma_sync(&wq->ring->dma, BUS_DMASYNC_POSTREAD |
1311 BUS_DMASYNC_POSTWRITE);
1312
1313 oce_write_db(sc, PD_TXULP_DB, wq->id | (nwqe << 16));
1314
1315 return (0);
1316
1317error:
1318 if (pkt)
1319 oce_pkt_put(&wq->pkt_free, pkt);
1320 m_freem(*mpp);
1321 *mpp = NULL;
1322 return (1);
1323}
1324
1325#ifdef OCE_TSO
1326struct mbuf *
1327oce_tso(struct oce_softc *sc, struct mbuf **mpp)
1328{
1329 struct mbuf *m;
1330 struct ip *ip;
1331#ifdef INET6
1332 struct ip6_hdr *ip6;
1333#endif
1334 struct ether_vlan_header *eh;
1335 struct tcphdr *th;
1336 uint16_t etype;
1337 int total_len = 0, ehdrlen = 0;
1338
1339 m = *mpp;
1340
1341 if (M_WRITABLE(m) == 0) {
1342 m = m_dup(*mpp, M_DONTWAIT);
1343 if (!m)
1344 return (NULL);
1345 m_freem(*mpp);
1346 *mpp = m;
1347 }
1348
1349 eh = mtod(m, struct ether_vlan_header *);
1350 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1351 etype = ntohs(eh->evl_proto);
1352 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1353 } else {
1354 etype = ntohs(eh->evl_encap_proto);
1355 ehdrlen = ETHER_HDR_LEN;
1356 }
1357
1358 switch (etype) {
1359 case ETHERTYPE_IP:
1360 ip = (struct ip *)(m->m_data + ehdrlen);
1361 if (ip->ip_p != IPPROTO_TCP)
1362 return (NULL);
1363 th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
1364
1365 total_len = ehdrlen + (ip->ip_hl << 2) + (th->th_off << 2);
1366 break;
1367#ifdef INET6
1368 case ETHERTYPE_IPV6:
1369 ip6 = (struct ip6_hdr *)(m->m_data + ehdrlen);
1370 if (ip6->ip6_nxt != IPPROTO_TCP)
1371 return NULL;
1372 th = (struct tcphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr));
1373
1374 total_len = ehdrlen + sizeof(struct ip6_hdr) +
1375 (th->th_off << 2);
1376 break;
1377#endif
1378 default:
1379 return (NULL);
1380 }
1381
1382 m = m_pullup(m, total_len);
1383 if (!m)
1384 return (NULL);
1385 *mpp = m;
1386 return (m);
1387
1388}
1389#endif /* OCE_TSO */
1390
1391int
1392oce_intr(void *arg)
1393{
1394 struct oce_softc *sc = arg;
1395 struct oce_eq *eq = sc->sc_eq[0];
1396 struct oce_eqe *eqe;
1397 struct oce_cq *cq = NULL;
1398 int i, neqe = 0;
1399
1400 oce_dma_sync(&eq->ring->dma, BUS_DMASYNC_POSTREAD);
1401
1402 OCE_RING_FOREACH(eq->ring, eqe, eqe->evnt != 0) {
1403 eqe->evnt = 0;
1404 neqe++;
1405 }
1406
1407 /* Spurious? */
1408 if (!neqe) {
1409 oce_arm_eq(eq, 0, TRUE, FALSE);
1410 return (0);
1411 }
1412
1413 oce_dma_sync(&eq->ring->dma, BUS_DMASYNC_PREWRITE);
1414
1415 /* Clear EQ entries, but don't arm */
1416 oce_arm_eq(eq, neqe, FALSE, TRUE);
1417
1418 /* Process TX, RX and MCC completion queues */
1419 for (i = 0; i < eq->cq_valid; i++) {
1420 cq = eq->cq[i];
1421 (*cq->cq_intr)(cq->cb_arg);
1422 oce_arm_cq(cq, 0, TRUE);
1423 }
1424
1425 oce_arm_eq(eq, 0, TRUE, FALSE);
1426 return (1);
1427}
1428
1429/* Handle the Completion Queue for transmit */
1430void
1431oce_intr_wq(void *arg)
1432{
1433 struct oce_wq *wq = (struct oce_wq *)arg;
1434 struct oce_cq *cq = wq->cq;
1435 struct oce_nic_tx_cqe *cqe;
1436 struct oce_softc *sc = wq->sc;
1437 struct ifnet *ifp = &sc->sc_ac.ac_if;
1438 int ncqe = 0;
1439
1440 oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_POSTREAD);
1441 OCE_RING_FOREACH(cq->ring, cqe, WQ_CQE_VALID(cqe)) {
1442 oce_txeof(wq);
1443 WQ_CQE_INVALIDATE(cqe);
1444 ncqe++;
1445 }
1446 oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_PREWRITE);
1447
1448 if (ifq_is_oactive(&ifp->if_snd)) {
1449 if (wq->ring->nused < (wq->ring->nitems / 2)) {
1450 ifq_clr_oactive(&ifp->if_snd);
1451 oce_start(ifp);
1452 }
1453 }
1454 if (wq->ring->nused == 0)
1455 ifp->if_timer = 0;
1456
1457 if (ncqe)
1458 oce_arm_cq(cq, ncqe, FALSE);
1459}
1460
1461void
1462oce_txeof(struct oce_wq *wq)
1463{
1464 struct oce_softc *sc = wq->sc;
1465 struct oce_pkt *pkt;
1466 struct mbuf *m;
1467
1468 if ((pkt = oce_pkt_get(&wq->pkt_list)) == NULL) {
1469 printf("%s: missing descriptor in txeof\n",
1470 sc->sc_dev.dv_xname);
1471 return;
1472 }
1473
1474 wq->ring->nused -= pkt->nsegs + 1;
1475 bus_dmamap_sync(sc->sc_dmat, pkt->map, 0, pkt->map->dm_mapsize,
1476 BUS_DMASYNC_POSTWRITE);
1477 bus_dmamap_unload(sc->sc_dmat, pkt->map);
1478
1479 m = pkt->mbuf;
1480 m_freem(m);
1481 pkt->mbuf = NULL;
1482 oce_pkt_put(&wq->pkt_free, pkt);
1483}
1484
1485/* Handle the Completion Queue for receive */
1486void
1487oce_intr_rq(void *arg)
1488{
1489 struct oce_rq *rq = (struct oce_rq *)arg;
1490 struct oce_cq *cq = rq->cq;
1491 struct oce_softc *sc = rq->sc;
1492 struct oce_nic_rx_cqe *cqe;
1493 struct ifnet *ifp = &sc->sc_ac.ac_if;
1494 int maxrx, ncqe = 0;
1495
1496 maxrx = IS_XE201(sc) ? 8 : OCE_MAX_RQ_COMPL;
1497
1498 oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_POSTREAD);
1499
1500 OCE_RING_FOREACH(cq->ring, cqe, RQ_CQE_VALID(cqe) && ncqe <= maxrx) {
1501 if (cqe->u0.s.error == 0) {
1502 if (cqe->u0.s.pkt_size == 0)
1503 /* partial DMA workaround for Lancer */
1504 oce_rxeoc(rq, cqe);
1505 else
1506 oce_rxeof(rq, cqe);
1507 } else {
1508 ifp->if_ierrors++;
1509 if (IS_XE201(sc))
1510 /* Lancer A0 no buffer workaround */
1511 oce_rxeoc(rq, cqe);
1512 else
1513 /* Post L3/L4 errors to stack. */
1514 oce_rxeof(rq, cqe);
1515 }
1516#ifdef OCE_LRO
1517 if (IF_LRO_ENABLED(ifp) && rq->lro_pkts_queued >= 16)
1518 oce_flush_lro(rq);
1519#endif
1520 RQ_CQE_INVALIDATE(cqe);
1521 ncqe++;
1522 }
1523
1524 oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_PREWRITE);
1525
1526#ifdef OCE_LRO
1527 if (IF_LRO_ENABLED(ifp))
1528 oce_flush_lro(rq);
1529#endif
1530
1531 if (ncqe) {
1532 oce_arm_cq(cq, ncqe, FALSE);
1533 if (!oce_alloc_rx_bufs(rq))
1534 timeout_add(&sc->sc_rxrefill, 1);
1535 }
1536}
1537
1538void
1539oce_rxeof(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
1540{
1541 struct oce_softc *sc = rq->sc;
1542 struct oce_pkt *pkt = NULL;
1543 struct ifnet *ifp = &sc->sc_ac.ac_if;
1544 struct mbuf_list ml = MBUF_LIST_INITIALIZER();
1545 struct mbuf *m = NULL, *tail = NULL;
1546 int i, len, frag_len;
1547 uint16_t vtag;
1548
1549 len = cqe->u0.s.pkt_size;
1550
1551 /* Get vlan_tag value */
1552 if (IS_BE(sc))
1553 vtag = ntohs(cqe->u0.s.vlan_tag);
1554 else
1555 vtag = cqe->u0.s.vlan_tag;
1556
1557 for (i = 0; i < cqe->u0.s.num_fragments; i++) {
1558 if ((pkt = oce_pkt_get(&rq->pkt_list)) == NULL) {
1559 printf("%s: missing descriptor in rxeof\n",
1560 sc->sc_dev.dv_xname);
1561 goto exit;
1562 }
1563
1564 bus_dmamap_sync(sc->sc_dmat, pkt->map, 0, pkt->map->dm_mapsize,
1565 BUS_DMASYNC_POSTREAD);
1566 bus_dmamap_unload(sc->sc_dmat, pkt->map);
1567 if_rxr_put(&rq->rxring, 1);
1568
1569 frag_len = (len > rq->fragsize) ? rq->fragsize : len;
1570 pkt->mbuf->m_len = frag_len;
1571
1572 if (tail != NULL) {
1573 /* additional fragments */
1574 pkt->mbuf->m_flags &= ~M_PKTHDR;
1575 tail->m_next = pkt->mbuf;
1576 tail = pkt->mbuf;
1577 } else {
1578 /* first fragment, fill out most of the header */
1579 pkt->mbuf->m_pkthdr.len = len;
1580 pkt->mbuf->m_pkthdr.csum_flags = 0;
1581 if (cqe->u0.s.ip_cksum_pass) {
1582 if (!cqe->u0.s.ip_ver) { /* IPV4 */
1583 pkt->mbuf->m_pkthdr.csum_flags =
1584 M_IPV4_CSUM_IN_OK;
1585 }
1586 }
1587 if (cqe->u0.s.l4_cksum_pass) {
1588 pkt->mbuf->m_pkthdr.csum_flags |=
1589 M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK;
1590 }
1591 m = tail = pkt->mbuf;
1592 }
1593 pkt->mbuf = NULL;
1594 oce_pkt_put(&rq->pkt_free, pkt);
1595 len -= frag_len;
1596 }
1597
1598 if (m) {
1599 if (!oce_port_valid(sc, cqe)) {
1600 m_freem(m);
1601 goto exit;
1602 }
1603
1604#if NVLAN > 0
1605 /* This determines if vlan tag is valid */
1606 if (oce_vtp_valid(sc, cqe)) {
1607 if (sc->sc_fmode & FNM_FLEX10_MODE) {
1608 /* FLEX10. If QnQ is not set, neglect VLAN */
1609 if (cqe->u0.s.qnq) {
1610 m->m_pkthdr.ether_vtag = vtag;
1611 m->m_flags |= M_VLANTAG;
1612 }
1613 } else if (sc->sc_pvid != (vtag & VLAN_VID_MASK)) {
1614 /*
1615 * In UMC mode generally pvid will be stripped.
1616 * But in some cases we have seen it comes
1617 * with pvid. So if pvid == vlan, neglect vlan.
1618 */
1619 m->m_pkthdr.ether_vtag = vtag;
1620 m->m_flags |= M_VLANTAG;
1621 }
1622 }
1623#endif
1624
1625#ifdef OCE_LRO
1626 /* Try to queue to LRO */
1627 if (IF_LRO_ENABLED(ifp) && !(m->m_flags & M_VLANTAG) &&
1628 cqe->u0.s.ip_cksum_pass && cqe->u0.s.l4_cksum_pass &&
1629 !cqe->u0.s.ip_ver && rq->lro.lro_cnt != 0) {
1630
1631 if (tcp_lro_rx(&rq->lro, m, 0) == 0) {
1632 rq->lro_pkts_queued++;
1633 goto exit;
1634 }
1635 /* If LRO posting fails then try to post to STACK */
1636 }
1637#endif
1638
1639 ml_enqueue(&ml, m);
1640 }
1641exit:
1642 if (ifiq_input(&ifp->if_rcv, &ml))
1643 if_rxr_livelocked(&rq->rxring);
1644}
1645
1646 void
1647 oce_rxeoc(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
1648 {
1649 	struct oce_softc *sc = rq->sc;
1650 	struct oce_pkt *pkt;
1651 	int i, num_frags = cqe->u0.s.num_fragments;
1652
1653 	if (IS_XE201(sc) && cqe->u0.s.error) {
1654 		/*
1655 		 * Lancer A0 workaround:
1656 		 * num_frags will be 1 more than actual in case of error
1657 		 */
1658 		if (num_frags)
1659 			num_frags--;
1660 	}
1661 	for (i = 0; i < num_frags; i++) {
1662 		if ((pkt = oce_pkt_get(&rq->pkt_list)) == NULL) {
1663 			printf("%s: missing descriptor in rxeoc\n",
1664 			    sc->sc_dev.dv_xname);
1665 			return;
1666 		}
1667 		bus_dmamap_sync(sc->sc_dmat, pkt->map, 0, pkt->map->dm_mapsize,
1668 		    BUS_DMASYNC_POSTREAD);
1669 		bus_dmamap_unload(sc->sc_dmat, pkt->map);
1670 		if_rxr_put(&rq->rxring, 1);
1671 		m_freem(pkt->mbuf);
1672 		oce_pkt_put(&rq->pkt_free, pkt);
1673 	}
1674 }
1675
1676 int
1677 oce_vtp_valid(struct oce_softc *sc, struct oce_nic_rx_cqe *cqe)
1678 {
1679 	struct oce_nic_rx_cqe_v1 *cqe_v1;
1680
1681 	if (IS_BE(sc) && ISSET(sc->sc_flags, OCE_F_BE3_NATIVE)) {
1682 		cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe;
1683 		return (cqe_v1->u0.s.vlan_tag_present);
1684 	}
1685 	return (cqe->u0.s.vlan_tag_present);
1686 }
1687
1688 int
1689 oce_port_valid(struct oce_softc *sc, struct oce_nic_rx_cqe *cqe)
1690 {
1691 	struct oce_nic_rx_cqe_v1 *cqe_v1;
1692
1693 	if (IS_BE(sc) && ISSET(sc->sc_flags, OCE_F_BE3_NATIVE)) {
1694 		cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe;
1695 		if (sc->sc_port != cqe_v1->u0.s.port)
1696 			return (0);
1697 	}
1698 	return (1);
1699 }
1700
1701 #ifdef OCE_LRO
1702 void
1703 oce_flush_lro(struct oce_rq *rq)
1704 {
1705 	struct oce_softc *sc = rq->sc;
1706 	struct ifnet *ifp = &sc->sc_ac.ac_if;
1707 	struct lro_ctrl *lro = &rq->lro;
1708 	struct lro_entry *queued;
1709
1710 	if (!IF_LRO_ENABLED(ifp))
1711 		return;
1712
1713 	while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
1714 		SLIST_REMOVE_HEAD(&lro->lro_active, next);
1715 		tcp_lro_flush(lro, queued);
1716 	}
1717 	rq->lro_pkts_queued = 0;
1718 }
1719
1720 int
1721 oce_init_lro(struct oce_softc *sc)
1722 {
1723 	struct lro_ctrl *lro = NULL;
1724 	int i = 0, rc = 0;
1725
1726 	for (i = 0; i < sc->sc_nrq; i++) {
1727 		lro = &sc->sc_rq[i]->lro;
1728 		rc = tcp_lro_init(lro);
1729 		if (rc != 0) {
1730 			printf("%s: LRO init failed\n",
1731 			    sc->sc_dev.dv_xname);
1732 			return rc;
1733 		}
1734 		lro->ifp = &sc->sc_ac.ac_if;
1735 	}
1736
1737 	return (rc);
1738 }
1739
1740 void
1741 oce_free_lro(struct oce_softc *sc)
1742 {
1743 	struct lro_ctrl *lro = NULL;
1744 	int i = 0;
1745
1746 	for (i = 0; i < sc->sc_nrq; i++) {
1747 		lro = &sc->sc_rq[i]->lro;
1748 		if (lro)
1749 			tcp_lro_free(lro);
1750 	}
1751 }
1752 #endif /* OCE_LRO */
1753
1754 int
1755 oce_get_buf(struct oce_rq *rq)
1756 {
1757 	struct oce_softc *sc = rq->sc;
1758 	struct oce_pkt *pkt;
1759 	struct oce_nic_rqe *rqe;
1760
1761 	if ((pkt = oce_pkt_get(&rq->pkt_free)) == NULL)
1762 		return (0);
1763
1764 	pkt->mbuf = MCLGETL(NULL, M_DONTWAIT, MCLBYTES);
1765 	if (pkt->mbuf == NULL) {
1766 		oce_pkt_put(&rq->pkt_free, pkt);
1767 		return (0);
1768 	}
1769
1770 	pkt->mbuf->m_len = pkt->mbuf->m_pkthdr.len = MCLBYTES;
1771 #ifdef __STRICT_ALIGNMENT
1772 	m_adj(pkt->mbuf, ETHER_ALIGN);
1773 #endif
1774
1775 	if (bus_dmamap_load_mbuf(sc->sc_dmat, pkt->map, pkt->mbuf,
1776 	    BUS_DMA_NOWAIT)) {
1777 		m_freem(pkt->mbuf);
1778 		pkt->mbuf = NULL;
1779 		oce_pkt_put(&rq->pkt_free, pkt);
1780 		return (0);
1781 	}
1782
1783 	bus_dmamap_sync(sc->sc_dmat, pkt->map, 0, pkt->map->dm_mapsize,
1784 	    BUS_DMASYNC_PREREAD);
1785
1786 	oce_dma_sync(&rq->ring->dma, BUS_DMASYNC_PREREAD |
1787 	    BUS_DMASYNC_PREWRITE);
1788
1789 	rqe = oce_ring_get(rq->ring);
1790 	rqe->u0.s.frag_pa_hi = ADDR_HI(pkt->map->dm_segs[0].ds_addr);
1791 	rqe->u0.s.frag_pa_lo = ADDR_LO(pkt->map->dm_segs[0].ds_addr);
1792
1793 	oce_dma_sync(&rq->ring->dma, BUS_DMASYNC_POSTREAD |
1794 	    BUS_DMASYNC_POSTWRITE);
1795
1796 	oce_pkt_put(&rq->pkt_list, pkt);
1797
1798 	return (1);
1799 }
1800
1801 int
1802 oce_alloc_rx_bufs(struct oce_rq *rq)
1803 {
1804 	struct oce_softc *sc = rq->sc;
1805 	int i, nbufs = 0;
1806 	u_int slots;
1807
1808 	for (slots = if_rxr_get(&rq->rxring, rq->nitems); slots > 0; slots--) {
1809 		if (oce_get_buf(rq) == 0)
1810 			break;
1811
1812 		nbufs++;
1813 	}
1814 	if_rxr_put(&rq->rxring, slots);
1815
1816 	if (!nbufs)
1817 		return (0);
1818 	for (i = nbufs / OCE_MAX_RQ_POSTS; i > 0; i--) {
1819 		oce_write_db(sc, PD_RXULP_DB, rq->id |
1820 		    (OCE_MAX_RQ_POSTS << 24));
1821 		nbufs -= OCE_MAX_RQ_POSTS;
1822 	}
1823 	if (nbufs > 0)
1824 		oce_write_db(sc, PD_RXULP_DB, rq->id | (nbufs << 24));
1825 	return (1);
1826 }
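
Editor's note: each PD_RXULP_DB doorbell write can post at most OCE_MAX_RQ_POSTS (255) buffers, which is why the refill above is split into full batches plus a remainder. A worked example with hypothetical numbers (600 refilled buffers for an RQ with id 3):

	/*
	 * 600 / 255 = 2 full batches; 600 - 2 * 255 = 90 remaining.
	 * The loop issues two writes of 255, the tail one write of 90:
	 */
	oce_write_db(sc, PD_RXULP_DB, 3 | (255 << 24));	/* batch 1 */
	oce_write_db(sc, PD_RXULP_DB, 3 | (255 << 24));	/* batch 2 */
	oce_write_db(sc, PD_RXULP_DB, 3 | (90 << 24));	/* remainder */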
1827
1828 void
1829 oce_refill_rx(void *arg)
1830 {
1831 	struct oce_softc *sc = arg;
1832 	struct oce_rq *rq;
1833 	int i, s;
1834
1835 	s = splnet();
1836 	OCE_RQ_FOREACH(sc, rq, i) {
1837 		if (!oce_alloc_rx_bufs(rq))
1838 			timeout_add(&sc->sc_rxrefill, 5);
1839 	}
1840 	splx(s);
1841 }
1842
1843 /* Handle the Completion Queue for the Mailbox/Async notifications */
1844 void
1845 oce_intr_mq(void *arg)
1846 {
1847 	struct oce_mq *mq = (struct oce_mq *)arg;
1848 	struct oce_softc *sc = mq->sc;
1849 	struct oce_cq *cq = mq->cq;
1850 	struct oce_mq_cqe *cqe;
1851 	struct oce_async_cqe_link_state *acqe;
1852 	struct oce_async_event_grp5_pvid_state *gcqe;
1853 	int evtype, optype, ncqe = 0;
1854
1855 	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_POSTREAD);
1856
1857 	OCE_RING_FOREACH(cq->ring, cqe, MQ_CQE_VALID(cqe)) {
1858 		if (cqe->u0.s.async_event) {
1859 			evtype = cqe->u0.s.event_type;
1860 			optype = cqe->u0.s.async_type;
1861 			if (evtype == ASYNC_EVENT_CODE_LINK_STATE) {
1862 				/* Link status evt */
1863 				acqe = (struct oce_async_cqe_link_state *)cqe;
1864 				oce_link_event(sc, acqe);
1865 			} else if ((evtype == ASYNC_EVENT_GRP5) &&
1866 			    (optype == ASYNC_EVENT_PVID_STATE)) {
1867 				/* GRP5 PVID */
1868 				gcqe =
1869 				    (struct oce_async_event_grp5_pvid_state *)cqe;
1870 				if (gcqe->enabled)
1871 					sc->sc_pvid =
1872 					    gcqe->tag & VLAN_VID_MASK;
1873 				else
1874 					sc->sc_pvid = 0;
1875 			}
1876 		}
1877 		MQ_CQE_INVALIDATE(cqe);
1878 		ncqe++;
1879 	}
1880
1881 	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_PREWRITE);
1882
1883 	if (ncqe)
1884 		oce_arm_cq(cq, ncqe, FALSE);
1885 }
1886
1887 void
1888 oce_link_event(struct oce_softc *sc, struct oce_async_cqe_link_state *acqe)
1889 {
1890 	/* Update Link status */
1891 	sc->sc_link_up = ((acqe->u0.s.link_status & ~ASYNC_EVENT_LOGICAL) ==
1892 	    ASYNC_EVENT_LINK_UP);
1893 	/* Update speed */
1894 	sc->sc_link_speed = acqe->u0.s.speed;
1895 	oce_link_status(sc);
1896 }
1897
1898 int
1899 oce_init_queues(struct oce_softc *sc)
1900 {
1901 	struct oce_wq *wq;
1902 	struct oce_rq *rq;
1903 	int i;
1904
1905 	sc->sc_nrq = 1;
1906 	sc->sc_nwq = 1;
1907
1908 	/* Create network interface on card */
1909 	if (oce_create_iface(sc, sc->sc_macaddr))
1910 		goto error;
1911
1912 	/* create all of the event queues */
1913 	for (i = 0; i < sc->sc_nintr; i++) {
1914 		sc->sc_eq[i] = oce_create_eq(sc);
1915 		if (!sc->sc_eq[i])
1916 			goto error;
1917 	}
1918
1919 	/* alloc tx queues */
1920 	OCE_WQ_FOREACH(sc, wq, i) {
1921 		sc->sc_wq[i] = oce_create_wq(sc, sc->sc_eq[i]);
1922 		if (!sc->sc_wq[i])
1923 			goto error;
1924 	}
1925
1926 	/* alloc rx queues */
1927 	OCE_RQ_FOREACH(sc, rq, i) {
1928 		sc->sc_rq[i] = oce_create_rq(sc, sc->sc_eq[i > 0 ? i - 1 : 0],
1929 		    i > 0 ? sc->sc_rss_enable : 0);
1930 		if (!sc->sc_rq[i])
1931 			goto error;
1932 	}
1933
1934 	/* alloc mailbox queue */
1935 	sc->sc_mq = oce_create_mq(sc, sc->sc_eq[0]);
1936 	if (!sc->sc_mq)
1937 		goto error;
1938
1939 	return (0);
1940 error:
1941 	oce_release_queues(sc);
1942 	return (1);
1943 }
1944
1945 void
1946 oce_release_queues(struct oce_softc *sc)
1947 {
1948 	struct oce_wq *wq;
1949 	struct oce_rq *rq;
1950 	struct oce_eq *eq;
1951 	int i;
1952
1953 	OCE_RQ_FOREACH(sc, rq, i) {
1954 		if (rq)
1955 			oce_destroy_rq(sc->sc_rq[i]);
1956 	}
1957
1958 	OCE_WQ_FOREACH(sc, wq, i) {
1959 		if (wq)
1960 			oce_destroy_wq(sc->sc_wq[i]);
1961 	}
1962
1963 	if (sc->sc_mq)
1964 		oce_destroy_mq(sc->sc_mq);
1965
1966 	OCE_EQ_FOREACH(sc, eq, i) {
1967 		if (eq)
1968 			oce_destroy_eq(sc->sc_eq[i]);
1969 	}
1970 }
1971
1972 /**
1973  * @brief Function to create a WQ for NIC Tx
1974  * @param sc software handle to the device
1975  * @returns the pointer to the WQ created or NULL on failure
1976  */
1977 struct oce_wq *
1978 oce_create_wq(struct oce_softc *sc, struct oce_eq *eq)
1979 {
1980 	struct oce_wq *wq;
1981 	struct oce_cq *cq;
1982 	struct oce_pkt *pkt;
1983 	int i;
1984
1985 	if (sc->sc_tx_ring_size < 256 || sc->sc_tx_ring_size > 2048)
1986 		return (NULL);
1987
1988 	wq = malloc(sizeof(struct oce_wq), M_DEVBUF, M_NOWAIT | M_ZERO);
1989 	if (!wq)
1990 		return (NULL);
1991
1992 	wq->ring = oce_create_ring(sc, sc->sc_tx_ring_size, NIC_WQE_SIZE, 8);
1993 	if (!wq->ring) {
1994 		free(wq, M_DEVBUF, 0);
1995 		return (NULL);
1996 	}
1997
1998 	cq = oce_create_cq(sc, eq, CQ_LEN_512, sizeof(struct oce_nic_tx_cqe),
1999 	    1, 0, 3);
2000 	if (!cq) {
2001 		oce_destroy_ring(sc, wq->ring);
2002 		free(wq, M_DEVBUF, 0);
2003 		return (NULL);
2004 	}
2005
2006 	wq->id = -1;
2007 	wq->sc = sc;
2008
2009 	wq->cq = cq;
2010 	wq->nitems = sc->sc_tx_ring_size;
2011
2012 	SIMPLEQ_INIT(&wq->pkt_free);
2013 	SIMPLEQ_INIT(&wq->pkt_list);
2014
2015 	for (i = 0; i < sc->sc_tx_ring_size / 2; i++) {
2016 		pkt = oce_pkt_alloc(sc, OCE_MAX_TX_SIZE, OCE_MAX_TX_ELEMENTS,
2017 		    PAGE_SIZE);
2018 		if (pkt == NULL) {
2019 			oce_destroy_wq(wq);
2020 			return (NULL);
2021 		}
2022 		oce_pkt_put(&wq->pkt_free, pkt);
2023 	}
2024
2025 	if (oce_new_wq(sc, wq)) {
2026 		oce_destroy_wq(wq);
2027 		return (NULL);
2028 	}
2029
2030 	eq->cq[eq->cq_valid] = cq;
2031 	eq->cq_valid++;
2032 	cq->cb_arg = wq;
2033 	cq->cq_intr = oce_intr_wq;
2034
2035 	return (wq);
2036 }
2037
2038 void
2039 oce_drain_wq(struct oce_wq *wq)
2040 {
2041 	struct oce_cq *cq = wq->cq;
2042 	struct oce_nic_tx_cqe *cqe;
2043 	int ncqe = 0;
2044
2045 	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_POSTREAD);
2046 	OCE_RING_FOREACH(cq->ring, cqe, WQ_CQE_VALID(cqe)) {
2047 		WQ_CQE_INVALIDATE(cqe);
2048 		ncqe++;
2049 	}
2050 	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_PREWRITE);
2051 	oce_arm_cq(cq, ncqe, FALSE);
2052 }
2053
2054 void
2055 oce_destroy_wq(struct oce_wq *wq)
2056 {
2057 	struct mbx_delete_nic_wq cmd;
2058 	struct oce_softc *sc = wq->sc;
2059 	struct oce_pkt *pkt;
2060
2061 	if (wq->id >= 0) {
2062 		memset(&cmd, 0, sizeof(cmd));
2063 		cmd.params.req.wq_id = htole16(wq->id);
2064 		oce_cmd(sc, SUBSYS_NIC, OPCODE_NIC_DELETE_WQ, OCE_MBX_VER_V0,
2065 		    &cmd, sizeof(cmd));
2066 	}
2067 	if (wq->cq != NULL)
2068 		oce_destroy_cq(wq->cq);
2069 	if (wq->ring != NULL)
2070 		oce_destroy_ring(sc, wq->ring);
2071 	while ((pkt = oce_pkt_get(&wq->pkt_free)) != NULL)
2072 		oce_pkt_free(sc, pkt);
2073 	free(wq, M_DEVBUF, 0);
2074 }
2075
2076 /**
2077  * @brief function to allocate receive queue resources
2078  * @param sc software handle to the device
2079  * @param eq pointer to associated event queue
2080  * @param rss is-rss-queue flag
2081  * @returns the pointer to the RQ created or NULL on failure
2082  */
2083 struct oce_rq *
2084 oce_create_rq(struct oce_softc *sc, struct oce_eq *eq, int rss)
2085 {
2086 	struct oce_rq *rq;
2087 	struct oce_cq *cq;
2088 	struct oce_pkt *pkt;
2089 	int i;
2090
2091 	/* Hardware doesn't support any other value */
2092 	if (sc->sc_rx_ring_size != 1024)
2093 		return (NULL);
2094
2095 	rq = malloc(sizeof(struct oce_rq), M_DEVBUF, M_NOWAIT | M_ZERO);
2096 	if (!rq)
2097 		return (NULL);
2098
2099 	rq->ring = oce_create_ring(sc, sc->sc_rx_ring_size,
2100 	    sizeof(struct oce_nic_rqe), 2);
2101 	if (!rq->ring) {
2102 		free(rq, M_DEVBUF, 0);
2103 		return (NULL);
2104 	}
2105
2106 	cq = oce_create_cq(sc, eq, CQ_LEN_1024, sizeof(struct oce_nic_rx_cqe),
2107 	    1, 0, 3);
2108 	if (!cq) {
2109 		oce_destroy_ring(sc, rq->ring);
2110 		free(rq, M_DEVBUF, 0);
2111 		return (NULL);
2112 	}
2113
2114 	rq->id = -1;
2115 	rq->sc = sc;
2116
2117 	rq->nitems = sc->sc_rx_ring_size;
2118 	rq->fragsize = OCE_RX_BUF_SIZE;
2119 	rq->rss = rss;
2120
2121 	SIMPLEQ_INIT(&rq->pkt_free);
2122 	SIMPLEQ_INIT(&rq->pkt_list);
2123
2124 	for (i = 0; i < sc->sc_rx_ring_size; i++) {
2125 		pkt = oce_pkt_alloc(sc, OCE_RX_BUF_SIZE, 1, OCE_RX_BUF_SIZE);
2126 		if (pkt == NULL) {
2127 			oce_destroy_rq(rq);
2128 			return (NULL);
2129 		}
2130 		oce_pkt_put(&rq->pkt_free, pkt);
2131 	}
2132
2133 	rq->cq = cq;
2134 	eq->cq[eq->cq_valid] = cq;
2135 	eq->cq_valid++;
2136 	cq->cb_arg = rq;
2137 	cq->cq_intr = oce_intr_rq;
2138
2139 	/* RX queue is created in oce_init */
2140
2141 	return (rq);
2142 }
2143
2144 void
2145 oce_drain_rq(struct oce_rq *rq)
2146 {
2147 	struct oce_nic_rx_cqe *cqe;
2148 	struct oce_cq *cq = rq->cq;
2149 	int ncqe = 0;
2150
2151 	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_POSTREAD);
2152 	OCE_RING_FOREACH(cq->ring, cqe, RQ_CQE_VALID(cqe)) {
2153 		RQ_CQE_INVALIDATE(cqe);
2154 		ncqe++;
2155 	}
2156 	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_PREWRITE);
2157 	oce_arm_cq(cq, ncqe, FALSE);
2158 }
2159
2160 void
2161 oce_destroy_rq(struct oce_rq *rq)
2162 {
2163 	struct mbx_delete_nic_rq cmd;
2164 	struct oce_softc *sc = rq->sc;
2165 	struct oce_pkt *pkt;
2166
2167 	if (rq->id >= 0) {
2168 		memset(&cmd, 0, sizeof(cmd));
2169 		cmd.params.req.rq_id = htole16(rq->id);
2170 		oce_cmd(sc, SUBSYS_NIC, OPCODE_NIC_DELETE_RQ, OCE_MBX_VER_V0,
2171 		    &cmd, sizeof(cmd));
2172 	}
2173 	if (rq->cq != NULL)
2174 		oce_destroy_cq(rq->cq);
2175 	if (rq->ring != NULL)
2176 		oce_destroy_ring(sc, rq->ring);
2177 	while ((pkt = oce_pkt_get(&rq->pkt_free)) != NULL)
2178 		oce_pkt_free(sc, pkt);
2179 	free(rq, M_DEVBUF, 0);
2180 }
2181
2182 struct oce_eq *
2183 oce_create_eq(struct oce_softc *sc)
2184 {
2185 	struct oce_eq *eq;
2186
2187 	/* allocate an eq */
2188 	eq = malloc(sizeof(struct oce_eq), M_DEVBUF, M_NOWAIT | M_ZERO);
2189 	if (eq == NULL)
2190 		return (NULL);
2191
2192 	eq->ring = oce_create_ring(sc, EQ_LEN_1024, EQE_SIZE_4, 8);
2193 	if (!eq->ring) {
2194 		free(eq, M_DEVBUF, 0);
2195 		return (NULL);
2196 	}
2197
2198 	eq->id = -1;
2199 	eq->sc = sc;
2200 	eq->nitems = EQ_LEN_1024;	/* length of event queue */
2201 	eq->isize = EQE_SIZE_4;		/* size of a queue item */
2202 	eq->delay = OCE_DEFAULT_EQD;	/* event queue delay */
2203
2204 	if (oce_new_eq(sc, eq)) {
2205 		oce_destroy_ring(sc, eq->ring);
2206 		free(eq, M_DEVBUF, 0);
2207 		return (NULL);
2208 	}
2209
2210 	return (eq);
2211 }
2212
2213 /**
2214  * @brief Function to arm an EQ so that it can generate events
2215  * @param eq pointer to event queue structure
2216  * @param neqe number of EQEs to arm
2217  * @param rearm rearm bit enable/disable
2218  * @param clearint bit to clear the interrupt condition because of which
2219  *		    EQEs are generated
2220  */
2221 static inline void
2222 oce_arm_eq(struct oce_eq *eq, int neqe, int rearm, int clearint)
2223 {
2224 	oce_write_db(eq->sc, PD_EQ_DB, eq->id | PD_EQ_DB_EVENT |
2225 	    (clearint << 9) | (neqe << 16) | (rearm << 29));
2226 }
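
Editor's note: the EQ doorbell packs all of its fields into one 32-bit write: the EQ id in the low bits, the event flag PD_EQ_DB_EVENT (bit 10), clear-interrupt (bit 9), the count of consumed EQEs (from bit 16), and the rearm flag (bit 29). A minimal sketch of a typical call, with the EQE count (3) purely illustrative:

	/* Acknowledge 3 EQEs, clear the interrupt condition and re-arm;
	 * writes eq->id | PD_EQ_DB_EVENT | (1 << 9) | (3 << 16) | (1 << 29). */
	oce_arm_eq(eq, 3, TRUE, TRUE);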
2227
2228 void
2229 oce_drain_eq(struct oce_eq *eq)
2230 {
2231 	struct oce_eqe *eqe;
2232 	int neqe = 0;
2233
2234 	oce_dma_sync(&eq->ring->dma, BUS_DMASYNC_POSTREAD);
2235 	OCE_RING_FOREACH(eq->ring, eqe, eqe->evnt != 0) {
2236 		eqe->evnt = 0;
2237 		neqe++;
2238 	}
2239 	oce_dma_sync(&eq->ring->dma, BUS_DMASYNC_PREWRITE);
2240 	oce_arm_eq(eq, neqe, FALSE, TRUE);
2241 }
2242
2243 void
2244 oce_destroy_eq(struct oce_eq *eq)
2245 {
2246 	struct mbx_destroy_common_eq cmd;
2247 	struct oce_softc *sc = eq->sc;
2248
2249 	if (eq->id >= 0) {
2250 		memset(&cmd, 0, sizeof(cmd));
2251 		cmd.params.req.id = htole16(eq->id);
2252 		oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_DESTROY_EQ,
2253 		    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
2254 	}
2255 	if (eq->ring != NULL)
2256 		oce_destroy_ring(sc, eq->ring);
2257 	free(eq, M_DEVBUF, 0);
2258 }
2259
2260 struct oce_mq *
2261 oce_create_mq(struct oce_softc *sc, struct oce_eq *eq)
2262 {
2263 	struct oce_mq *mq = NULL;
2264 	struct oce_cq *cq;
2265
2266 	/* allocate the mq */
2267 	mq = malloc(sizeof(struct oce_mq), M_DEVBUF, M_NOWAIT | M_ZERO);
2268 	if (!mq)
2269 		return (NULL);
2270
2271 	mq->ring = oce_create_ring(sc, 128, sizeof(struct oce_mbx), 8);
2272 	if (!mq->ring) {
2273 		free(mq, M_DEVBUF, 0);
2274 		return (NULL);
2275 	}
2276
2277 	cq = oce_create_cq(sc, eq, CQ_LEN_256, sizeof(struct oce_mq_cqe),
2278 	    1, 0, 0);
2279 	if (!cq) {
2280 		oce_destroy_ring(sc, mq->ring);
2281 		free(mq, M_DEVBUF, 0);
2282 		return (NULL);
2283 	}
2284
2285 	mq->id = -1;
2286 	mq->sc = sc;
2287 	mq->cq = cq;
2288
2289 	mq->nitems = 128;
2290
2291 	if (oce_new_mq(sc, mq)) {
2292 		oce_destroy_cq(mq->cq);
2293 		oce_destroy_ring(sc, mq->ring);
2294 		free(mq, M_DEVBUF, 0);
2295 		return (NULL);
2296 	}
2297
2298 	eq->cq[eq->cq_valid] = cq;
2299 	eq->cq_valid++;
2300 	mq->cq->eq = eq;
2301 	mq->cq->cb_arg = mq;
2302 	mq->cq->cq_intr = oce_intr_mq;
2303
2304 	return (mq);
2305 }
2306
2307 void
2308 oce_drain_mq(struct oce_mq *mq)
2309 {
2310 	struct oce_cq *cq = mq->cq;
2311 	struct oce_mq_cqe *cqe;
2312 	int ncqe = 0;
2313
2314 	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_POSTREAD);
2315 	OCE_RING_FOREACH(cq->ring, cqe, MQ_CQE_VALID(cqe)) {
2316 		MQ_CQE_INVALIDATE(cqe);
2317 		ncqe++;
2318 	}
2319 	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_PREWRITE);
2320 	oce_arm_cq(cq, ncqe, FALSE);
2321 }
2322
2323 void
2324 oce_destroy_mq(struct oce_mq *mq)
2325 {
2326 	struct mbx_destroy_common_mq cmd;
2327 	struct oce_softc *sc = mq->sc;
2328
2329 	if (mq->id >= 0) {
2330 		memset(&cmd, 0, sizeof(cmd));
2331 		cmd.params.req.id = htole16(mq->id);
2332 		oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_DESTROY_MQ,
2333 		    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
2334 	}
2335 	if (mq->ring != NULL)
2336 		oce_destroy_ring(sc, mq->ring);
2337 	if (mq->cq != NULL)
2338 		oce_destroy_cq(mq->cq);
2339 	free(mq, M_DEVBUF, 0);
2340 }
2341
2342 /**
2343  * @brief Function to create a completion queue
2344  * @param sc software handle to the device
2345  * @param eq optional eq to be associated with the cq
2346  * @param nitems length of completion queue
2347  * @param isize size of completion queue items
2348  * @param eventable whether the cq generates events on the eq
2349  * @param nodelay no delay flag
2350  * @param ncoalesce coalescence setting
2351  * @returns pointer to the cq created, NULL on failure
2352  */
2353 struct oce_cq *
2354 oce_create_cq(struct oce_softc *sc, struct oce_eq *eq, int nitems, int isize,
2355     int eventable, int nodelay, int ncoalesce)
2356 {
2357 	struct oce_cq *cq = NULL;
2358
2359 	cq = malloc(sizeof(struct oce_cq), M_DEVBUF, M_NOWAIT | M_ZERO);
2360 	if (!cq)
2361 		return (NULL);
2362
2363 	cq->ring = oce_create_ring(sc, nitems, isize, 4);
2364 	if (!cq->ring) {
2365 		free(cq, M_DEVBUF, 0);
2366 		return (NULL);
2367 	}
2368
2369 	cq->sc = sc;
2370 	cq->eq = eq;
2371 	cq->nitems = nitems;
2372 	cq->nodelay = nodelay;
2373 	cq->ncoalesce = ncoalesce;
2374 	cq->eventable = eventable;
2375
2376 	if (oce_new_cq(sc, cq)) {
2377 		oce_destroy_ring(sc, cq->ring);
2378 		free(cq, M_DEVBUF, 0);
2379 		return (NULL);
2380 	}
2381
2382 	sc->sc_cq[sc->sc_ncq++] = cq;
2383
2384 	return (cq);
2385 }
2386
2387 void
2388 oce_destroy_cq(struct oce_cq *cq)
2389 {
2390 	struct mbx_destroy_common_cq cmd;
2391 	struct oce_softc *sc = cq->sc;
2392
2393 	if (cq->id >= 0) {
2394 		memset(&cmd, 0, sizeof(cmd));
2395 		cmd.params.req.id = htole16(cq->id);
2396 		oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_DESTROY_CQ,
2397 		    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
2398 	}
2399 	if (cq->ring != NULL)
2400 		oce_destroy_ring(sc, cq->ring);
2401 	free(cq, M_DEVBUF, 0);
2402 }
2403
2404 /**
2405  * @brief Function to arm a CQ with CQEs
2406  * @param cq pointer to the completion queue structure
2407  * @param ncqe number of CQEs to arm
2408  * @param rearm rearm bit enable/disable
2409  */
2410 static inline void
2411 oce_arm_cq(struct oce_cq *cq, int ncqe, int rearm)
2412 {
2413 	oce_write_db(cq->sc, PD_CQ_DB, cq->id | (ncqe << 16) | (rearm << 29));
2414 }
2415
2416 void
2417 oce_free_posted_rxbuf(struct oce_rq *rq)
2418 {
2419 	struct oce_softc *sc = rq->sc;
2420 	struct oce_pkt *pkt;
2421
2422 	while ((pkt = oce_pkt_get(&rq->pkt_list)) != NULL) {
2423 		bus_dmamap_sync(sc->sc_dmat, pkt->map, 0, pkt->map->dm_mapsize,
2424 		    BUS_DMASYNC_POSTREAD);
2425 		bus_dmamap_unload(sc->sc_dmat, pkt->map);
2426 		if (pkt->mbuf != NULL) {
2427 			m_freem(pkt->mbuf);
2428 			pkt->mbuf = NULL;
2429 		}
2430 		oce_pkt_put(&rq->pkt_free, pkt);
2431 		if_rxr_put(&rq->rxring, 1);
2432 	}
2433 }
2434
2435 int
2436 oce_dma_alloc(struct oce_softc *sc, bus_size_t size, struct oce_dma_mem *dma)
2437 {
2438 	int rc;
2439
2440 	memset(dma, 0, sizeof(struct oce_dma_mem));
2441
2442 	dma->tag = sc->sc_dmat;
2443 	rc = bus_dmamap_create(dma->tag, size, 1, size, 0, BUS_DMA_NOWAIT,
2444 	    &dma->map);
2445 	if (rc != 0) {
2446 		printf("%s: failed to allocate DMA handle\n",
2447 		    sc->sc_dev.dv_xname);
2448 		goto fail_0;
2449 	}
2450
2451 	rc = bus_dmamem_alloc(dma->tag, size, PAGE_SIZE, 0, &dma->segs, 1,
2452 	    &dma->nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
2453 	if (rc != 0) {
2454 		printf("%s: failed to allocate DMA memory\n",
2455 		    sc->sc_dev.dv_xname);
2456 		goto fail_1;
2457 	}
2458
2459 	rc = bus_dmamem_map(dma->tag, &dma->segs, dma->nsegs, size,
2460 	    &dma->vaddr, BUS_DMA_NOWAIT);
2461 	if (rc != 0) {
2462 		printf("%s: failed to map DMA memory\n", sc->sc_dev.dv_xname);
2463 		goto fail_2;
2464 	}
2465
2466 	rc = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size, NULL,
2467 	    BUS_DMA_NOWAIT);
2468 	if (rc != 0) {
2469 		printf("%s: failed to load DMA memory\n", sc->sc_dev.dv_xname);
2470 		goto fail_3;
2471 	}
2472
2473 	bus_dmamap_sync(dma->tag, dma->map, 0, dma->map->dm_mapsize,
2474 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2475
2476 	dma->paddr = dma->map->dm_segs[0].ds_addr;
2477 	dma->size = size;
2478
2479 	return (0);
2480
2481 fail_3:
2482 	bus_dmamem_unmap(dma->tag, dma->vaddr, size);
2483 fail_2:
2484 	bus_dmamem_free(dma->tag, &dma->segs, dma->nsegs);
2485 fail_1:
2486 	bus_dmamap_destroy(dma->tag, dma->map);
2487 fail_0:
2488 	return (rc);
2489 }
2490
2491 void
2492 oce_dma_free(struct oce_softc *sc, struct oce_dma_mem *dma)
2493 {
2494 	if (dma->tag == NULL)
2495 		return;
2496
2497 	if (dma->map != NULL) {
2498 		oce_dma_sync(dma, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2499 		bus_dmamap_unload(dma->tag, dma->map);
2500
2501 		if (dma->vaddr != 0) {
2502 			bus_dmamem_free(dma->tag, &dma->segs, dma->nsegs);
2503 			dma->vaddr = 0;
2504 		}
2505
2506 		bus_dmamap_destroy(dma->tag, dma->map);
2507 		dma->map = NULL;
2508 		dma->tag = NULL;
2509 	}
2510 }
2511
2512 struct oce_ring *
2513 oce_create_ring(struct oce_softc *sc, int nitems, int isize, int maxsegs)
2514 {
2515 	struct oce_dma_mem *dma;
2516 	struct oce_ring *ring;
2517 	bus_size_t size = nitems * isize;
2518 	int rc;
2519
2520 	if (size > maxsegs * PAGE_SIZE)
2521 		return (NULL);
2522
2523 	ring = malloc(sizeof(struct oce_ring), M_DEVBUF, M_NOWAIT | M_ZERO);
2524 	if (ring == NULL)
2525 		return (NULL);
2526
2527 	ring->isize = isize;
2528 	ring->nitems = nitems;
2529
2530 	dma = &ring->dma;
2531 	dma->tag = sc->sc_dmat;
2532 	rc = bus_dmamap_create(dma->tag, size, maxsegs, PAGE_SIZE, 0,
2533 	    BUS_DMA_NOWAIT, &dma->map);
2534 	if (rc != 0) {
2535 		printf("%s: failed to allocate DMA handle\n",
2536 		    sc->sc_dev.dv_xname);
2537 		goto fail_0;
2538 	}
2539
2540 	rc = bus_dmamem_alloc(dma->tag, size, 0, 0, &dma->segs, maxsegs,
2541 	    &dma->nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
2542 	if (rc != 0) {
2543 		printf("%s: failed to allocate DMA memory\n",
2544 		    sc->sc_dev.dv_xname);
2545 		goto fail_1;
2546 	}
2547
2548 	rc = bus_dmamem_map(dma->tag, &dma->segs, dma->nsegs, size,
2549 	    &dma->vaddr, BUS_DMA_NOWAIT);
2550 	if (rc != 0) {
2551 		printf("%s: failed to map DMA memory\n", sc->sc_dev.dv_xname);
2552 		goto fail_2;
2553 	}
2554
2555 	bus_dmamap_sync(dma->tag, dma->map, 0, dma->map->dm_mapsize,
2556 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2557
2558 	dma->paddr = 0;
2559 	dma->size = size;
2560
2561 	return (ring);
2562
2563 fail_2:
2564 	bus_dmamem_free(dma->tag, &dma->segs, dma->nsegs);
2565 fail_1:
2566 	bus_dmamap_destroy(dma->tag, dma->map);
2567 fail_0:
2568 	free(ring, M_DEVBUF, 0);
2569 	return (NULL);
2570 }
2571
2572 void
2573 oce_destroy_ring(struct oce_softc *sc, struct oce_ring *ring)
2574 {
2575 	oce_dma_free(sc, &ring->dma);
2576 	free(ring, M_DEVBUF, 0);
2577 }
2578
2579 int
2580 oce_load_ring(struct oce_softc *sc, struct oce_ring *ring,
2581     struct oce_pa *pa, int maxsegs)
2582 {
2583 	struct oce_dma_mem *dma = &ring->dma;
2584 	int i;
2585
2586 	if (bus_dmamap_load(dma->tag, dma->map, dma->vaddr,
2587 	    ring->isize * ring->nitems, NULL, BUS_DMA_NOWAIT)) {
2588 		printf("%s: failed to load a ring map\n", sc->sc_dev.dv_xname);
2589 		return (0);
2590 	}
2591
2592 	if (dma->map->dm_nsegs > maxsegs) {
2593 		printf("%s: too many segments\n", sc->sc_dev.dv_xname);
2594 		return (0);
2595 	}
2596
2597 	bus_dmamap_sync(dma->tag, dma->map, 0, dma->map->dm_mapsize,
2598 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2599
2600 	for (i = 0; i < dma->map->dm_nsegs; i++)
2601 		pa[i].addr = dma->map->dm_segs[i].ds_addr;
2602
2603 	return (dma->map->dm_nsegs);
2604 }
2605
2606 static inline void *
2607 oce_ring_get(struct oce_ring *ring)
2608 {
2609 	int index = ring->index;
2610
2611 	if (++ring->index == ring->nitems)
2612 		ring->index = 0;
2613 	return ((void *)(ring->dma.vaddr + index * ring->isize));
2614 }
2615
2616 static inline void *
2617 oce_ring_first(struct oce_ring *ring)
2618 {
2619 	return ((void *)(ring->dma.vaddr + ring->index * ring->isize));
2620 }
2621
2622 static inline void *
2623 oce_ring_next(struct oce_ring *ring)
2624 {
2625 	if (++ring->index == ring->nitems)
2626 		ring->index = 0;
2627 	return ((void *)(ring->dma.vaddr + ring->index * ring->isize));
2628 }
2629
2630 struct oce_pkt *
2631 oce_pkt_alloc(struct oce_softc *sc, size_t size, int nsegs, int maxsegsz)
2632 {
2633 	struct oce_pkt *pkt;
2634
2635 	if ((pkt = pool_get(oce_pkt_pool, PR_NOWAIT | PR_ZERO)) == NULL)
2636 		return (NULL);
2637
2638 	if (bus_dmamap_create(sc->sc_dmat, size, nsegs, maxsegsz, 0,
2639 	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &pkt->map)) {
2640 		pool_put(oce_pkt_pool, pkt);
2641 		return (NULL);
2642 	}
2643
2644 	return (pkt);
2645 }
2646
2647 void
2648 oce_pkt_free(struct oce_softc *sc, struct oce_pkt *pkt)
2649 {
2650 	if (pkt->map) {
2651 		bus_dmamap_unload(sc->sc_dmat, pkt->map);
2652 		bus_dmamap_destroy(sc->sc_dmat, pkt->map);
2653 	}
2654 	pool_put(oce_pkt_pool, pkt);
2655 }
2656
2657 static inline struct oce_pkt *
2658 oce_pkt_get(struct oce_pkt_list *lst)
2659 {
2660 	struct oce_pkt *pkt;
2661
2662 	pkt = SIMPLEQ_FIRST(lst);
2663 	if (pkt == NULL)
2664 		return (NULL);
2665
2666 	SIMPLEQ_REMOVE_HEAD(lst, entry);
2667
2668 	return (pkt);
2669 }
2670
2671 static inline void
2672 oce_pkt_put(struct oce_pkt_list *lst, struct oce_pkt *pkt)
2673 {
2674 	SIMPLEQ_INSERT_TAIL(lst, pkt, entry);
2675 }
2676
2677 /**
2678  * @brief Wait for FW to become ready and reset it
2679  * @param sc software handle to the device
2680  */
2681 int
2682 oce_init_fw(struct oce_softc *sc)
2683 {
2684 	struct ioctl_common_function_reset cmd;
2685 	uint32_t reg;
2686 	int err = 0, tmo = 60000;
2687
2688 	/* read semaphore CSR */
2689 	reg = oce_read_csr(sc, MPU_EP_SEMAPHORE(sc));
2690
2691 	/* if host is ready then wait for fw ready else send POST */
2692 	if ((reg & MPU_EP_SEM_STAGE_MASK) <= POST_STAGE_AWAITING_HOST_RDY) {
2693 		reg = (reg & ~MPU_EP_SEM_STAGE_MASK) | POST_STAGE_CHIP_RESET;
2694 		oce_write_csr(sc, MPU_EP_SEMAPHORE(sc), reg);
2695 	}
2696
2697 	/* wait for FW to become ready */
2698 	for (;;) {
2699 		if (--tmo == 0)
2700 			break;
2701
2702 		DELAY(1000);
2703
2704 		reg = oce_read_csr(sc, MPU_EP_SEMAPHORE(sc));
2705 		if (reg & MPU_EP_SEM_ERROR) {
2706 			printf(": POST failed: %#x\n", reg);
2707 			return (ENXIO);
2708 		}
2709 		if ((reg & MPU_EP_SEM_STAGE_MASK) == POST_STAGE_ARMFW_READY) {
2710 			/* reset FW */
2711 			if (ISSET(sc->sc_flags, OCE_F_RESET_RQD)) {
2712 				memset(&cmd, 0, sizeof(cmd));
2713 				err = oce_cmd(sc, SUBSYS_COMMON,
2714 				    OPCODE_COMMON_FUNCTION_RESET,
2715 				    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
2716 			}
2717 			return (err);
2718 		}
2719 	}
2720
2721 	printf(": POST timed out: %#x\n", reg);
2722
2723 	return (ENXIO);
2724 }
2725
2726 static inline int
2727 oce_mbox_wait(struct oce_softc *sc)
2728 {
2729 	int i;
2730
2731 	for (i = 0; i < 20000; i++) {
2732 		if (oce_read_db(sc, PD_MPU_MBOX_DB) & PD_MPU_MBOX_DB_READY)
2733 			return (0);
2734 		DELAY(100);
2735 	}
2736 	return (ETIMEDOUT);
2737 }
2738
2739 /**
2740  * @brief Mailbox dispatch
2741  * @param sc software handle to the device
2742  */
2743 int
2744 oce_mbox_dispatch(struct oce_softc *sc)
2745 {
2746 	uint32_t pa, reg;
2747 	int err;
2748
2749 	pa = (uint32_t)((uint64_t)OCE_MEM_DVA(&sc->sc_mbx) >> 34);
2750 	reg = PD_MPU_MBOX_DB_HI | (pa << PD_MPU_MBOX_DB_ADDR_SHIFT);
2751
2752 	if ((err = oce_mbox_wait(sc)) != 0)
2753 		goto out;
2754
2755 	oce_write_db(sc, PD_MPU_MBOX_DB, reg);
2756
2757 	pa = (uint32_t)((uint64_t)OCE_MEM_DVA(&sc->sc_mbx) >> 4) & 0x3fffffff;
2758 	reg = pa << PD_MPU_MBOX_DB_ADDR_SHIFT;
2759
2760 	if ((err = oce_mbox_wait(sc)) != 0)
2761 		goto out;
2762
2763 	oce_write_db(sc, PD_MPU_MBOX_DB, reg);
2764
2765 	oce_dma_sync(&sc->sc_mbx, BUS_DMASYNC_POSTWRITE);
2766
2767 	if ((err = oce_mbox_wait(sc)) != 0)
2768 		goto out;
2769
2770 out:
2771 	oce_dma_sync(&sc->sc_mbx, BUS_DMASYNC_PREREAD);
2772 	return (err);
2773 }
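
Editor's note: the bootstrap mailbox address is handed to the MPU in two doorbell writes because a single write carries only 30 address bits: first bits 63:34 with PD_MPU_MBOX_DB_HI set, then bits 33:4 (the mailbox is 16-byte aligned, so bits 3:0 are implicitly zero). A standalone sketch of that split, using a hypothetical bus address:

	#include <assert.h>
	#include <stdint.h>

	int
	main(void)
	{
		uint64_t mbx_pa = 0x400001000ULL;	/* hypothetical, 16-byte aligned */
		uint32_t hi = (uint32_t)(mbx_pa >> 34);
		uint32_t lo = (uint32_t)(mbx_pa >> 4) & 0x3fffffff;

		assert(hi == 0x1);	/* written first, with PD_MPU_MBOX_DB_HI */
		assert(lo == 0x100);	/* written second */
		return (0);
	}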
2774
2775 /**
2776  * @brief Function to initialize the hw with host endian information
2777  * @param sc software handle to the device
2778  * @returns 0 on success, ETIMEDOUT on failure
2779  */
2780 int
2781 oce_mbox_init(struct oce_softc *sc)
2782 {
2783 	struct oce_bmbx *bmbx = OCE_MEM_KVA(&sc->sc_mbx);
2784 	uint8_t *ptr = (uint8_t *)&bmbx->mbx;
2785
2786 	if (!ISSET(sc->sc_flags, OCE_F_MBOX_ENDIAN_RQD))
2787 		return (0);
2788
2789 	/* Endian Signature */
2790 	*ptr++ = 0xff;
2791 	*ptr++ = 0x12;
2792 	*ptr++ = 0x34;
2793 	*ptr++ = 0xff;
2794 	*ptr++ = 0xff;
2795 	*ptr++ = 0x56;
2796 	*ptr++ = 0x78;
2797 	*ptr = 0xff;
2798
2799 	return (oce_mbox_dispatch(sc));
2800 }
2801
2802 int
2803 oce_cmd(struct oce_softc *sc, int subsys, int opcode, int version,
2804     void *payload, int length)
2805 {
2806 	struct oce_bmbx *bmbx = OCE_MEM_KVA(&sc->sc_mbx);
2807 	struct oce_mbx *mbx = &bmbx->mbx;
2808 	struct mbx_hdr *hdr;
2809 	caddr_t epayload = NULL;
2810 	int err;
2811
2812 	if (length > OCE_MBX_PAYLOAD)
2813 		epayload = OCE_MEM_KVA(&sc->sc_pld);
2814 	if (length > OCE_MAX_PAYLOAD)
2815 		return (EINVAL);
2816
2817 	oce_dma_sync(&sc->sc_mbx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2818
2819 	memset(mbx, 0, sizeof(struct oce_mbx));
2820
2821 	mbx->payload_length = length;
2822
2823 	if (epayload) {
2824 		mbx->flags = OCE_MBX_F_SGE;
2825 		oce_dma_sync(&sc->sc_pld, BUS_DMASYNC_PREREAD);
2826 		memcpy(epayload, payload, length);
2827 		mbx->pld.sgl[0].addr = OCE_MEM_DVA(&sc->sc_pld);
2828 		mbx->pld.sgl[0].length = length;
2829 		hdr = (struct mbx_hdr *)epayload;
2830 	} else {
2831 		mbx->flags = OCE_MBX_F_EMBED;
2832 		memcpy(mbx->pld.data, payload, length);
2833 		hdr = (struct mbx_hdr *)&mbx->pld.data;
2834 	}
2835
2836 	hdr->subsys = subsys;
2837 	hdr->opcode = opcode;
2838 	hdr->version = version;
2839 	hdr->length = length - sizeof(*hdr);
2840 	if (opcode == OPCODE_COMMON_FUNCTION_RESET)
2841 		hdr->timeout = 2 * OCE_MBX_TIMEOUT;
2842 	else
2843 		hdr->timeout = OCE_MBX_TIMEOUT;
2844
2845 	if (epayload)
2846 		oce_dma_sync(&sc->sc_pld, BUS_DMASYNC_PREWRITE);
2847
2848 	err = oce_mbox_dispatch(sc);
2849 	if (err == 0) {
2850 		if (epayload) {
2851 			oce_dma_sync(&sc->sc_pld, BUS_DMASYNC_POSTWRITE);
2852 			memcpy(payload, epayload, length);
2853 		} else
2854 			memcpy(payload, &mbx->pld.data, length);
2855 	} else
2856 		printf("%s: mailbox timeout, subsys %d op %d ver %d "
2857 		    "%spayload length %d\n", sc->sc_dev.dv_xname, subsys,
2858 		    opcode, version, epayload ? "ext " : "",
2859 		    length);
2860 	return (err);
2861 }
2862
2863 /**
2864  * @brief The firmware sends gratuitous (async) notifications during
2865  * attach only after the first MCC command has been sent. We use
2866  * the MCC queue only for receiving async notifications and the
2867  * mailbox for sending commands, so post at least one dummy
2868  * command on the MCC to start receiving them.
2869  */
2870 void
2871 oce_first_mcc(struct oce_softc *sc)
2872 {
2873 	struct oce_mbx *mbx;
2874 	struct oce_mq *mq = sc->sc_mq;
2875 	struct mbx_hdr *hdr;
2876 	struct mbx_get_common_fw_version *cmd;
2877
2878 	mbx = oce_ring_get(mq->ring);
2879 	memset(mbx, 0, sizeof(struct oce_mbx));
2880
2881 	cmd = (struct mbx_get_common_fw_version *)&mbx->pld.data;
2882
2883 	hdr = &cmd->hdr;
2884 	hdr->subsys = SUBSYS_COMMON;
2885 	hdr->opcode = OPCODE_COMMON_GET_FW_VERSION;
2886 	hdr->version = OCE_MBX_VER_V0;
2887 	hdr->timeout = OCE_MBX_TIMEOUT;
2888 	hdr->length = sizeof(*cmd) - sizeof(*hdr);
2889
2890 	mbx->flags = OCE_MBX_F_EMBED;
2891 	mbx->payload_length = sizeof(*cmd);
2892 	oce_dma_sync(&mq->ring->dma, BUS_DMASYNC_PREREAD |
2893 	    BUS_DMASYNC_PREWRITE);
2894 	oce_write_db(sc, PD_MQ_DB, mq->id | (1 << 16));
2895 }
2896
2897 int
2898 oce_get_fw_config(struct oce_softc *sc)
2899 {
2900 	struct mbx_common_query_fw_config cmd;
2901 	int err;
2902
2903 	memset(&cmd, 0, sizeof(cmd));
2904
2905 	err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_QUERY_FIRMWARE_CONFIG,
2906 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
2907 	if (err)
2908 		return (err);
2909
2910 	sc->sc_port = cmd.params.rsp.port_id;
2911 	sc->sc_fmode = cmd.params.rsp.function_mode;
2912
2913 	return (0);
2914 }
2915
2916 int
2917 oce_check_native_mode(struct oce_softc *sc)
2918 {
2919 	struct mbx_common_set_function_cap cmd;
2920 	int err;
2921
2922 	memset(&cmd, 0, sizeof(cmd));
2923
2924 	cmd.params.req.valid_capability_flags = CAP_SW_TIMESTAMPS |
2925 	    CAP_BE3_NATIVE_ERX_API;
2926 	cmd.params.req.capability_flags = CAP_BE3_NATIVE_ERX_API;
2927
2928 	err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_SET_FUNCTIONAL_CAPS,
2929 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
2930 	if (err)
2931 		return (err);
2932
2933 	if (cmd.params.rsp.capability_flags & CAP_BE3_NATIVE_ERX_API)
2934 		SET(sc->sc_flags, OCE_F_BE3_NATIVE);
2935
2936 	return (0);
2937 }
2938
2939 /**
2940  * @brief Function for creating a network interface.
2941  * @param sc software handle to the device
2942  * @returns 0 on success, error otherwise
2943  */
2944 int
2945 oce_create_iface(struct oce_softc *sc, uint8_t *macaddr)
2946 {
2947 	struct mbx_create_common_iface cmd;
2948 	uint32_t caps, caps_en;
2949 	int err = 0;
2950
2951 	/* interface capabilities to give device when creating interface */
2952 	caps = MBX_RX_IFACE_BROADCAST | MBX_RX_IFACE_UNTAGGED |
2953 	    MBX_RX_IFACE_PROMISC | MBX_RX_IFACE_MCAST_PROMISC |
2954 	    MBX_RX_IFACE_RSS;
2955
2956 	/* capabilities to enable by default (others set dynamically) */
2957 	caps_en = MBX_RX_IFACE_BROADCAST | MBX_RX_IFACE_UNTAGGED;
2958
2959 	if (!IS_XE201(sc)) {
2960 		/* LANCER A0 workaround */
2961 		caps |= MBX_RX_IFACE_PASS_L3L4_ERR;
2962 		caps_en |= MBX_RX_IFACE_PASS_L3L4_ERR;
2963 	}
2964
2965 	/* enable capabilities controlled via driver startup parameters */
2966 	if (sc->sc_rss_enable)
2967 		caps_en |= MBX_RX_IFACE_RSS;
2968
2969 	memset(&cmd, 0, sizeof(cmd));
2970
2971 	cmd.params.req.version = 0;
2972 	cmd.params.req.cap_flags = htole32(caps);
2973 	cmd.params.req.enable_flags = htole32(caps_en);
2974 	if (macaddr != NULL) {
2975 		memcpy(&cmd.params.req.mac_addr[0], macaddr, ETHER_ADDR_LEN);
2976 		cmd.params.req.mac_invalid = 0;
2977 	} else
2978 		cmd.params.req.mac_invalid = 1;
2979
2980 	err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_CREATE_IFACE,
2981 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
2982 	if (err)
2983 		return (err);
2984
2985 	sc->sc_if_id = letoh32(cmd.params.rsp.if_id);
2986
2987 	if (macaddr != NULL)
2988 		sc->sc_pmac_id = letoh32(cmd.params.rsp.pmac_id);
2989
2990 	return (0);
2991 }
2992
2993 /**
2994  * @brief Function to send the mbx command to configure vlan
2995  * @param sc software handle to the device
2996  * @param vtags array of vlan tags
2997  * @param nvtags number of elements in array
2998  * @param untagged boolean TRUE/FALSE
2999  * @param promisc flag to enable/disable VLAN promiscuous mode
3000  * @returns 0 on success, EIO on failure
3001  */
3002 int
3003 oce_config_vlan(struct oce_softc *sc, struct normal_vlan *vtags, int nvtags,
3004     int untagged, int promisc)
3005 {
3006 	struct mbx_common_config_vlan cmd;
3007
3008 	memset(&cmd, 0, sizeof(cmd));
3009
3010 	cmd.params.req.if_id = sc->sc_if_id;
3011 	cmd.params.req.promisc = promisc;
3012 	cmd.params.req.untagged = untagged;
3013 	cmd.params.req.num_vlans = nvtags;
3014
3015 	if (!promisc)
3016 		memcpy(cmd.params.req.tags.normal_vlans, vtags,
3017 		    nvtags * sizeof(struct normal_vlan));
3018
3019 	return (oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_CONFIG_IFACE_VLAN,
3020 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd)));
3021 }
3022
3023 /**
3024  * @brief Function to set flow control capability in the hardware
3025  * @param sc software handle to the device
3026  * @param flags flow control flags to set
3027  * @returns 0 on success, EIO on failure
3028  */
3029 int
3030 oce_set_flow_control(struct oce_softc *sc, uint64_t flags)
3031 {
3032 	struct mbx_common_get_set_flow_control cmd;
3033 	int err;
3034
3035 	memset(&cmd, 0, sizeof(cmd));
3036
3037 	cmd.rx_flow_control = flags & IFM_ETH_RXPAUSE ? 1 : 0;
3038 	cmd.tx_flow_control = flags & IFM_ETH_TXPAUSE ? 1 : 0;
3039
3040 	err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_SET_FLOW_CONTROL,
3041 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
3042 	if (err)
3043 		return (err);
3044
3045 	memset(&cmd, 0, sizeof(cmd));
3046
3047 	err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_GET_FLOW_CONTROL,
3048 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
3049 	if (err)
3050 		return (err);
3051
3052 	sc->sc_fc = cmd.rx_flow_control ? IFM_ETH_RXPAUSE : 0;
3053 	sc->sc_fc |= cmd.tx_flow_control ? IFM_ETH_TXPAUSE : 0;
3054
3055 	return (0);
3056 }
3057
3058 #ifdef OCE_RSS
3059 /**
3060  * @brief Function to enable/disable RSS for the interface
3061  * @param sc software handle to the device
3062  * @param enable 0=disable, OCE_RSS_xxx flags otherwise
3063  * @returns 0 on success, EIO on failure
3064  */
3065 int
3066 oce_config_rss(struct oce_softc *sc, int enable)
3067 {
3068 	struct mbx_config_nic_rss cmd;
3069 	uint8_t *tbl = &cmd.params.req.cputable;
3070 	int i, j;
3071
3072 	memset(&cmd, 0, sizeof(cmd));
3073
3074 	if (enable)
3075 		cmd.params.req.enable_rss = RSS_ENABLE_IPV4 | RSS_ENABLE_IPV6 |
3076 		    RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_TCP_IPV6;
3077 	cmd.params.req.flush = OCE_FLUSH;
3078 	cmd.params.req.if_id = htole32(sc->sc_if_id);
3079
3080 	arc4random_buf(cmd.params.req.hash, sizeof(cmd.params.req.hash));
3081
3082 	/*
3083 	 * Initialize the RSS CPU indirection table.
3084 	 *
3085 	 * The table is used to choose the queue to place incoming packets.
3086 	 * Incoming packets are hashed. The lowest bits in the hash result
3087 	 * are used as the index into the CPU indirection table.
3088 	 * Each entry in the table contains the RSS CPU-ID returned by the NIC
3089 	 * create. Based on the CPU ID, the receive completion is routed to
3090 	 * the corresponding RSS CQs. (Non-RSS packets are always completed
3091 	 * on the default (0) CQ).
3092 	 */
3093 	for (i = 0, j = 0; j < sc->sc_nrq; j++) {
3094 		if (sc->sc_rq[j]->cfg.is_rss_queue)
3095 			tbl[i++] = sc->sc_rq[j]->rss_cpuid;
3096 	}
3097 	if (i > 0)
3098 		cmd.params.req.cpu_tbl_sz_log2 = htole16(ilog2(i));
3099 	else
3100 		return (ENXIO);
3101
3102 	return (oce_cmd(sc, SUBSYS_NIC, OPCODE_NIC_CONFIG_RSS, OCE_MBX_VER_V0,
3103 	    &cmd, sizeof(cmd)));
3104 }
3105 #endif /* OCE_RSS */
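
Editor's note: to illustrate the indirection table described in the comment above, assume two RSS queues whose NIC-assigned CPU IDs are 0 and 1 (hypothetical values). With a two-entry table, cpu_tbl_sz_log2 is 1 and the low bit of the packet hash selects the entry:

	uint8_t tbl[2] = { 0, 1 };	/* RSS CPU IDs from queue create */
	uint32_t hash = 0x1a2b3c4d;	/* example hash result */
	uint8_t cpu = tbl[hash & ((1 << 1) - 1)];	/* 0x...4d & 1 == 1 */

so this completion would be routed to the CQ of the queue registered as RSS CPU 1.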
3106
3107 /**
3108  * @brief Function to update the hardware multicast filter
3109  * @param sc software handle to the device
3110  * @param multi table of multicast addresses
3111  * @param naddr number of multicast addresses in the table
3112  */
3113 int
3114 oce_update_mcast(struct oce_softc *sc,
3115     uint8_t multi[][ETHER_ADDR_LEN], int naddr)
3116 {
3117 	struct mbx_set_common_iface_multicast cmd;
3118
3119 	memset(&cmd, 0, sizeof(cmd));
3120
3121 	memcpy(&cmd.params.req.mac[0], &multi[0], naddr * ETHER_ADDR_LEN);
3122 	cmd.params.req.num_mac = htole16(naddr);
3123 	cmd.params.req.if_id = sc->sc_if_id;
3124
3125 	return (oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_SET_IFACE_MULTICAST,
3126 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd)));
3127 }
3128
3129 /**
3130  * @brief RXF function to enable/disable device promiscuous mode
3131  * @param sc software handle to the device
3132  * @param enable enable/disable flag
3133  * @returns 0 on success, EIO on failure
3134  * @note
3135  *	The OPCODE_NIC_CONFIG_PROMISCUOUS command is deprecated on Lancer.
3136  *	This function uses the COMMON_SET_IFACE_RX_FILTER command instead.
3137  */
3138 int
3139 oce_set_promisc(struct oce_softc *sc, int enable)
3140 {
3141 	struct mbx_set_common_iface_rx_filter cmd;
3142 	struct iface_rx_filter_ctx *req;
3143
3144 	memset(&cmd, 0, sizeof(cmd));
3145
3146 	req = &cmd.params.req;
3147 	req->if_id = sc->sc_if_id;
3148
3149 	if (enable)
3150 		req->iface_flags = req->iface_flags_mask =
3151 		    MBX_RX_IFACE_PROMISC | MBX_RX_IFACE_VLAN_PROMISC;
3152
3153 	return (oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_SET_IFACE_RX_FILTER,
3154 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd)));
3155 }
3156
3157 /**
3158  * @brief Function to query the link status from the hardware
3159  * @param sc software handle to the device
3160  *	(the result is stored in sc_link_up and sc_link_speed)
3161  * @returns 0 on success, EIO on failure
3162  */
3163 int
3164 oce_get_link_status(struct oce_softc *sc)
3165 {
3166 	struct mbx_query_common_link_config cmd;
3167 	int err;
3168
3169 	memset(&cmd, 0, sizeof(cmd));
3170
3171 	err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_QUERY_LINK_CONFIG,
3172 	    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
3173 	if (err)
3174 		return (err);
3175
3176 	sc->sc_link_up = (letoh32(cmd.params.rsp.logical_link_status) ==
3177 	    NTWK_LOGICAL_LINK_UP);
3178
3179 	if (cmd.params.rsp.mac_speed < 5)
3180 		sc->sc_link_speed = cmd.params.rsp.mac_speed;
3181 	else
3182 		sc->sc_link_speed = 0;
3183
3184 	return (0);
3185 }
3186
3187 void
3188 oce_macaddr_set(struct oce_softc *sc)
3189 {
3190 	uint32_t old_pmac_id = sc->sc_pmac_id;
3191 	int status = 0;
3192
3193 	if (!memcmp(sc->sc_macaddr, sc->sc_ac.ac_enaddr, ETHER_ADDR_LEN))
3194 		return;
3195
3196 	status = oce_macaddr_add(sc, sc->sc_ac.ac_enaddr, &sc->sc_pmac_id);
3197 	if (!status)
3198 		status = oce_macaddr_del(sc, old_pmac_id);
     		Value stored to 'status' is never read
3199 	else
3200 		printf("%s: failed to set MAC address\n", sc->sc_dev.dv_xname);
3201 }
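
Editor's note: the dead store flagged at line 3198 could be addressed by acting on the result of oce_macaddr_del() instead of discarding it. A minimal sketch of one possible fix (the error-handling policy here is an assumption, not the committed change):

	void
	oce_macaddr_set(struct oce_softc *sc)
	{
		uint32_t old_pmac_id = sc->sc_pmac_id;
		int status;

		if (!memcmp(sc->sc_macaddr, sc->sc_ac.ac_enaddr, ETHER_ADDR_LEN))
			return;

		status = oce_macaddr_add(sc, sc->sc_ac.ac_enaddr, &sc->sc_pmac_id);
		if (status) {
			printf("%s: failed to set MAC address\n",
			    sc->sc_dev.dv_xname);
			return;
		}

		/* Check the deletion result instead of dropping it. */
		status = oce_macaddr_del(sc, old_pmac_id);
		if (status)
			printf("%s: failed to delete previous MAC address\n",
			    sc->sc_dev.dv_xname);
	}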
3202
3203int
3204oce_macaddr_get(struct oce_softc *sc, uint8_t *macaddr)
3205{
3206 struct mbx_query_common_iface_mac cmd;
3207 int err;
3208
3209 memset(&cmd, 0, sizeof(cmd))__builtin_memset((&cmd), (0), (sizeof(cmd)));
3210
3211 cmd.params.req.type = MAC_ADDRESS_TYPE_NETWORK0x1;
3212 cmd.params.req.permanent = 1;
3213
3214 err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_QUERY_IFACE_MAC,
3215 OCE_MBX_VER_V00x0000, &cmd, sizeof(cmd));
3216 if (err == 0)
3217 memcpy(macaddr, &cmd.params.rsp.mac.mac_addr[0],__builtin_memcpy((macaddr), (&cmd.params.rsp.mac.mac_addr
[0]), (6))
3218 ETHER_ADDR_LEN)__builtin_memcpy((macaddr), (&cmd.params.rsp.mac.mac_addr
[0]), (6))
;
3219 return (err);
3220}
3221
3222int
3223oce_macaddr_add(struct oce_softc *sc, uint8_t *enaddr, uint32_t *pmac)
3224{
3225 struct mbx_add_common_iface_mac cmd;
3226 int err;
3227
3228 memset(&cmd, 0, sizeof(cmd));
3229
3230 cmd.params.req.if_id = htole16(sc->sc_if_id);
3231 memcpy(cmd.params.req.mac_address, enaddr, ETHER_ADDR_LEN);
3232
3233 err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_ADD_IFACE_MAC,
3234 OCE_MBX_VER_V0, &cmd, sizeof(cmd));
3235 if (err == 0)
3236 *pmac = letoh32(cmd.params.rsp.pmac_id);
3237 return (err);
3238}
3239
3240int
3241oce_macaddr_del(struct oce_softc *sc, uint32_t pmac)
3242{
3243 struct mbx_del_common_iface_mac cmd;
3244
3245 memset(&cmd, 0, sizeof(cmd));
3246
3247 cmd.params.req.if_id = htole16(sc->sc_if_id);
3248 cmd.params.req.pmac_id = htole32(pmac);
3249
3250 return (oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_DEL_IFACE_MAC,
3251 OCE_MBX_VER_V0, &cmd, sizeof(cmd)));
3252}
3253
3254int
3255oce_new_rq(struct oce_softc *sc, struct oce_rq *rq)
3256{
3257 struct mbx_create_nic_rq cmd;
3258 int err, npages;
3259
3260 memset(&cmd, 0, sizeof(cmd));
3261
3262 npages = oce_load_ring(sc, rq->ring, &cmd.params.req.pages[0],
3263 nitems(cmd.params.req.pages));
3264 if (!npages) {
3265 printf("%s: failed to load the rq ring\n", __func__);
3266 return (1);
3267 }
3268
3269 if (IS_XE201(sc)) {
3270 cmd.params.req.frag_size = rq->fragsize / 2048;
3271 cmd.params.req.page_size = 1;
3272 } else
3273 cmd.params.req.frag_size = ilog2(rq->fragsize);
3274 cmd.params.req.num_pages = npages;
3275 cmd.params.req.cq_id = rq->cq->id;
3276 cmd.params.req.if_id = htole32(sc->sc_if_id);
3277 cmd.params.req.max_frame_size = htole16(rq->mtu);
3278 cmd.params.req.is_rss_queue = htole32(rq->rss);
3279
3280 err = oce_cmd(sc, SUBSYS_NIC, OPCODE_NIC_CREATE_RQ,
3281 IS_XE201(sc) ? OCE_MBX_VER_V1 : OCE_MBX_VER_V0, &cmd,
3282 sizeof(cmd));
3283 if (err)
3284 return (err);
3285
3286 rq->id = letoh16(cmd.params.rsp.rq_id);
3287 rq->rss_cpuid = cmd.params.rsp.rss_cpuid;
3288
3289 return (0);
3290}
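
On the non-XE201 path above, frag_size carries the receive fragment
size as a power-of-two exponent via ilog2(). A quick worked check,
assuming the usual 2048-byte fragment (ilog2_sketch is an editorial
stand-in, not the kernel's ilog2):

static int
ilog2_sketch(unsigned int v)
{
	int r = -1;

	while (v != 0) {
		v >>= 1;
		r++;
	}
	return (r);	/* ilog2_sketch(2048) == 11 */
}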
3291
3292int
3293oce_new_wq(struct oce_softc *sc, struct oce_wq *wq)
3294{
3295 struct mbx_create_nic_wq cmd;
3296 int err, npages;
3297
3298 memset(&cmd, 0, sizeof(cmd));
3299
3300 npages = oce_load_ring(sc, wq->ring, &cmd.params.req.pages[0],
3301 nitems(cmd.params.req.pages));
3302 if (!npages) {
3303 printf("%s: failed to load the wq ring\n", __func__);
3304 return (1);
3305 }
3306
3307 if (IS_XE201(sc))
3308 cmd.params.req.if_id = sc->sc_if_id;
3309 cmd.params.req.nic_wq_type = NIC_WQ_TYPE_STANDARD;
3310 cmd.params.req.num_pages = npages;
3311 cmd.params.req.wq_size = ilog2(wq->nitems) + 1;
3312 cmd.params.req.cq_id = htole16(wq->cq->id);
3313 cmd.params.req.ulp_num = 1;
3314
3315 err = oce_cmd(sc, SUBSYS_NIC, OPCODE_NIC_CREATE_WQ,
3316 IS_XE201(sc) ? OCE_MBX_VER_V1 : OCE_MBX_VER_V0, &cmd,
3317 sizeof(cmd));
3318 if (err)
3319 return (err);
3320
3321 wq->id = letoh16(cmd.params.rsp.wq_id);
3322
3323 return (0);
3324}
3325
3326int
3327oce_new_mq(struct oce_softc *sc, struct oce_mq *mq)
3328{
3329 struct mbx_create_common_mq_ex cmd;
3330 union oce_mq_ext_ctx *ctx;
3331 int err, npages;
3332
3333 memset(&cmd, 0, sizeof(cmd));
3334
3335 npages = oce_load_ring(sc, mq->ring, &cmd.params.req.pages[0],
3336 nitems(cmd.params.req.pages));
3337 if (!npages) {
3338 printf("%s: failed to load the mq ring\n", __func__);
3339 return (-1);
3340 }
3341
3342 ctx = &cmd.params.req.context;
3343 ctx->v0.num_pages = npages;
3344 ctx->v0.cq_id = mq->cq->id;
3345 ctx->v0.ring_size = ilog2(mq->nitems) + 1;
3346 ctx->v0.valid = 1;
3347 /* Subscribe to all async events, including Link State and Group 5 */
3348 ctx->v0.async_evt_bitmap = 0xffffffff;
3349
3350 err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_CREATE_MQ_EXT,
3351 OCE_MBX_VER_V0, &cmd, sizeof(cmd));
3352 if (err)
3353 return (err);
3354
3355 mq->id = letoh16(cmd.params.rsp.mq_id);
3356
3357 return (0);
3358}
3359
3360int
3361oce_new_eq(struct oce_softc *sc, struct oce_eq *eq)
3362{
3363 struct mbx_create_common_eq cmd;
3364 int err, npages;
3365
3366 memset(&cmd, 0, sizeof(cmd));
3367
3368 npages = oce_load_ring(sc, eq->ring, &cmd.params.req.pages[0],
3369 nitems(cmd.params.req.pages));
3370 if (!npages) {
3371 printf("%s: failed to load the eq ring\n", __func__);
3372 return (-1);
3373 }
3374
3375 cmd.params.req.ctx.num_pages = htole16(npages);
3376 cmd.params.req.ctx.valid = 1;
3377 cmd.params.req.ctx.size = (eq->isize == 4) ? 0 : 1;
3378 cmd.params.req.ctx.count = ilog2(eq->nitems / 256);
3379 cmd.params.req.ctx.armed = 0;
3380 cmd.params.req.ctx.delay_mult = htole32(eq->delay);
3381
3382 err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_CREATE_EQ,
3383 OCE_MBX_VER_V0, &cmd, sizeof(cmd));
3384 if (err)
3385 return (err);
3386
3387 eq->id = letoh16(cmd.params.rsp.eq_id);
3388
3389 return (0);
3390}
3391
3392int
3393oce_new_cq(struct oce_softc *sc, struct oce_cq *cq)
3394{
3395 struct mbx_create_common_cq cmd;
3396 union oce_cq_ctx *ctx;
3397 int err, npages;
3398
3399 memset(&cmd, 0, sizeof(cmd));
3400
3401 npages = oce_load_ring(sc, cq->ring, &cmd.params.req.pages[0],
3402 nitems(cmd.params.req.pages));
3403 if (!npages) {
3404 printf("%s: failed to load the cq ring\n", __func__);
3405 return (-1);
3406 }
3407
3408 ctx = &cmd.params.req.cq_ctx;
3409
3410 if (IS_XE201(sc)) {
3411 ctx->v2.num_pages = htole16(npages);
3412 ctx->v2.page_size = 1; /* for 4K */
3413 ctx->v2.eventable = cq->eventable;
3414 ctx->v2.valid = 1;
3415 ctx->v2.count = ilog2(cq->nitems / 256);
3416 ctx->v2.nodelay = cq->nodelay;
3417 ctx->v2.coalesce_wm = cq->ncoalesce;
3418 ctx->v2.armed = 0;
3419 ctx->v2.eq_id = cq->eq->id;
3420 if (ctx->v2.count == 3) {
3421 if (cq->nitems > (4*1024)-1)
3422 ctx->v2.cqe_count = (4*1024)-1;
3423 else
3424 ctx->v2.cqe_count = cq->nitems;
3425 }
3426 } else {
3427 ctx->v0.num_pages = htole16(npages);
3428 ctx->v0.eventable = cq->eventable;
3429 ctx->v0.valid = 1;
3430 ctx->v0.count = ilog2(cq->nitems / 256);
3431 ctx->v0.nodelay = cq->nodelay;
3432 ctx->v0.coalesce_wm = cq->ncoalesce;
3433 ctx->v0.armed = 0;
3434 ctx->v0.eq_id = cq->eq->id;
3435 }
3436
3437 err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_CREATE_CQ,
3438 IS_XE201(sc) ? OCE_MBX_VER_V2 : OCE_MBX_VER_V0, &cmd,
3439 sizeof(cmd));
3440 if (err)
3441 return (err);
3442
3443 cq->id = letoh16(cmd.params.rsp.cq_id);
3444
3445 return (0);
3446}
3447
3448int
3449oce_init_stats(struct oce_softc *sc)
3450{
3451 union cmd {
3452 struct mbx_get_nic_stats_v0 _be2;
3453 struct mbx_get_nic_stats _be3;
3454 struct mbx_get_pport_stats _xe201;
3455 };
3456
3457 sc->sc_statcmd = malloc(sizeof(union cmd), M_DEVBUF, M_ZERO | M_NOWAIT);
3458 if (sc->sc_statcmd == NULL) {
3459 printf("%s: failed to allocate statistics command block\n",
3460 sc->sc_dev.dv_xname);
3461 return (-1);
3462 }
3463 return (0);
3464}
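
The union above exists purely to size a single allocation that can
hold whichever stats mailbox the chip variant needs. An isolated
illustration of the pattern, with hypothetical stand-in types:

/* Editorial illustration; these are not the driver's structs. */
union any_stats_mbx {
	struct { char buf[64]; } be2_style;
	struct { char buf[128]; } be3_style;
	struct { char buf[256]; } xe201_style;
};
/* sizeof(union any_stats_mbx) is the largest member (256 here), so
 * one sc_statcmd buffer can back all three query paths. */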
3465
3466int
3467oce_update_stats(struct oce_softc *sc)
3468{
3469 struct ifnet *ifp = &sc->sc_ac.ac_if;
3470 uint64_t rxe, txe;
3471 int err;
3472
3473 if (ISSET(sc->sc_flags, OCE_F_BE2))
3474 err = oce_stats_be2(sc, &rxe, &txe);
3475 else if (ISSET(sc->sc_flags, OCE_F_BE3))
3476 err = oce_stats_be3(sc, &rxe, &txe);
3477 else
3478 err = oce_stats_xe(sc, &rxe, &txe);
3479 if (err)
3480 return (err);
3481
3482 ifp->if_ierrors += (rxe > sc->sc_rx_errors) ?
3483 rxe - sc->sc_rx_errors : sc->sc_rx_errors - rxe;
3484 sc->sc_rx_errors = rxe;
3485 ifp->if_oerrors += (txe > sc->sc_tx_errors) ?
3486 txe - sc->sc_tx_errors : sc->sc_tx_errors - txe;
3487 sc->sc_tx_errors = txe;
3488
3489 return (0);
3490}
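
The two updates above add the absolute difference between the current
and previously seen hardware error counters, so if_ierrors/if_oerrors
keep increasing even if the hardware counters reset underneath the
driver. The same idea in isolation (counter_delta is a hypothetical
name):

/* Editorial sketch of the delta logic used above. */
static uint64_t
counter_delta(uint64_t now, uint64_t last)
{
	/* If the hardware counter reset, |now - last| still yields a
	 * modest positive increment instead of a huge unsigned wrap. */
	return ((now > last) ? now - last : last - now);
}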
3491
3492int
3493oce_stats_be2(struct oce_softc *sc, uint64_t *rxe, uint64_t *txe)
3494{
3495 struct mbx_get_nic_stats_v0 *cmd = sc->sc_statcmd;
3496 struct oce_pmem_stats *ms;
3497 struct oce_rxf_stats_v0 *rs;
3498 struct oce_port_rxf_stats_v0 *ps;
3499 int err;
3500
3501 memset(cmd, 0, sizeof(*cmd));
3502
3503 err = oce_cmd(sc, SUBSYS_NIC, OPCODE_NIC_GET_STATS, OCE_MBX_VER_V0,
3504 cmd, sizeof(*cmd));
3505 if (err)
3506 return (err);
3507
3508 ms = &cmd->params.rsp.stats.pmem;
3509 rs = &cmd->params.rsp.stats.rxf;
3510 ps = &rs->port[sc->sc_port];
3511
3512 *rxe = ps->rx_crc_errors + ps->rx_in_range_errors +
3513 ps->rx_frame_too_long + ps->rx_dropped_runt +
3514 ps->rx_ip_checksum_errs + ps->rx_tcp_checksum_errs +
3515 ps->rx_udp_checksum_errs + ps->rxpp_fifo_overflow_drop +
3516 ps->rx_dropped_tcp_length + ps->rx_dropped_too_small +
3517 ps->rx_dropped_too_short + ps->rx_out_range_errors +
3518 ps->rx_dropped_header_too_small + ps->rx_input_fifo_overflow_drop +
3519 ps->rx_alignment_symbol_errors;
3520 if (sc->sc_if_id)
3521 *rxe += rs->port1_jabber_events;
3522 else
3523 *rxe += rs->port0_jabber_events;
3524 *rxe += ms->eth_red_drops;
3525
3526 *txe = 0; /* hardware doesn't provide any extra tx error statistics */
3527
3528 return (0);
3529}
3530
3531int
3532oce_stats_be3(struct oce_softc *sc, uint64_t *rxe, uint64_t *txe)
3533{
3534 struct mbx_get_nic_stats *cmd = sc->sc_statcmd;
3535 struct oce_pmem_stats *ms;
3536 struct oce_rxf_stats_v1 *rs;
3537 struct oce_port_rxf_stats_v1 *ps;
3538 int err;
3539
3540 memset(cmd, 0, sizeof(*cmd));
3541
3542 err = oce_cmd(sc, SUBSYS_NIC, OPCODE_NIC_GET_STATS, OCE_MBX_VER_V1,
3543 cmd, sizeof(*cmd));
3544 if (err)
3545 return (err);
3546
3547 ms = &cmd->params.rsp.stats.pmem;
3548 rs = &cmd->params.rsp.stats.rxf;
3549 ps = &rs->port[sc->sc_port];
3550
3551 *rxe = ps->rx_crc_errors + ps->rx_in_range_errors +
3552 ps->rx_frame_too_long + ps->rx_dropped_runt +
3553 ps->rx_ip_checksum_errs + ps->rx_tcp_checksum_errs +
3554 ps->rx_udp_checksum_errs + ps->rxpp_fifo_overflow_drop +
3555 ps->rx_dropped_tcp_length + ps->rx_dropped_too_small +
3556 ps->rx_dropped_too_short + ps->rx_out_range_errors +
3557 ps->rx_dropped_header_too_small + ps->rx_input_fifo_overflow_drop +
3558 ps->rx_alignment_symbol_errors + ps->jabber_events;
3559 *rxe += ms->eth_red_drops;
3560
3561 *txe = 0; /* hardware doesn't provide any extra tx error statistics */
3562
3563 return (0);
3564}
3565
3566int
3567oce_stats_xe(struct oce_softc *sc, uint64_t *rxe, uint64_t *txe)
3568{
3569 struct mbx_get_pport_stats *cmd = sc->sc_statcmd;
3570 struct oce_pport_stats *pps;
3571 int err;
3572
3573 memset(cmd, 0, sizeof(*cmd));
3574
3575 cmd->params.req.reset_stats = 0;
3576 cmd->params.req.port_number = sc->sc_if_id;
3577
3578 err = oce_cmd(sc, SUBSYS_NIC, OPCODE_NIC_GET_PPORT_STATS,
3579 OCE_MBX_VER_V0, cmd, sizeof(*cmd));
3580 if (err)
3581 return (err);
3582
3583 pps = &cmd->params.rsp.pps;
3584
3585 *rxe = pps->rx_discards + pps->rx_errors + pps->rx_crc_errors +
3586 pps->rx_alignment_errors + pps->rx_symbol_errors +
3587 pps->rx_frames_too_long + pps->rx_internal_mac_errors +
3588 pps->rx_undersize_pkts + pps->rx_oversize_pkts + pps->rx_jabbers +
3589 pps->rx_control_frames_unknown_opcode + pps->rx_in_range_errors +
3590 pps->rx_out_of_range_errors + pps->rx_ip_checksum_errors +
3591 pps->rx_tcp_checksum_errors + pps->rx_udp_checksum_errors +
3592 pps->rx_fifo_overflow + pps->rx_input_fifo_overflow +
3593 pps->rx_drops_too_many_frags + pps->rx_drops_mtu;
3594
3595 *txe = pps->tx_discards + pps->tx_errors + pps->tx_internal_mac_errors;
3596
3597 return (0);
3598}