Bug Summary

File: dev/pci/if_oce.c
Warning: line 3197, column 3
Value stored to 'status' is never read
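
The warning above comes from the analyzer's dead-store checker (deadcode.DeadStores, part of the -analyzer-checker=deadcode group enabled in the invocation below): it fires when a value is assigned to a variable that is never read again before being overwritten or going out of scope. A minimal sketch of the flagged pattern — a hypothetical helper, not code from the driver, reusing the driver's own oce_get_link_status():

    int
    example(struct oce_softc *sc)
    {
            int status;

            status = oce_get_link_status(sc); /* value stored to 'status'... */
            return (0);                       /* ...is never read afterwards */
    }

Typical fixes are to act on the stored value or, when the result is intentionally ignored, to drop the assignment and cast the call to (void).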

Annotated Source Code


clang -cc1 -cc1 -triple amd64-unknown-openbsd7.4 -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name if_oce.c -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -ffp-contract=on -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -target-feature +retpoline-external-thunk -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/llvm16/lib/clang/16 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/legacy-dpm -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu13 -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/inc -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc/pmfw_if -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D SUSPEND -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant 
-fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fcf-protection=branch -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /home/ben/Projects/scan/2024-01-11-110808-61670-1 -x c /usr/src/sys/dev/pci/if_oce.c
1/* $OpenBSD: if_oce.c,v 1.108 2023/11/10 15:51:20 bluhm Exp $ */
2
3/*
4 * Copyright (c) 2012 Mike Belopuhov
5 *
6 * Permission to use, copy, modify, and distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18
19/*-
20 * Copyright (C) 2012 Emulex
21 * All rights reserved.
22 *
23 * Redistribution and use in source and binary forms, with or without
24 * modification, are permitted provided that the following conditions are met:
25 *
26 * 1. Redistributions of source code must retain the above copyright notice,
27 * this list of conditions and the following disclaimer.
28 *
29 * 2. Redistributions in binary form must reproduce the above copyright
30 * notice, this list of conditions and the following disclaimer in the
31 * documentation and/or other materials provided with the distribution.
32 *
33 * 3. Neither the name of the Emulex Corporation nor the names of its
34 * contributors may be used to endorse or promote products derived from
35 * this software without specific prior written permission.
36 *
37 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
38 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
39 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
40 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
41 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
42 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
43 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
44 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
45 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
46 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
47 * POSSIBILITY OF SUCH DAMAGE.
48 *
49 * Contact Information:
50 * freebsd-drivers@emulex.com
51 *
52 * Emulex
53 * 3333 Susan Street
54 * Costa Mesa, CA 92626
55 */
56
57#include "bpfilter.h"
58#include "vlan.h"
59
60#include <sys/param.h>
61#include <sys/systm.h>
62#include <sys/sockio.h>
63#include <sys/mbuf.h>
64#include <sys/malloc.h>
65#include <sys/kernel.h>
66#include <sys/device.h>
67#include <sys/socket.h>
68#include <sys/queue.h>
69#include <sys/timeout.h>
70#include <sys/pool.h>
71
72#include <net/if.h>
73#include <net/if_media.h>
74
75#include <netinet/in.h>
76#include <netinet/if_ether.h>
77
78#ifdef INET6
79#include <netinet/ip6.h>
80#endif
81
82#if NBPFILTER > 0
83#include <net/bpf.h>
84#endif
85
86#include <dev/pci/pcireg.h>
87#include <dev/pci/pcivar.h>
88#include <dev/pci/pcidevs.h>
89
90#include <dev/pci/if_ocereg.h>
91
92#ifndef TRUE
93#define TRUE 1
94#endif
95#ifndef FALSE
96#define FALSE 0
97#endif
98
99#define OCE_MBX_TIMEOUT 5
100
101#define OCE_MAX_PAYLOAD 65536
102
103#define OCE_TX_RING_SIZE 512
104#define OCE_RX_RING_SIZE 1024
105
106/* This should be powers of 2. Like 2,4,8 & 16 */
107#define OCE_MAX_RSS 4 /* TODO: 8 */
108#define OCE_MAX_RQ OCE_MAX_RSS + 1 /* one default queue */
109#define OCE_MAX_WQ 8
110
111#define OCE_MAX_EQ 32
112#define OCE_MAX_CQ OCE_MAX_RQ + OCE_MAX_WQ + 1 /* one MCC queue */
113#define OCE_MAX_CQ_EQ 8 /* Max CQ that can be attached to an EQ */
114
115#define OCE_DEFAULT_EQD 80
116
117#define OCE_MIN_MTU 256
118#define OCE_MAX_MTU 9000
119
120#define OCE_MAX_RQ_COMPL 64
121#define OCE_MAX_RQ_POSTS 255
122#define OCE_RX_BUF_SIZE 2048
123
124#define OCE_MAX_TX_ELEMENTS 29
125#define OCE_MAX_TX_DESC 1024
126#define OCE_MAX_TX_SIZE 65535
127
128#define OCE_MEM_KVA(_m) ((void *)((_m)->vaddr))
129#define OCE_MEM_DVA(_m) ((_m)->paddr)
130
131#define OCE_WQ_FOREACH(sc, wq, i) \
132 for (i = 0, wq = sc->sc_wq[0]; i < sc->sc_nwq; i++, wq = sc->sc_wq[i])
133#define OCE_RQ_FOREACH(sc, rq, i) \
134 for (i = 0, rq = sc->sc_rq[0]; i < sc->sc_nrq; i++, rq = sc->sc_rq[i])
135#define OCE_EQ_FOREACH(sc, eq, i) \
136 for (i = 0, eq = sc->sc_eq[0]; i < sc->sc_neq; i++, eq = sc->sc_eq[i])
137#define OCE_CQ_FOREACH(sc, cq, i) \
138 for (i = 0, cq = sc->sc_cq[0]; i < sc->sc_ncq; i++, cq = sc->sc_cq[i])
139#define OCE_RING_FOREACH(_r, _v, _c) \
140 for ((_v) = oce_ring_first(_r); _c; (_v) = oce_ring_next(_r))
141
142static inline int
143ilog2(unsigned int v)
144{
145 int r = 0;
146
147 while (v >>= 1)
148 r++;
149 return (r);
150}
151
152struct oce_pkt {
153 struct mbuf * mbuf;
154 bus_dmamap_t map;
155 int nsegs;
156 SIMPLEQ_ENTRY(oce_pkt) entry;
157};
158SIMPLEQ_HEAD(oce_pkt_list, oce_pkt);
159
160struct oce_dma_mem {
161 bus_dma_tag_t tag;
162 bus_dmamap_t map;
163 bus_dma_segment_t segs;
164 int nsegs;
165 bus_size_t size;
166 caddr_t vaddr;
167 bus_addr_t paddr;
168};
169
170struct oce_ring {
171 int index;
172 int nitems;
173 int nused;
174 int isize;
175 struct oce_dma_mem dma;
176};
177
178struct oce_softc;
179
180enum cq_len {
181 CQ_LEN_256 = 256,
182 CQ_LEN_512 = 512,
183 CQ_LEN_1024 = 1024
184};
185
186enum eq_len {
187 EQ_LEN_256 = 256,
188 EQ_LEN_512 = 512,
189 EQ_LEN_1024 = 1024,
190 EQ_LEN_2048 = 2048,
191 EQ_LEN_4096 = 4096
192};
193
194enum eqe_size {
195 EQE_SIZE_4 = 4,
196 EQE_SIZE_16 = 16
197};
198
199enum qtype {
200 QTYPE_EQ,
201 QTYPE_MQ,
202 QTYPE_WQ,
203 QTYPE_RQ,
204 QTYPE_CQ,
205 QTYPE_RSS
206};
207
208struct oce_eq {
209 struct oce_softc * sc;
210 struct oce_ring * ring;
211 enum qtype type;
212 int id;
213
214 struct oce_cq * cq[OCE_MAX_CQ_EQ];
215 int cq_valid;
216
217 int nitems;
218 int isize;
219 int delay;
220};
221
222struct oce_cq {
223 struct oce_softc * sc;
224 struct oce_ring * ring;
225 enum qtype type;
226 int id;
227
228 struct oce_eq * eq;
229
230 void (*cq_intr)(void *);
231 void * cb_arg;
232
233 int nitems;
234 int nodelay;
235 int eventable;
236 int ncoalesce;
237};
238
239struct oce_mq {
240 struct oce_softc * sc;
241 struct oce_ring * ring;
242 enum qtype type;
243 int id;
244
245 struct oce_cq * cq;
246
247 int nitems;
248};
249
250struct oce_wq {
251 struct oce_softc * sc;
252 struct oce_ring * ring;
253 enum qtype type;
254 int id;
255
256 struct oce_cq * cq;
257
258 struct oce_pkt_list pkt_list;
259 struct oce_pkt_list pkt_free;
260
261 int nitems;
262};
263
264struct oce_rq {
265 struct oce_softc * sc;
266 struct oce_ring * ring;
267 enum qtype type;
268 int id;
269
270 struct oce_cq * cq;
271
272 struct if_rxring rxring;
273 struct oce_pkt_list pkt_list;
274 struct oce_pkt_list pkt_free;
275
276 uint32_t rss_cpuid;
277
278#ifdef OCE_LRO
279 struct lro_ctrl lro;
280 int lro_pkts_queued;
281#endif
282
283 int nitems;
284 int fragsize;
285 int mtu;
286 int rss;
287};
288
289struct oce_softc {
290 struct device sc_dev;
291
292 uint sc_flags;
293#define OCE_F_BE2 0x00000001
294#define OCE_F_BE3 0x00000002
295#define OCE_F_XE201 0x00000008
296#define OCE_F_BE3_NATIVE 0x00000100
297#define OCE_F_RESET_RQD 0x00001000
298#define OCE_F_MBOX_ENDIAN_RQD 0x00002000
299
300 bus_dma_tag_t sc_dmat;
301
302 bus_space_tag_t sc_cfg_iot;
303 bus_space_handle_t sc_cfg_ioh;
304 bus_size_t sc_cfg_size;
305
306 bus_space_tag_t sc_csr_iot;
307 bus_space_handle_t sc_csr_ioh;
308 bus_size_t sc_csr_size;
309
310 bus_space_tag_t sc_db_iot;
311 bus_space_handle_t sc_db_ioh;
312 bus_size_t sc_db_size;
313
314 void * sc_ih;
315
316 struct arpcom sc_ac;
317 struct ifmedia sc_media;
318 ushort sc_link_up;
319 ushort sc_link_speed;
320 uint64_t sc_fc;
321
322 struct oce_dma_mem sc_mbx;
323 struct oce_dma_mem sc_pld;
324
325 uint sc_port;
326 uint sc_fmode;
327
328 struct oce_wq * sc_wq[OCE_MAX_WQ]; /* TX work queues */
329 struct oce_rq * sc_rq[OCE_MAX_RQ]; /* RX work queues */
330 struct oce_cq * sc_cq[OCE_MAX_CQ]; /* Completion queues */
331 struct oce_eq * sc_eq[OCE_MAX_EQ]; /* Event queues */
332 struct oce_mq * sc_mq; /* Mailbox queue */
333
334 ushort sc_neq;
335 ushort sc_ncq;
336 ushort sc_nrq;
337 ushort sc_nwq;
338 ushort sc_nintr;
339
340 ushort sc_tx_ring_size;
341 ushort sc_rx_ring_size;
342 ushort sc_rss_enable;
343
344 uint32_t sc_if_id; /* interface ID */
345 uint32_t sc_pmac_id; /* PMAC id */
346 char sc_macaddr[ETHER_ADDR_LEN];
347
348 uint32_t sc_pvid;
349
350 uint64_t sc_rx_errors;
351 uint64_t sc_tx_errors;
352
353 struct timeout sc_tick;
354 struct timeout sc_rxrefill;
355
356 void * sc_statcmd;
357};
358
359#define IS_BE(sc) ISSET((sc)->sc_flags, OCE_F_BE2 | OCE_F_BE3)
360#define IS_XE201(sc) ISSET((sc)->sc_flags, OCE_F_XE201)
361
362#define ADDR_HI(x) ((uint32_t)((uint64_t)(x) >> 32))
363#define ADDR_LO(x) ((uint32_t)((uint64_t)(x) & 0xffffffff))
364
365#define IF_LRO_ENABLED(ifp) ISSET((ifp)->if_capabilities, IFCAP_LRO)
366
367int oce_match(struct device *, void *, void *);
368void oce_attach(struct device *, struct device *, void *);
369int oce_pci_alloc(struct oce_softc *, struct pci_attach_args *);
370void oce_attachhook(struct device *);
371void oce_attach_ifp(struct oce_softc *);
372int oce_ioctl(struct ifnet *, u_long, caddr_t);
373int oce_rxrinfo(struct oce_softc *, struct if_rxrinfo *);
374void oce_iff(struct oce_softc *);
375void oce_link_status(struct oce_softc *);
376void oce_media_status(struct ifnet *, struct ifmediareq *);
377int oce_media_change(struct ifnet *);
378void oce_tick(void *);
379void oce_init(void *);
380void oce_stop(struct oce_softc *);
381void oce_watchdog(struct ifnet *);
382void oce_start(struct ifnet *);
383int oce_encap(struct oce_softc *, struct mbuf **, int wqidx);
384#ifdef OCE_TSO
385struct mbuf *
386 oce_tso(struct oce_softc *, struct mbuf **);
387#endif
388int oce_intr(void *);
389void oce_intr_wq(void *);
390void oce_txeof(struct oce_wq *);
391void oce_intr_rq(void *);
392void oce_rxeof(struct oce_rq *, struct oce_nic_rx_cqe *);
393void oce_rxeoc(struct oce_rq *, struct oce_nic_rx_cqe *);
394int oce_vtp_valid(struct oce_softc *, struct oce_nic_rx_cqe *);
395int oce_port_valid(struct oce_softc *, struct oce_nic_rx_cqe *);
396#ifdef OCE_LRO
397void oce_flush_lro(struct oce_rq *);
398int oce_init_lro(struct oce_softc *);
399void oce_free_lro(struct oce_softc *);
400#endif
401int oce_get_buf(struct oce_rq *);
402int oce_alloc_rx_bufs(struct oce_rq *);
403void oce_refill_rx(void *);
404void oce_free_posted_rxbuf(struct oce_rq *);
405void oce_intr_mq(void *);
406void oce_link_event(struct oce_softc *,
407 struct oce_async_cqe_link_state *);
408
409int oce_init_queues(struct oce_softc *);
410void oce_release_queues(struct oce_softc *);
411struct oce_wq *oce_create_wq(struct oce_softc *, struct oce_eq *);
412void oce_drain_wq(struct oce_wq *);
413void oce_destroy_wq(struct oce_wq *);
414struct oce_rq *
415 oce_create_rq(struct oce_softc *, struct oce_eq *, int rss);
416void oce_drain_rq(struct oce_rq *);
417void oce_destroy_rq(struct oce_rq *);
418struct oce_eq *
419 oce_create_eq(struct oce_softc *);
420static inline void
421 oce_arm_eq(struct oce_eq *, int neqe, int rearm, int clearint);
422void oce_drain_eq(struct oce_eq *);
423void oce_destroy_eq(struct oce_eq *);
424struct oce_mq *
425 oce_create_mq(struct oce_softc *, struct oce_eq *);
426void oce_drain_mq(struct oce_mq *);
427void oce_destroy_mq(struct oce_mq *);
428struct oce_cq *
429 oce_create_cq(struct oce_softc *, struct oce_eq *, int nitems,
430 int isize, int eventable, int nodelay, int ncoalesce);
431static inline void
432 oce_arm_cq(struct oce_cq *, int ncqe, int rearm);
433void oce_destroy_cq(struct oce_cq *);
434
435int oce_dma_alloc(struct oce_softc *, bus_size_t, struct oce_dma_mem *);
436void oce_dma_free(struct oce_softc *, struct oce_dma_mem *);
437#define oce_dma_sync(d, f) \
438 bus_dmamap_sync((d)->tag, (d)->map, 0, (d)->map->dm_mapsize, f)
439
440struct oce_ring *
441 oce_create_ring(struct oce_softc *, int nitems, int isize, int maxseg);
442void oce_destroy_ring(struct oce_softc *, struct oce_ring *);
443int oce_load_ring(struct oce_softc *, struct oce_ring *,
444 struct oce_pa *, int max_segs);
445static inline void *
446 oce_ring_get(struct oce_ring *);
447static inline void *
448 oce_ring_first(struct oce_ring *);
449static inline void *
450 oce_ring_next(struct oce_ring *);
451struct oce_pkt *
452 oce_pkt_alloc(struct oce_softc *, size_t size, int nsegs,
453 int maxsegsz);
454void oce_pkt_free(struct oce_softc *, struct oce_pkt *);
455static inline struct oce_pkt *
456 oce_pkt_get(struct oce_pkt_list *);
457static inline void
458 oce_pkt_put(struct oce_pkt_list *, struct oce_pkt *);
459
460int oce_init_fw(struct oce_softc *);
461int oce_mbox_init(struct oce_softc *);
462int oce_mbox_dispatch(struct oce_softc *);
463int oce_cmd(struct oce_softc *, int subsys, int opcode, int version,
464 void *payload, int length);
465void oce_first_mcc(struct oce_softc *);
466
467int oce_get_fw_config(struct oce_softc *);
468int oce_check_native_mode(struct oce_softc *);
469int oce_create_iface(struct oce_softc *, uint8_t *macaddr);
470int oce_config_vlan(struct oce_softc *, struct normal_vlan *vtags,
471 int nvtags, int untagged, int promisc);
472int oce_set_flow_control(struct oce_softc *, uint64_t);
473int oce_config_rss(struct oce_softc *, int enable);
474int oce_update_mcast(struct oce_softc *, uint8_t multi[][ETHER_ADDR_LEN],
475 int naddr);
476int oce_set_promisc(struct oce_softc *, int enable);
477int oce_get_link_status(struct oce_softc *);
478
479void oce_macaddr_set(struct oce_softc *);
480int oce_macaddr_get(struct oce_softc *, uint8_t *macaddr);
481int oce_macaddr_add(struct oce_softc *, uint8_t *macaddr, uint32_t *pmac);
482int oce_macaddr_del(struct oce_softc *, uint32_t pmac);
483
484int oce_new_rq(struct oce_softc *, struct oce_rq *);
485int oce_new_wq(struct oce_softc *, struct oce_wq *);
486int oce_new_mq(struct oce_softc *, struct oce_mq *);
487int oce_new_eq(struct oce_softc *, struct oce_eq *);
488int oce_new_cq(struct oce_softc *, struct oce_cq *);
489
490int oce_init_stats(struct oce_softc *);
491int oce_update_stats(struct oce_softc *);
492int oce_stats_be2(struct oce_softc *, uint64_t *, uint64_t *);
493int oce_stats_be3(struct oce_softc *, uint64_t *, uint64_t *);
494int oce_stats_xe(struct oce_softc *, uint64_t *, uint64_t *);
495
496struct pool *oce_pkt_pool;
497
498struct cfdriver oce_cd = {
499 NULL, "oce", DV_IFNET
500};
501
502const struct cfattach oce_ca = {
503 sizeof(struct oce_softc), oce_match, oce_attach, NULL, NULL
504};
505
506const struct pci_matchid oce_devices[] = {
507 { PCI_VENDOR_SERVERENGINES, PCI_PRODUCT_SERVERENGINES_BE2 },
508 { PCI_VENDOR_SERVERENGINES, PCI_PRODUCT_SERVERENGINES_BE3 },
509 { PCI_VENDOR_SERVERENGINES, PCI_PRODUCT_SERVERENGINES_OCBE2 },
510 { PCI_VENDOR_SERVERENGINES, PCI_PRODUCT_SERVERENGINES_OCBE3 },
511 { PCI_VENDOR_EMULEX, PCI_PRODUCT_EMULEX_XE201 },
512};
513
514int
515oce_match(struct device *parent, void *match, void *aux)
516{
517 return (pci_matchbyid(aux, oce_devices, nitems(oce_devices)));
518}
519
520void
521oce_attach(struct device *parent, struct device *self, void *aux)
522{
523 struct pci_attach_args *pa = (struct pci_attach_args *)aux;
524 struct oce_softc *sc = (struct oce_softc *)self;
525 const char *intrstr = NULL;
526 pci_intr_handle_t ih;
527
528 switch (PCI_PRODUCT(pa->pa_id)) {
529 case PCI_PRODUCT_SERVERENGINES_BE2:
530 case PCI_PRODUCT_SERVERENGINES_OCBE2:
531 SET(sc->sc_flags, OCE_F_BE2);
532 break;
533 case PCI_PRODUCT_SERVERENGINES_BE3:
534 case PCI_PRODUCT_SERVERENGINES_OCBE3:
535 SET(sc->sc_flags, OCE_F_BE3);
536 break;
537 case PCI_PRODUCT_EMULEX_XE201:
538 SET(sc->sc_flags, OCE_F_XE201);
539 break;
540 }
541
542 sc->sc_dmat = pa->pa_dmat;
543 if (oce_pci_alloc(sc, pa))
544 return;
545
546 sc->sc_tx_ring_size = OCE_TX_RING_SIZE;
547 sc->sc_rx_ring_size = OCE_RX_RING_SIZE;
548
549 /* create the bootstrap mailbox */
550 if (oce_dma_alloc(sc, sizeof(struct oce_bmbx), &sc->sc_mbx)) {
551 printf(": failed to allocate mailbox memory\n");
552 return;
553 }
554 if (oce_dma_alloc(sc, OCE_MAX_PAYLOAD, &sc->sc_pld)) {
555 printf(": failed to allocate payload memory\n");
556 goto fail_1;
557 }
558
559 if (oce_init_fw(sc))
560 goto fail_2;
561
562 if (oce_mbox_init(sc)) {
563 printf(": failed to initialize mailbox\n");
564 goto fail_2;
565 }
566
567 if (oce_get_fw_config(sc)) {
568 printf(": failed to get firmware configuration\n");
569 goto fail_2;
570 }
571
572 if (ISSET(sc->sc_flags, OCE_F_BE3)) {
573 if (oce_check_native_mode(sc))
574 goto fail_2;
575 }
576
577 if (oce_macaddr_get(sc, sc->sc_macaddr)) {
578 printf(": failed to fetch MAC address\n");
579 goto fail_2;
580 }
581 memcpy(sc->sc_ac.ac_enaddr, sc->sc_macaddr, ETHER_ADDR_LEN);
582
583 if (oce_pkt_pool == NULL) {
584 oce_pkt_pool = malloc(sizeof(struct pool), M_DEVBUF, M_NOWAIT);
585 if (oce_pkt_pool == NULL) {
586 printf(": unable to allocate descriptor pool\n");
587 goto fail_2;
588 }
589 pool_init(oce_pkt_pool, sizeof(struct oce_pkt), 0, IPL_NET,
590 0, "ocepkts", NULL);
591 }
592
593 /* We allocate a single interrupt resource */
594 sc->sc_nintr = 1;
595 if (pci_intr_map_msi(pa, &ih) != 0 &&
596 pci_intr_map(pa, &ih) != 0) {
597 printf(": couldn't map interrupt\n");
598 goto fail_2;
599 }
600
601 intrstr = pci_intr_string(pa->pa_pc, ih);
602 sc->sc_ih = pci_intr_establish(pa->pa_pc, ih, IPL_NET, oce_intr, sc,
603 sc->sc_dev.dv_xname);
604 if (sc->sc_ih == NULL) {
605 printf(": couldn't establish interrupt\n");
606 if (intrstr != NULL)
607 printf(" at %s", intrstr);
608 printf("\n");
609 goto fail_2;
610 }
611 printf(": %s", intrstr);
612
613 if (oce_init_stats(sc))
614 goto fail_3;
615
616 if (oce_init_queues(sc))
617 goto fail_3;
618
619 oce_attach_ifp(sc);
620
621#ifdef OCE_LRO
622 if (oce_init_lro(sc))
623 goto fail_4;
624#endif
625
626 timeout_set(&sc->sc_tick, oce_tick, sc);
627 timeout_set(&sc->sc_rxrefill, oce_refill_rx, sc);
628
629 config_mountroot(self, oce_attachhook);
630
631 printf(", address %s\n", ether_sprintf(sc->sc_ac.ac_enaddr));
632
633 return;
634
635#ifdef OCE_LRO
636fail_4:
637 oce_free_lro(sc);
638 ether_ifdetach(&sc->sc_ac.ac_if);
639 if_detach(&sc->sc_ac.ac_if);
640 oce_release_queues(sc);
641#endif
642fail_3:
643 pci_intr_disestablish(pa->pa_pc, sc->sc_ih);
644fail_2:
645 oce_dma_free(sc, &sc->sc_pld);
646fail_1:
647 oce_dma_free(sc, &sc->sc_mbx);
648}
649
650int
651oce_pci_alloc(struct oce_softc *sc, struct pci_attach_args *pa)
652{
653 pcireg_t memtype, reg;
654
655 /* setup the device config region */
656 if (ISSET(sc->sc_flags, OCE_F_BE2))
657 reg = OCE_BAR_CFG_BE2;
658 else
659 reg = OCE_BAR_CFG;
660
661 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, reg);
662 if (pci_mapreg_map(pa, reg, memtype, 0, &sc->sc_cfg_iot,
663 &sc->sc_cfg_ioh, NULL, &sc->sc_cfg_size,
664 IS_BE(sc) ? 0 : 32768)) {
665 printf(": can't find cfg mem space\n");
666 return (ENXIO);
667 }
668
669 /*
670 * Read the SLI_INTF register and determine whether we
671 * can use this port and its features
672 */
673 reg = pci_conf_read(pa->pa_pc, pa->pa_tag, OCE_INTF_REG_OFFSET);
674 if (OCE_SLI_SIGNATURE(reg) != OCE_INTF_VALID_SIG) {
675 printf(": invalid signature\n");
676 goto fail_1;
677 }
678 if (OCE_SLI_REVISION(reg) != OCE_INTF_SLI_REV4) {
679 printf(": unsupported SLI revision\n");
680 goto fail_1;
681 }
682 if (OCE_SLI_IFTYPE(reg) == OCE_INTF_IF_TYPE_1)
683 SET(sc->sc_flags, OCE_F_MBOX_ENDIAN_RQD);
684 if (OCE_SLI_HINT1(reg) == OCE_INTF_FUNC_RESET_REQD)
685 SET(sc->sc_flags, OCE_F_RESET_RQD);
686
687 /* Lancer has one BAR (CFG) but BE3 has three (CFG, CSR, DB) */
688 if (IS_BE(sc)) {
689 /* set up CSR region */
690 reg = OCE_BAR_CSR;
691 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, reg);
692 if (pci_mapreg_map(pa, reg, memtype, 0, &sc->sc_csr_iot,
693 &sc->sc_csr_ioh, NULL, &sc->sc_csr_size, 0)) {
694 printf(": can't find csr mem space\n");
695 goto fail_1;
696 }
697
698 /* set up DB doorbell region */
699 reg = OCE_BAR_DB;
700 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, reg);
701 if (pci_mapreg_map(pa, reg, memtype, 0, &sc->sc_db_iot,
702 &sc->sc_db_ioh, NULL, &sc->sc_db_size, 0)) {
703 printf(": can't find db mem space\n");
704 goto fail_2;
705 }
706 } else {
707 sc->sc_csr_iot = sc->sc_db_iot = sc->sc_cfg_iot;
708 sc->sc_csr_ioh = sc->sc_db_ioh = sc->sc_cfg_ioh;
709 }
710
711 return (0);
712
713fail_2:
714 bus_space_unmap(sc->sc_csr_iot, sc->sc_csr_ioh, sc->sc_csr_size);
715fail_1:
716 bus_space_unmap(sc->sc_cfg_iot, sc->sc_cfg_ioh, sc->sc_cfg_size);
717 return (ENXIO);
718}
719
720static inline uint32_t
721oce_read_cfg(struct oce_softc *sc, bus_size_t off)
722{
723 bus_space_barrier(sc->sc_cfg_iot, sc->sc_cfg_ioh, off, 4,
724 BUS_SPACE_BARRIER_READ);
725 return (bus_space_read_4(sc->sc_cfg_iot, sc->sc_cfg_ioh, off));
726}
727
728static inline uint32_t
729oce_read_csr(struct oce_softc *sc, bus_size_t off)
730{
731 bus_space_barrier(sc->sc_csr_iot, sc->sc_csr_ioh, off, 4,
732 BUS_SPACE_BARRIER_READ);
733 return (bus_space_read_4(sc->sc_csr_iot, sc->sc_csr_ioh, off));
734}
735
736static inline uint32_t
737oce_read_db(struct oce_softc *sc, bus_size_t off)
738{
739 bus_space_barrier(sc->sc_db_iot, sc->sc_db_ioh, off, 4,
740 BUS_SPACE_BARRIER_READ);
741 return (bus_space_read_4(sc->sc_db_iot, sc->sc_db_ioh, off));
742}
743
744static inline void
745oce_write_cfg(struct oce_softc *sc, bus_size_t off, uint32_t val)
746{
747 bus_space_write_4(sc->sc_cfg_iot, sc->sc_cfg_ioh, off, val);
748 bus_space_barrier(sc->sc_cfg_iot, sc->sc_cfg_ioh, off, 4,
749 BUS_SPACE_BARRIER_WRITE);
750}
751
752static inline void
753oce_write_csr(struct oce_softc *sc, bus_size_t off, uint32_t val)
754{
755 bus_space_write_4(sc->sc_csr_iot, sc->sc_csr_ioh, off, val);
756 bus_space_barrier(sc->sc_csr_iot, sc->sc_csr_ioh, off, 4,
757 BUS_SPACE_BARRIER_WRITE);
758}
759
760static inline void
761oce_write_db(struct oce_softc *sc, bus_size_t off, uint32_t val)
762{
763 bus_space_write_4(sc->sc_db_iot, sc->sc_db_ioh, off, val);
764 bus_space_barrier(sc->sc_db_iot, sc->sc_db_ioh, off, 4,
765 BUS_SPACE_BARRIER_WRITE);
766}
767
768static inline void
769oce_intr_enable(struct oce_softc *sc)
770{
771 uint32_t reg;
772
773 reg = oce_read_cfg(sc, PCI_INTR_CTRL);
774 oce_write_cfg(sc, PCI_INTR_CTRL, reg | HOSTINTR_MASK);
775}
776
777static inline void
778oce_intr_disable(struct oce_softc *sc)
779{
780 uint32_t reg;
781
782 reg = oce_read_cfg(sc, PCI_INTR_CTRL);
783 oce_write_cfg(sc, PCI_INTR_CTRL, reg & ~HOSTINTR_MASK);
784}
785
786void
787oce_attachhook(struct device *self)
788{
789 struct oce_softc *sc = (struct oce_softc *)self;
790
791 oce_get_link_status(sc);
792
793 oce_arm_cq(sc->sc_mq->cq, 0, TRUE);
794
795 /*
796 * We need to get MCC async events. So enable intrs and arm
797 * first EQ, Other EQs will be armed after interface is UP
798 */
799 oce_intr_enable(sc);
800 oce_arm_eq(sc->sc_eq[0], 0, TRUE, FALSE);
801
802 /*
803 * Send first mcc cmd and after that we get gracious
804 * MCC notifications from FW
805 */
806 oce_first_mcc(sc);
807}
808
809void
810oce_attach_ifp(struct oce_softc *sc)
811{
812 struct ifnet *ifp = &sc->sc_ac.ac_if;
813
814 ifmedia_init(&sc->sc_media, IFM_IMASK, oce_media_change,
815 oce_media_status);
816 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
817 ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
818
819 strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);
820 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
821 ifp->if_ioctl = oce_ioctl;
822 ifp->if_start = oce_start;
823 ifp->if_watchdog = oce_watchdog;
824 ifp->if_hardmtu = OCE_MAX_MTU;
825 ifp->if_softc = sc;
826 ifq_init_maxlen(&ifp->if_snd, sc->sc_tx_ring_size - 1);
827
828 ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_CSUM_IPv4 |
829 IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;
830
831#if NVLAN > 0
832 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
833#endif
834
835#ifdef OCE_TSO
836 ifp->if_capabilities |= IFCAP_TSO;
837 ifp->if_capabilities |= IFCAP_VLAN_HWTSO;
838#endif
839#ifdef OCE_LRO
840 ifp->if_capabilities |= IFCAP_LRO;
841#endif
842
843 if_attach(ifp);
844 ether_ifattach(ifp);
845}
846
847int
848oce_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
849{
850 struct oce_softc *sc = ifp->if_softc;
851 struct ifreq *ifr = (struct ifreq *)data;
852 int s, error = 0;
853
854 s = splnet();
855
856 switch (command) {
857 case SIOCSIFADDR:
858 ifp->if_flags |= IFF_UP;
859 if (!(ifp->if_flags & IFF_RUNNING))
860 oce_init(sc);
861 break;
862 case SIOCSIFFLAGS:
863 if (ifp->if_flags & IFF_UP) {
864 if (ifp->if_flags & IFF_RUNNING)
865 error = ENETRESET;
866 else
867 oce_init(sc);
868 } else {
869 if (ifp->if_flags & IFF_RUNNING)
870 oce_stop(sc);
871 }
872 break;
873 case SIOCGIFMEDIA:
874 case SIOCSIFMEDIA:
875 error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, command);
876 break;
877 case SIOCGIFRXR:
878 error = oce_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_data);
879 break;
880 default:
881 error = ether_ioctl(ifp, &sc->sc_ac, command, data);
882 break;
883 }
884
885 if (error == ENETRESET) {
886 if (ifp->if_flags & IFF_RUNNING)
887 oce_iff(sc);
888 error = 0;
889 }
890
891 splx(s);
892
893 return (error);
894}
895
896int
897oce_rxrinfo(struct oce_softc *sc, struct if_rxrinfo *ifri)
898{
899 struct if_rxring_info *ifr, ifr1;
900 struct oce_rq *rq;
901 int error, i;
902 u_int n = 0;
903
904 if (sc->sc_nrq > 1) {
905 ifr = mallocarray(sc->sc_nrq, sizeof(*ifr), M_DEVBUF,
906 M_WAITOK | M_ZERO);
907 } else
908 ifr = &ifr1;
909
910 OCE_RQ_FOREACH(sc, rq, i) {
911 ifr[n].ifr_size = MCLBYTES;
912 snprintf(ifr[n].ifr_name, sizeof(ifr[n].ifr_name), "/%d", i);
913 ifr[n].ifr_info = rq->rxring;
914 n++;
915 }
916
917 error = if_rxr_info_ioctl(ifri, sc->sc_nrq, ifr);
918
919 if (sc->sc_nrq > 1)
920 free(ifr, M_DEVBUF, sc->sc_nrq * sizeof(*ifr));
921 return (error);
922}
923
924
925void
926oce_iff(struct oce_softc *sc)
927{
928 uint8_t multi[OCE_MAX_MC_FILTER_SIZE][ETHER_ADDR_LEN];
929 struct arpcom *ac = &sc->sc_ac;
930 struct ifnet *ifp = &ac->ac_if;
931 struct ether_multi *enm;
932 struct ether_multistep step;
933 int naddr = 0, promisc = 0;
934
935 ifp->if_flags &= ~IFF_ALLMULTI;
936
937 if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0 ||
938 ac->ac_multicnt >= OCE_MAX_MC_FILTER_SIZE) {
939 ifp->if_flags |= IFF_ALLMULTI;
940 promisc = 1;
941 } else {
942 ETHER_FIRST_MULTI(step, &sc->sc_ac, enm);
943 while (enm != NULL) {
944 memcpy(multi[naddr++], enm->enm_addrlo, ETHER_ADDR_LEN);
945 ETHER_NEXT_MULTI(step, enm);
946 }
947 oce_update_mcast(sc, multi, naddr);
948 }
949
950 oce_set_promisc(sc, promisc);
951}
952
953void
954oce_link_status(struct oce_softc *sc)
955{
956 struct ifnet *ifp = &sc->sc_ac.ac_if;
957 int link_state = LINK_STATE_DOWN;
958
959 ifp->if_baudrate = 0;
960 if (sc->sc_link_up) {
961 link_state = LINK_STATE_FULL_DUPLEX;
962
963 switch (sc->sc_link_speed) {
964 case 1:
965 ifp->if_baudrate = IF_Mbps(10);
966 break;
967 case 2:
968 ifp->if_baudrate = IF_Mbps(100);
969 break;
970 case 3:
971 ifp->if_baudrate = IF_Gbps(1);
972 break;
973 case 4:
974 ifp->if_baudrate = IF_Gbps(10);
975 break;
976 }
977 }
978 if (ifp->if_link_state != link_state) {
979 ifp->if_link_state = link_state;
980 if_link_state_change(ifp);
981 }
982}
983
984void
985oce_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
986{
987 struct oce_softc *sc = ifp->if_softc;
988
989 ifmr->ifm_status = IFM_AVALID;
990 ifmr->ifm_active = IFM_ETHER;
991
992 if (oce_get_link_status(sc) == 0)
993 oce_link_status(sc);
994
995 if (!sc->sc_link_up) {
996 ifmr->ifm_active |= IFM_NONE;
997 return;
998 }
999
1000 ifmr->ifm_status |= IFM_ACTIVE;
1001
1002 switch (sc->sc_link_speed) {
1003 case 1: /* 10 Mbps */
1004 ifmr->ifm_active |= IFM_10_T | IFM_FDX;
1005 break;
1006 case 2: /* 100 Mbps */
1007 ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
1008 break;
1009 case 3: /* 1 Gbps */
1010 ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
1011 break;
1012 case 4: /* 10 Gbps */
1013 ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
1014 break;
1015 }
1016
1017 if (sc->sc_fc & IFM_ETH_RXPAUSE)
1018 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
1019 if (sc->sc_fc & IFM_ETH_TXPAUSE)
1020 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
1021}
1022
1023int
1024oce_media_change(struct ifnet *ifp)
1025{
1026 return (0);
1027}
1028
1029void
1030oce_tick(void *arg)
1031{
1032 struct oce_softc *sc = arg;
1033 int s;
1034
1035 s = splnet();
1036
1037 if (oce_update_stats(sc) == 0)
1038 timeout_add_sec(&sc->sc_tick, 1);
1039
1040 splx(s);
1041}
1042
1043void
1044oce_init(void *arg)
1045{
1046 struct oce_softc *sc = arg;
1047 struct ifnet *ifp = &sc->sc_ac.ac_if;
1048 struct oce_eq *eq;
1049 struct oce_rq *rq;
1050 struct oce_wq *wq;
1051 int i;
1052
1053 oce_stop(sc);
1054
1055 DELAY(10);
1056
1057 oce_macaddr_set(sc);
1058
1059 oce_iff(sc);
1060
1061 /* Enable VLAN promiscuous mode */
1062 if (oce_config_vlan(sc, NULL, 0, 1, 1))
1063 goto error;
1064
1065 if (oce_set_flow_control(sc, IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE))
1066 goto error;
1067
1068 OCE_RQ_FOREACH(sc, rq, i) {
1069 rq->mtu = ifp->if_hardmtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
1070 ETHER_VLAN_ENCAP_LEN;
1071 if (oce_new_rq(sc, rq)) {
1072 printf("%s: failed to create rq\n",
1073 sc->sc_dev.dv_xname);
1074 goto error;
1075 }
1076 rq->ring->index = 0;
1077
1078 /* oce splits jumbos into 2k chunks... */
1079 if_rxr_init(&rq->rxring, 8, rq->nitems);
1080
1081 if (!oce_alloc_rx_bufs(rq)) {
1082 printf("%s: failed to allocate rx buffers\n",
1083 sc->sc_dev.dv_xname);
1084 goto error;
1085 }
1086 }
1087
1088#ifdef OCE_RSS
1089 /* RSS config */
1090 if (sc->sc_rss_enable) {
1091 if (oce_config_rss(sc, (uint8_t)sc->sc_if_id, 1)) {
1092 printf("%s: failed to configure RSS\n",
1093 sc->sc_dev.dv_xname);
1094 goto error;
1095 }
1096 }
1097#endif
1098
1099 OCE_RQ_FOREACH(sc, rq, i)
1100 oce_arm_cq(rq->cq, 0, TRUE);
1101
1102 OCE_WQ_FOREACH(sc, wq, i)
1103 oce_arm_cq(wq->cq, 0, TRUE);
1104
1105 oce_arm_cq(sc->sc_mq->cq, 0, TRUE);
1106
1107 OCE_EQ_FOREACH(sc, eq, i)
1108 oce_arm_eq(eq, 0, TRUE, FALSE);
1109
1110 if (oce_get_link_status(sc) == 0)
1111 oce_link_status(sc);
1112
1113 ifp->if_flags |= IFF_RUNNING;
1114 ifq_clr_oactive(&ifp->if_snd);
1115
1116 timeout_add_sec(&sc->sc_tick, 1);
1117
1118 oce_intr_enable(sc);
1119
1120 return;
1121error:
1122 oce_stop(sc);
1123}
1124
1125void
1126oce_stop(struct oce_softc *sc)
1127{
1128 struct mbx_delete_nic_rq cmd;
1129 struct ifnet *ifp = &sc->sc_ac.ac_if;
1130 struct oce_rq *rq;
1131 struct oce_wq *wq;
1132 struct oce_eq *eq;
1133 int i;
1134
1135 timeout_del(&sc->sc_tick);
1136 timeout_del(&sc->sc_rxrefill);
1137
1138 ifp->if_flags &= ~IFF_RUNNING;
1139 ifq_clr_oactive(&ifp->if_snd);
1140
1141 /* Stop intrs and finish any bottom halves pending */
1142 oce_intr_disable(sc);
1143
1144 /* Invalidate any pending cq and eq entries */
1145 OCE_EQ_FOREACH(sc, eq, i)
1146 oce_drain_eq(eq);
1147 OCE_RQ_FOREACH(sc, rq, i) {
1148 /* destroy the work queue in the firmware */
1149 memset(&cmd, 0, sizeof(cmd));
1150 cmd.params.req.rq_id = htole16(rq->id);
1151 oce_cmd(sc, SUBSYS_NIC, OPCODE_NIC_DELETE_RQ,
1152 OCE_MBX_VER_V0, &cmd, sizeof(cmd));
1153 DELAY(1000);
1154 oce_drain_rq(rq);
1155 oce_free_posted_rxbuf(rq);
1156 }
1157 OCE_WQ_FOREACH(sc, wq, i)
1158 oce_drain_wq(wq);
1159}
1160
1161void
1162oce_watchdog(struct ifnet *ifp)
1163{
1164 printf("%s: watchdog timeout -- resetting\n", ifp->if_xname);
1165
1166 oce_init(ifp->if_softc);
1167
1168 ifp->if_oerrors++;
1169}
1170
1171void
1172oce_start(struct ifnet *ifp)
1173{
1174 struct oce_softc *sc = ifp->if_softc;
1175 struct mbuf *m;
1176 int pkts = 0;
1177
1178 if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
1179 return;
1180
1181 for (;;) {
1182 m = ifq_dequeue(&ifp->if_snd);
1183 if (m == NULL)
1184 break;
1185
1186 if (oce_encap(sc, &m, 0)) {
1187 ifq_set_oactive(&ifp->if_snd);
1188 break;
1189 }
1190
1191#if NBPFILTER > 0
1192 if (ifp->if_bpf)
1193 bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT);
1194#endif
1195 pkts++;
1196 }
1197
1198 /* Set a timeout in case the chip goes out to lunch */
1199 if (pkts)
1200 ifp->if_timer = 5;
1201}
1202
1203int
1204oce_encap(struct oce_softc *sc, struct mbuf **mpp, int wqidx)
1205{
1206 struct mbuf *m = *mpp;
1207 struct oce_wq *wq = sc->sc_wq[wqidx];
1208 struct oce_pkt *pkt = NULL;
1209 struct oce_nic_hdr_wqe *nhe;
1210 struct oce_nic_frag_wqe *nfe;
1211 int i, nwqe, err;
1212
1213#ifdef OCE_TSO
1214 if (m->m_pkthdr.csum_flags & CSUM_TSO) {
1215 /* consolidate packet buffers for TSO/LSO segment offload */
1216 m = oce_tso(sc, mpp);
1217 if (m == NULL)
1218 goto error;
1219 }
1220#endif
1221
1222 if ((pkt = oce_pkt_get(&wq->pkt_free)) == NULL)
1223 goto error;
1224
1225 err = bus_dmamap_load_mbuf(sc->sc_dmat, pkt->map, m, BUS_DMA_NOWAIT);
1226 if (err == EFBIG) {
1227 if (m_defrag(m, M_DONTWAIT) ||
1228 bus_dmamap_load_mbuf(sc->sc_dmat, pkt->map, m,
1229 BUS_DMA_NOWAIT))
1230 goto error;
1231 *mpp = m;
1232 } else if (err != 0)
1233 goto error;
1234
1235 pkt->nsegs = pkt->map->dm_nsegs;
1236
1237 nwqe = pkt->nsegs + 1;
1238 if (IS_BE(sc)) {
1239 /* BE2 and BE3 require even number of WQEs */
1240 if (nwqe & 1)
1241 nwqe++;
1242 }
1243
1244 /* Fail if there's not enough free WQEs */
1245 if (nwqe >= wq->ring->nitems - wq->ring->nused) {
1246 bus_dmamap_unload(sc->sc_dmat, pkt->map);
1247 goto error;
1248 }
1249
1250 bus_dmamap_sync(sc->sc_dmat, pkt->map, 0, pkt->map->dm_mapsize,
1251 BUS_DMASYNC_PREWRITE);
1252 pkt->mbuf = m;
1253
1254 /* TX work queue entry for the header */
1255 nhe = oce_ring_get(wq->ring);
1256 memset(nhe, 0, sizeof(*nhe))__builtin_memset((nhe), (0), (sizeof(*nhe)));
1257
1258 nhe->u0.s.complete = 1;
1259 nhe->u0.s.event = 1;
1260 nhe->u0.s.crc = 1;
1261 nhe->u0.s.forward = 0;
1262 nhe->u0.s.ipcs = (m->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT) ? 1 : 0;
1263 nhe->u0.s.udpcs = (m->m_pkthdr.csum_flags & M_UDP_CSUM_OUT) ? 1 : 0;
1264 nhe->u0.s.tcpcs = (m->m_pkthdr.csum_flags & M_TCP_CSUM_OUT) ? 1 : 0;
1265 nhe->u0.s.num_wqe = nwqe;
1266 nhe->u0.s.total_length = m->m_pkthdr.len;
1267
1268#if NVLAN > 0
1269 if (m->m_flags & M_VLANTAG) {
1270 nhe->u0.s.vlan = 1; /* Vlan present */
1271 nhe->u0.s.vlan_tag = m->m_pkthdr.ether_vtag;
1272 }
1273#endif
1274
1275#ifdef OCE_TSO
1276 if (m->m_pkthdr.csum_flags & CSUM_TSO) {
1277 if (m->m_pkthdr.tso_segsz) {
1278 nhe->u0.s.lso = 1;
1279 nhe->u0.s.lso_mss = m->m_pkthdr.tso_segsz;
1280 }
1281 if (!IS_BE(sc))
1282 nhe->u0.s.ipcs = 1;
1283 }
1284#endif
1285
1286 oce_dma_sync(&wq->ring->dma, BUS_DMASYNC_PREREAD |
1287 BUS_DMASYNC_PREWRITE);
1288
1289 wq->ring->nused++;
1290
1291 /* TX work queue entries for data chunks */
1292 for (i = 0; i < pkt->nsegs; i++) {
1293 nfe = oce_ring_get(wq->ring);
1294 memset(nfe, 0, sizeof(*nfe))__builtin_memset((nfe), (0), (sizeof(*nfe)));
1295 nfe->u0.s.frag_pa_hi = ADDR_HI(pkt->map->dm_segs[i].ds_addr);
1296 nfe->u0.s.frag_pa_lo = ADDR_LO(pkt->map->dm_segs[i].ds_addr);
1297 nfe->u0.s.frag_len = pkt->map->dm_segs[i].ds_len;
1298 wq->ring->nused++;
1299 }
1300 if (nwqe > (pkt->nsegs + 1)) {
1301 nfe = oce_ring_get(wq->ring);
1302 memset(nfe, 0, sizeof(*nfe))__builtin_memset((nfe), (0), (sizeof(*nfe)));
1303 wq->ring->nused++;
1304 pkt->nsegs++;
1305 }
1306
1307 oce_pkt_put(&wq->pkt_list, pkt);
1308
1309 oce_dma_sync(&wq->ring->dma, BUS_DMASYNC_POSTREAD |
1310 BUS_DMASYNC_POSTWRITE);
1311
1312 oce_write_db(sc, PD_TXULP_DB, wq->id | (nwqe << 16));
1313
1314 return (0);
1315
1316error:
1317 if (pkt)
1318 oce_pkt_put(&wq->pkt_free, pkt);
1319 m_freem(*mpp);
1320 *mpp = NULL;
1321 return (1);
1322}
1323
1324#ifdef OCE_TSO
1325struct mbuf *
1326oce_tso(struct oce_softc *sc, struct mbuf **mpp)
1327{
1328 struct mbuf *m;
1329 struct ip *ip;
1330#ifdef INET6
1331 struct ip6_hdr *ip6;
1332#endif
1333 struct ether_vlan_header *eh;
1334 struct tcphdr *th;
1335 uint16_t etype;
1336 int total_len = 0, ehdrlen = 0;
1337
1338 m = *mpp;
1339
1340 if (M_WRITABLE(m) == 0) {
1341 m = m_dup(*mpp, M_DONTWAIT);
1342 if (!m)
1343 return (NULL);
1344 m_freem(*mpp);
1345 *mpp = m;
1346 }
1347
1348 eh = mtod(m, struct ether_vlan_header *);
1349 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1350 etype = ntohs(eh->evl_proto);
1351 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1352 } else {
1353 etype = ntohs(eh->evl_encap_proto);
1354 ehdrlen = ETHER_HDR_LEN;
1355 }
1356
1357 switch (etype) {
1358 case ETHERTYPE_IP:
1359 ip = (struct ip *)(m->m_data + ehdrlen);
1360 if (ip->ip_p != IPPROTO_TCP)
1361 return (NULL);
1362 th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
1363
1364 total_len = ehdrlen + (ip->ip_hl << 2) + (th->th_off << 2);
1365 break;
1366#ifdef INET6
1367 case ETHERTYPE_IPV6:
1368 ip6 = (struct ip6_hdr *)(m->m_data + ehdrlen);
1369 if (ip6->ip6_nxt != IPPROTO_TCP)
1370 return NULL;
1371 th = (struct tcphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr));
1372
1373 total_len = ehdrlen + sizeof(struct ip6_hdr) +
1374 (th->th_off << 2);
1375 break;
1376#endif
1377 default:
1378 return (NULL);
1379 }
1380
1381 m = m_pullup(m, total_len);
1382 if (!m)
1383 return (NULL);
1384 *mpp = m;
1385 return (m);
1386
1387}
1388#endif /* OCE_TSO */
1389
1390int
1391oce_intr(void *arg)
1392{
1393 struct oce_softc *sc = arg;
1394 struct oce_eq *eq = sc->sc_eq[0];
1395 struct oce_eqe *eqe;
1396 struct oce_cq *cq = NULL;
1397 int i, neqe = 0;
1398
1399 oce_dma_sync(&eq->ring->dma, BUS_DMASYNC_POSTREAD);
1400
1401 OCE_RING_FOREACH(eq->ring, eqe, eqe->evnt != 0) {
1402 eqe->evnt = 0;
1403 neqe++;
1404 }
1405
1406 /* Spurious? */
1407 if (!neqe) {
1408 oce_arm_eq(eq, 0, TRUE, FALSE);
1409 return (0);
1410 }
1411
1412 oce_dma_sync(&eq->ring->dma, BUS_DMASYNC_PREWRITE);
1413
1414 /* Clear EQ entries, but don't arm */
1415 oce_arm_eq(eq, neqe, FALSE, TRUE);
1416
1417 /* Process TX, RX and MCC completion queues */
1418 for (i = 0; i < eq->cq_valid; i++) {
1419 cq = eq->cq[i];
1420 (*cq->cq_intr)(cq->cb_arg);
1421 oce_arm_cq(cq, 0, TRUE);
1422 }
1423
1424 oce_arm_eq(eq, 0, TRUE, FALSE);
1425 return (1);
1426}
1427
1428/* Handle the Completion Queue for transmit */
1429void
1430oce_intr_wq(void *arg)
1431{
1432 struct oce_wq *wq = (struct oce_wq *)arg;
1433 struct oce_cq *cq = wq->cq;
1434 struct oce_nic_tx_cqe *cqe;
1435 struct oce_softc *sc = wq->sc;
1436 struct ifnet *ifp = &sc->sc_ac.ac_if;
1437 int ncqe = 0;
1438
1439 oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_POSTREAD);
1440 OCE_RING_FOREACH(cq->ring, cqe, WQ_CQE_VALID(cqe)) {
1441 oce_txeof(wq);
1442 WQ_CQE_INVALIDATE(cqe);
1443 ncqe++;
1444 }
1445 oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_PREWRITE);
1446
1447 if (ifq_is_oactive(&ifp->if_snd)) {
1448 if (wq->ring->nused < (wq->ring->nitems / 2)) {
1449 ifq_clr_oactive(&ifp->if_snd);
1450 oce_start(ifp);
1451 }
1452 }
1453 if (wq->ring->nused == 0)
1454 ifp->if_timer = 0;
1455
1456 if (ncqe)
1457 oce_arm_cq(cq, ncqe, FALSE);
1458}
1459
1460void
1461oce_txeof(struct oce_wq *wq)
1462{
1463 struct oce_softc *sc = wq->sc;
1464 struct oce_pkt *pkt;
1465 struct mbuf *m;
1466
1467 if ((pkt = oce_pkt_get(&wq->pkt_list)) == NULL) {
1468 printf("%s: missing descriptor in txeof\n",
1469 sc->sc_dev.dv_xname);
1470 return;
1471 }
1472
1473 wq->ring->nused -= pkt->nsegs + 1;
1474 bus_dmamap_sync(sc->sc_dmat, pkt->map, 0, pkt->map->dm_mapsize,
1475 BUS_DMASYNC_POSTWRITE);
1476 bus_dmamap_unload(sc->sc_dmat, pkt->map);
1477
1478 m = pkt->mbuf;
1479 m_freem(m);
1480 pkt->mbuf = NULL;
1481 oce_pkt_put(&wq->pkt_free, pkt);
1482}
1483
1484/* Handle the Completion Queue for receive */
1485void
1486oce_intr_rq(void *arg)
1487{
1488 struct oce_rq *rq = (struct oce_rq *)arg;
1489 struct oce_cq *cq = rq->cq;
1490 struct oce_softc *sc = rq->sc;
1491 struct oce_nic_rx_cqe *cqe;
1492 struct ifnet *ifp = &sc->sc_ac.ac_if;
1493 int maxrx, ncqe = 0;
1494
1495 maxrx = IS_XE201(sc) ? 8 : OCE_MAX_RQ_COMPL;
1496
1497 oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_POSTREAD);
1498
1499 OCE_RING_FOREACH(cq->ring, cqe, RQ_CQE_VALID(cqe) && ncqe <= maxrx) {
1500 if (cqe->u0.s.error == 0) {
1501 if (cqe->u0.s.pkt_size == 0)
1502 /* partial DMA workaround for Lancer */
1503 oce_rxeoc(rq, cqe);
1504 else
1505 oce_rxeof(rq, cqe);
1506 } else {
1507 ifp->if_ierrors++;
1508 if (IS_XE201(sc))
1509 /* Lancer A0 no buffer workaround */
1510 oce_rxeoc(rq, cqe);
1511 else
1512 /* Post L3/L4 errors to stack.*/
1513 oce_rxeof(rq, cqe);
1514 }
1515#ifdef OCE_LRO
1516 if (IF_LRO_ENABLED(ifp) && rq->lro_pkts_queued >= 16)
1517 oce_flush_lro(rq);
1518#endif
1519 RQ_CQE_INVALIDATE(cqe);
1520 ncqe++;
1521 }
1522
1523 oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_PREWRITE);
1524
1525#ifdef OCE_LRO
1526 if (IF_LRO_ENABLED(ifp))
1527 oce_flush_lro(rq);
1528#endif
1529
1530 if (ncqe) {
1531 oce_arm_cq(cq, ncqe, FALSE);
1532 if (!oce_alloc_rx_bufs(rq))
1533 timeout_add(&sc->sc_rxrefill, 1);
1534 }
1535}
1536
1537void
1538oce_rxeof(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
1539{
1540 struct oce_softc *sc = rq->sc;
1541 struct oce_pkt *pkt = NULL;
1542 struct ifnet *ifp = &sc->sc_ac.ac_if;
1543 struct mbuf_list ml = MBUF_LIST_INITIALIZER();
1544 struct mbuf *m = NULL, *tail = NULL;
1545 int i, len, frag_len;
1546 uint16_t vtag;
1547
1548 len = cqe->u0.s.pkt_size;
1549
1550 /* Get vlan_tag value */
1551 if (IS_BE(sc))
1552 vtag = ntohs(cqe->u0.s.vlan_tag);
1553 else
1554 vtag = cqe->u0.s.vlan_tag;
1555
1556 for (i = 0; i < cqe->u0.s.num_fragments; i++) {
1557 if ((pkt = oce_pkt_get(&rq->pkt_list)) == NULL) {
1558 printf("%s: missing descriptor in rxeof\n",
1559 sc->sc_dev.dv_xname);
1560 goto exit;
1561 }
1562
1563 bus_dmamap_sync(sc->sc_dmat, pkt->map, 0, pkt->map->dm_mapsize,
1564 BUS_DMASYNC_POSTREAD);
1565 bus_dmamap_unload(sc->sc_dmat, pkt->map);
1566 if_rxr_put(&rq->rxring, 1);
1567
1568 frag_len = (len > rq->fragsize) ? rq->fragsize : len;
1569 pkt->mbuf->m_len = frag_len;
1570
1571 if (tail != NULL) {
1572 /* additional fragments */
1573 pkt->mbuf->m_flags &= ~M_PKTHDR;
1574 tail->m_next = pkt->mbuf;
1575 tail = pkt->mbuf;
1576 } else {
1577 /* first fragment, fill out most of the header */
1578 pkt->mbuf->m_pkthdr.len = len;
1579 pkt->mbuf->m_pkthdr.csum_flags = 0;
1580 if (cqe->u0.s.ip_cksum_pass) {
1581 if (!cqe->u0.s.ip_ver) { /* IPV4 */
1582 pkt->mbuf->m_pkthdr.csum_flags =
1583 M_IPV4_CSUM_IN_OK;
1584 }
1585 }
1586 if (cqe->u0.s.l4_cksum_pass) {
1587 pkt->mbuf->m_pkthdr.csum_flags |=
1588 M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK;
1589 }
1590 m = tail = pkt->mbuf;
1591 }
1592 pkt->mbuf = NULL;
1593 oce_pkt_put(&rq->pkt_free, pkt);
1594 len -= frag_len;
1595 }
1596
1597 if (m) {
1598 if (!oce_port_valid(sc, cqe)) {
1599 m_freem(m);
1600 goto exit;
1601 }
1602
1603#if NVLAN > 0
1604 /* This determines if vlan tag is valid */
1605 if (oce_vtp_valid(sc, cqe)) {
1606 if (sc->sc_fmode & FNM_FLEX10_MODE) {
1607 /* FLEX10. If QnQ is not set, neglect VLAN */
1608 if (cqe->u0.s.qnq) {
1609 m->m_pkthdr.ether_vtag = vtag;
1610 m->m_flags |= M_VLANTAG;
1611 }
1612 } else if (sc->sc_pvid != (vtag & VLAN_VID_MASK)) {
1613 /*
1614 * In UMC mode generally pvid will be striped.
1615 * But in some cases we have seen it comes
1616 * with pvid. So if pvid == vlan, neglect vlan.
1617 */
1618 m->m_pkthdr.ether_vtag = vtag;
1619 m->m_flags |= M_VLANTAG;
1620 }
1621 }
1622#endif
1623
1624#ifdef OCE_LRO
1625 /* Try to queue to LRO */
1626 if (IF_LRO_ENABLED(ifp)(((ifp)->if_data.ifi_capabilities) & (0x00004000)) && !(m->m_flagsm_hdr.mh_flags & M_VLANTAG0x0020) &&
1627 cqe->u0.s.ip_cksum_pass && cqe->u0.s.l4_cksum_pass &&
1628 !cqe->u0.s.ip_ver && rq->lro.lro_cnt != 0) {
1629
1630 if (tcp_lro_rx(&rq->lro, m, 0) == 0) {
1631 rq->lro_pkts_queued ++;
1632 goto exit;
1633 }
1634 /* If LRO posting fails then try to post to STACK */
1635 }
1636#endif
1637
1638 ml_enqueue(&ml, m);
1639 }
1640exit:
1641 if (ifiq_input(&ifp->if_rcv, &ml))
1642 if_rxr_livelocked(&rq->rxring);
1643}
1644
1645void
1646oce_rxeoc(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
1647{
1648 struct oce_softc *sc = rq->sc;
1649 struct oce_pkt *pkt;
1650 int i, num_frags = cqe->u0.s.num_fragments;
1651
1652	if (IS_XE201(sc) && cqe->u0.s.error) {
1653		/*
1654		 * Lancer A0 workaround:
1655		 * num_frags will be 1 more than actual in case of error
1656		 */
1657		if (num_frags)
1658			num_frags--;
1659	}
1660	for (i = 0; i < num_frags; i++) {
1661		if ((pkt = oce_pkt_get(&rq->pkt_list)) == NULL) {
1662			printf("%s: missing descriptor in rxeoc\n",
1663			    sc->sc_dev.dv_xname);
1664			return;
1665		}
1666		bus_dmamap_sync(sc->sc_dmat, pkt->map, 0, pkt->map->dm_mapsize,
1667		    BUS_DMASYNC_POSTREAD);
1668		bus_dmamap_unload(sc->sc_dmat, pkt->map);
1669		if_rxr_put(&rq->rxring, 1);
1670		m_freem(pkt->mbuf);
1671		oce_pkt_put(&rq->pkt_free, pkt);
1672	}
1673}
1674
1675int
1676oce_vtp_valid(struct oce_softc *sc, struct oce_nic_rx_cqe *cqe)
1677{
1678 struct oce_nic_rx_cqe_v1 *cqe_v1;
1679
1680	if (IS_BE(sc) && ISSET(sc->sc_flags, OCE_F_BE3_NATIVE)) {
1681		cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe;
1682		return (cqe_v1->u0.s.vlan_tag_present);
1683	}
1684 return (cqe->u0.s.vlan_tag_present);
1685}
1686
1687int
1688oce_port_valid(struct oce_softc *sc, struct oce_nic_rx_cqe *cqe)
1689{
1690 struct oce_nic_rx_cqe_v1 *cqe_v1;
1691
1692	if (IS_BE(sc) && ISSET(sc->sc_flags, OCE_F_BE3_NATIVE)) {
1693		cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe;
1694		if (sc->sc_port != cqe_v1->u0.s.port)
1695			return (0);
1696	}
1697 return (1);
1698}
1699
1700#ifdef OCE_LRO
1701void
1702oce_flush_lro(struct oce_rq *rq)
1703{
1704 struct oce_softc *sc = rq->sc;
1705 struct ifnet *ifp = &sc->sc_ac.ac_if;
1706 struct lro_ctrl *lro = &rq->lro;
1707 struct lro_entry *queued;
1708
1709	if (!IF_LRO_ENABLED(ifp))
1710		return;
1711
1712	while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
1713		SLIST_REMOVE_HEAD(&lro->lro_active, next);
1714		tcp_lro_flush(lro, queued);
1715	}
1716 rq->lro_pkts_queued = 0;
1717}
1718
1719int
1720oce_init_lro(struct oce_softc *sc)
1721{
1722	struct lro_ctrl *lro = NULL;
1723 int i = 0, rc = 0;
1724
1725 for (i = 0; i < sc->sc_nrq; i++) {
1726 lro = &sc->sc_rq[i]->lro;
1727 rc = tcp_lro_init(lro);
1728 if (rc != 0) {
1729 printf("%s: LRO init failed\n",
1730 sc->sc_dev.dv_xname);
1731 return rc;
1732 }
1733 lro->ifp = &sc->sc_ac.ac_if;
1734 }
1735
1736 return (rc);
1737}
1738
1739void
1740oce_free_lro(struct oce_softc *sc)
1741{
1742	struct lro_ctrl *lro = NULL;
1743 int i = 0;
1744
1745 for (i = 0; i < sc->sc_nrq; i++) {
1746 lro = &sc->sc_rq[i]->lro;
1747 if (lro)
1748 tcp_lro_free(lro);
1749 }
1750}
1751#endif /* OCE_LRO */
1752
1753int
1754oce_get_buf(struct oce_rq *rq)
1755{
1756 struct oce_softc *sc = rq->sc;
1757 struct oce_pkt *pkt;
1758 struct oce_nic_rqe *rqe;
1759
1760	if ((pkt = oce_pkt_get(&rq->pkt_free)) == NULL)
1761		return (0);
1762
1763	pkt->mbuf = MCLGETL(NULL, M_DONTWAIT, MCLBYTES);
1764	if (pkt->mbuf == NULL) {
1765		oce_pkt_put(&rq->pkt_free, pkt);
1766		return (0);
1767	}
1768
1769	pkt->mbuf->m_len = pkt->mbuf->m_pkthdr.len = MCLBYTES;
1770#ifdef __STRICT_ALIGNMENT
1771	m_adj(pkt->mbuf, ETHER_ALIGN);
1772#endif
1773
1774	if (bus_dmamap_load_mbuf(sc->sc_dmat, pkt->map, pkt->mbuf,
1775	    BUS_DMA_NOWAIT)) {
1776		m_freem(pkt->mbuf);
1777		pkt->mbuf = NULL;
1778		oce_pkt_put(&rq->pkt_free, pkt);
1779		return (0);
1780	}
1781
1782	bus_dmamap_sync(sc->sc_dmat, pkt->map, 0, pkt->map->dm_mapsize,
1783	    BUS_DMASYNC_PREREAD);
1784
1785	oce_dma_sync(&rq->ring->dma, BUS_DMASYNC_PREREAD |
1786	    BUS_DMASYNC_PREWRITE);
1787
1788	rqe = oce_ring_get(rq->ring);
1789	rqe->u0.s.frag_pa_hi = ADDR_HI(pkt->map->dm_segs[0].ds_addr);
1790	rqe->u0.s.frag_pa_lo = ADDR_LO(pkt->map->dm_segs[0].ds_addr);
1791
1792	oce_dma_sync(&rq->ring->dma, BUS_DMASYNC_POSTREAD |
1793	    BUS_DMASYNC_POSTWRITE);
1794
1795	oce_pkt_put(&rq->pkt_list, pkt);
1796
1797	return (1);
1798}
1799
1800int
1801oce_alloc_rx_bufs(struct oce_rq *rq)
1802{
1803 struct oce_softc *sc = rq->sc;
1804 int i, nbufs = 0;
1805 u_int slots;
1806
1807 for (slots = if_rxr_get(&rq->rxring, rq->nitems); slots > 0; slots--) {
1808 if (oce_get_buf(rq) == 0)
1809 break;
1810
1811 nbufs++;
1812 }
1813	if_rxr_put(&rq->rxring, slots);
1814
1815	if (!nbufs)
1816		return (0);
1817	for (i = nbufs / OCE_MAX_RQ_POSTS; i > 0; i--) {
1818		oce_write_db(sc, PD_RXULP_DB, rq->id |
1819		    (OCE_MAX_RQ_POSTS << 24));
1820		nbufs -= OCE_MAX_RQ_POSTS;
1821	}
1822	if (nbufs > 0)
1823		oce_write_db(sc, PD_RXULP_DB, rq->id | (nbufs << 24));
1824 return (1);
1825}
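
Editorial note: the RXULP doorbell encodes the number of newly posted receive buffers in bits 31:24 of the register value, which is why oce_alloc_rx_bufs() above rings the doorbell in chunks of OCE_MAX_RQ_POSTS (255) before posting the remainder. The standalone sketch below only illustrates that chunking; printf() stands in for oce_write_db() and the post_rx_bufs() helper is hypothetical, not part of the driver.

	#include <stdint.h>
	#include <stdio.h>

	#define OCE_MAX_RQ_POSTS	255

	static void
	post_rx_bufs(uint32_t rq_id, int nbufs)
	{
		int i;

		/* full chunks of 255 buffers each */
		for (i = nbufs / OCE_MAX_RQ_POSTS; i > 0; i--) {
			printf("doorbell %#x\n",
			    rq_id | ((uint32_t)OCE_MAX_RQ_POSTS << 24));
			nbufs -= OCE_MAX_RQ_POSTS;
		}
		/* post whatever is left over */
		if (nbufs > 0)
			printf("doorbell %#x\n",
			    rq_id | ((uint32_t)nbufs << 24));
	}

	int
	main(void)
	{
		post_rx_bufs(3, 600);	/* rings 255, 255, then 90 */
		return (0);
	}
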
1826
1827void
1828oce_refill_rx(void *arg)
1829{
1830 struct oce_softc *sc = arg;
1831 struct oce_rq *rq;
1832 int i, s;
1833
1834	s = splnet();
1835	OCE_RQ_FOREACH(sc, rq, i) {
1836		if (!oce_alloc_rx_bufs(rq))
1837			timeout_add(&sc->sc_rxrefill, 5);
1838	}
1839	splx(s);
1840}
1841
1842/* Handle the Completion Queue for the Mailbox/Async notifications */
1843void
1844oce_intr_mq(void *arg)
1845{
1846 struct oce_mq *mq = (struct oce_mq *)arg;
1847 struct oce_softc *sc = mq->sc;
1848 struct oce_cq *cq = mq->cq;
1849 struct oce_mq_cqe *cqe;
1850 struct oce_async_cqe_link_state *acqe;
1851 struct oce_async_event_grp5_pvid_state *gcqe;
1852 int evtype, optype, ncqe = 0;
1853
1854	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_POSTREAD);
1855
1856	OCE_RING_FOREACH(cq->ring, cqe, MQ_CQE_VALID(cqe)) {
1857		if (cqe->u0.s.async_event) {
1858			evtype = cqe->u0.s.event_type;
1859			optype = cqe->u0.s.async_type;
1860			if (evtype == ASYNC_EVENT_CODE_LINK_STATE) {
1861				/* Link status evt */
1862				acqe = (struct oce_async_cqe_link_state *)cqe;
1863				oce_link_event(sc, acqe);
1864			} else if ((evtype == ASYNC_EVENT_GRP5) &&
1865			    (optype == ASYNC_EVENT_PVID_STATE)) {
1866				/* GRP5 PVID */
1867				gcqe =
1868				    (struct oce_async_event_grp5_pvid_state *)cqe;
1869				if (gcqe->enabled)
1870					sc->sc_pvid =
1871					    gcqe->tag & VLAN_VID_MASK;
1872				else
1873					sc->sc_pvid = 0;
1874			}
1875		}
1876		MQ_CQE_INVALIDATE(cqe);
1877		ncqe++;
1878	}
1879
1880	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_PREWRITE);
1881
1882	if (ncqe)
1883		oce_arm_cq(cq, ncqe, FALSE);
1884}
1885
1886void
1887oce_link_event(struct oce_softc *sc, struct oce_async_cqe_link_state *acqe)
1888{
1889 /* Update Link status */
1890	sc->sc_link_up = ((acqe->u0.s.link_status & ~ASYNC_EVENT_LOGICAL) ==
1891	    ASYNC_EVENT_LINK_UP);
1892 /* Update speed */
1893 sc->sc_link_speed = acqe->u0.s.speed;
1894 oce_link_status(sc);
1895}
1896
1897int
1898oce_init_queues(struct oce_softc *sc)
1899{
1900 struct oce_wq *wq;
1901 struct oce_rq *rq;
1902 int i;
1903
1904 sc->sc_nrq = 1;
1905 sc->sc_nwq = 1;
1906
1907 /* Create network interface on card */
1908 if (oce_create_iface(sc, sc->sc_macaddr))
1909 goto error;
1910
1911 /* create all of the event queues */
1912 for (i = 0; i < sc->sc_nintr; i++) {
1913 sc->sc_eq[i] = oce_create_eq(sc);
1914 if (!sc->sc_eq[i])
1915 goto error;
1916 }
1917
1918 /* alloc tx queues */
1919	OCE_WQ_FOREACH(sc, wq, i) {
1920		sc->sc_wq[i] = oce_create_wq(sc, sc->sc_eq[i]);
1921		if (!sc->sc_wq[i])
1922			goto error;
1923	}
1924
1925	/* alloc rx queues */
1926	OCE_RQ_FOREACH(sc, rq, i) {
1927		sc->sc_rq[i] = oce_create_rq(sc, sc->sc_eq[i > 0 ? i - 1 : 0],
1928		    i > 0 ? sc->sc_rss_enable : 0);
1929		if (!sc->sc_rq[i])
1930			goto error;
1931	}
1932
1933 /* alloc mailbox queue */
1934 sc->sc_mq = oce_create_mq(sc, sc->sc_eq[0]);
1935 if (!sc->sc_mq)
1936 goto error;
1937
1938 return (0);
1939error:
1940 oce_release_queues(sc);
1941 return (1);
1942}
1943
1944void
1945oce_release_queues(struct oce_softc *sc)
1946{
1947 struct oce_wq *wq;
1948 struct oce_rq *rq;
1949 struct oce_eq *eq;
1950 int i;
1951
1952	OCE_RQ_FOREACH(sc, rq, i) {
1953		if (rq)
1954			oce_destroy_rq(sc->sc_rq[i]);
1955	}
1956
1957	OCE_WQ_FOREACH(sc, wq, i) {
1958		if (wq)
1959			oce_destroy_wq(sc->sc_wq[i]);
1960	}
1961
1962	if (sc->sc_mq)
1963		oce_destroy_mq(sc->sc_mq);
1964
1965	OCE_EQ_FOREACH(sc, eq, i) {
1966		if (eq)
1967			oce_destroy_eq(sc->sc_eq[i]);
1968	}
1969}
1970
1971/**
1972 * @brief Function to create a WQ for NIC Tx
1973 * @param sc software handle to the device
1974 * @returns the pointer to the WQ created or NULL on failure
1975 */
1976struct oce_wq *
1977oce_create_wq(struct oce_softc *sc, struct oce_eq *eq)
1978{
1979 struct oce_wq *wq;
1980 struct oce_cq *cq;
1981 struct oce_pkt *pkt;
1982 int i;
1983
1984 if (sc->sc_tx_ring_size < 256 || sc->sc_tx_ring_size > 2048)
1985		return (NULL);
1986
1987	wq = malloc(sizeof(struct oce_wq), M_DEVBUF, M_NOWAIT | M_ZERO);
1988	if (!wq)
1989		return (NULL);
1990
1991	wq->ring = oce_create_ring(sc, sc->sc_tx_ring_size, NIC_WQE_SIZE, 8);
1992	if (!wq->ring) {
1993		free(wq, M_DEVBUF, 0);
1994		return (NULL);
1995	}
1996
1997	cq = oce_create_cq(sc, eq, CQ_LEN_512, sizeof(struct oce_nic_tx_cqe),
1998	    1, 0, 3);
1999	if (!cq) {
2000		oce_destroy_ring(sc, wq->ring);
2001		free(wq, M_DEVBUF, 0);
2002		return (NULL);
2003	}
2004
2005	wq->id = -1;
2006	wq->sc = sc;
2007
2008	wq->cq = cq;
2009	wq->nitems = sc->sc_tx_ring_size;
2010
2011	SIMPLEQ_INIT(&wq->pkt_free);
2012	SIMPLEQ_INIT(&wq->pkt_list);
2013
2014	for (i = 0; i < sc->sc_tx_ring_size / 2; i++) {
2015		pkt = oce_pkt_alloc(sc, OCE_MAX_TX_SIZE, OCE_MAX_TX_ELEMENTS,
2016		    PAGE_SIZE);
2017		if (pkt == NULL) {
2018			oce_destroy_wq(wq);
2019			return (NULL);
2020		}
2021		oce_pkt_put(&wq->pkt_free, pkt);
2022	}
2023
2024	if (oce_new_wq(sc, wq)) {
2025		oce_destroy_wq(wq);
2026		return (NULL);
2027	}
2028
2029 eq->cq[eq->cq_valid] = cq;
2030 eq->cq_valid++;
2031 cq->cb_arg = wq;
2032 cq->cq_intr = oce_intr_wq;
2033
2034 return (wq);
2035}
2036
2037void
2038oce_drain_wq(struct oce_wq *wq)
2039{
2040 struct oce_cq *cq = wq->cq;
2041 struct oce_nic_tx_cqe *cqe;
2042 int ncqe = 0;
2043
2044	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_POSTREAD);
2045	OCE_RING_FOREACH(cq->ring, cqe, WQ_CQE_VALID(cqe)) {
2046		WQ_CQE_INVALIDATE(cqe);
2047		ncqe++;
2048	}
2049	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_PREWRITE);
2050	oce_arm_cq(cq, ncqe, FALSE);
2051}
2052
2053void
2054oce_destroy_wq(struct oce_wq *wq)
2055{
2056 struct mbx_delete_nic_wq cmd;
2057 struct oce_softc *sc = wq->sc;
2058 struct oce_pkt *pkt;
2059
2060 if (wq->id >= 0) {
2061		memset(&cmd, 0, sizeof(cmd));
2062		cmd.params.req.wq_id = htole16(wq->id);
2063		oce_cmd(sc, SUBSYS_NIC, OPCODE_NIC_DELETE_WQ, OCE_MBX_VER_V0,
2064		    &cmd, sizeof(cmd));
2065	}
2066	if (wq->cq != NULL)
2067		oce_destroy_cq(wq->cq);
2068	if (wq->ring != NULL)
2069		oce_destroy_ring(sc, wq->ring);
2070	while ((pkt = oce_pkt_get(&wq->pkt_free)) != NULL)
2071		oce_pkt_free(sc, pkt);
2072	free(wq, M_DEVBUF, 0);
2073}
2074
2075/**
2076 * @brief function to allocate receive queue resources
2077 * @param sc software handle to the device
2078 * @param eq pointer to associated event queue
2079 * @param rss is-rss-queue flag
2080 * @returns the pointer to the RQ created or NULL on failure
2081 */
2082struct oce_rq *
2083oce_create_rq(struct oce_softc *sc, struct oce_eq *eq, int rss)
2084{
2085 struct oce_rq *rq;
2086 struct oce_cq *cq;
2087 struct oce_pkt *pkt;
2088 int i;
2089
2090 /* Hardware doesn't support any other value */
2091 if (sc->sc_rx_ring_size != 1024)
2092		return (NULL);
2093
2094	rq = malloc(sizeof(struct oce_rq), M_DEVBUF, M_NOWAIT | M_ZERO);
2095	if (!rq)
2096		return (NULL);
2097
2098	rq->ring = oce_create_ring(sc, sc->sc_rx_ring_size,
2099	    sizeof(struct oce_nic_rqe), 2);
2100	if (!rq->ring) {
2101		free(rq, M_DEVBUF, 0);
2102		return (NULL);
2103	}
2104
2105	cq = oce_create_cq(sc, eq, CQ_LEN_1024, sizeof(struct oce_nic_rx_cqe),
2106	    1, 0, 3);
2107	if (!cq) {
2108		oce_destroy_ring(sc, rq->ring);
2109		free(rq, M_DEVBUF, 0);
2110		return (NULL);
2111	}
2112
2113	rq->id = -1;
2114	rq->sc = sc;
2115
2116	rq->nitems = sc->sc_rx_ring_size;
2117	rq->fragsize = OCE_RX_BUF_SIZE;
2118	rq->rss = rss;
2119
2120	SIMPLEQ_INIT(&rq->pkt_free);
2121	SIMPLEQ_INIT(&rq->pkt_list);
2122
2123	for (i = 0; i < sc->sc_rx_ring_size; i++) {
2124		pkt = oce_pkt_alloc(sc, OCE_RX_BUF_SIZE, 1, OCE_RX_BUF_SIZE);
2125		if (pkt == NULL) {
2126			oce_destroy_rq(rq);
2127			return (NULL);
2128		}
2129		oce_pkt_put(&rq->pkt_free, pkt);
2130	}
2131
2132 rq->cq = cq;
2133 eq->cq[eq->cq_valid] = cq;
2134 eq->cq_valid++;
2135 cq->cb_arg = rq;
2136 cq->cq_intr = oce_intr_rq;
2137
2138 /* RX queue is created in oce_init */
2139
2140 return (rq);
2141}
2142
2143void
2144oce_drain_rq(struct oce_rq *rq)
2145{
2146 struct oce_nic_rx_cqe *cqe;
2147 struct oce_cq *cq = rq->cq;
2148 int ncqe = 0;
2149
2150	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_POSTREAD);
2151	OCE_RING_FOREACH(cq->ring, cqe, RQ_CQE_VALID(cqe)) {
2152		RQ_CQE_INVALIDATE(cqe);
2153		ncqe++;
2154	}
2155	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_PREWRITE);
2156	oce_arm_cq(cq, ncqe, FALSE);
2157}
2158
2159void
2160oce_destroy_rq(struct oce_rq *rq)
2161{
2162 struct mbx_delete_nic_rq cmd;
2163 struct oce_softc *sc = rq->sc;
2164 struct oce_pkt *pkt;
2165
2166 if (rq->id >= 0) {
2167		memset(&cmd, 0, sizeof(cmd));
2168		cmd.params.req.rq_id = htole16(rq->id);
2169		oce_cmd(sc, SUBSYS_NIC, OPCODE_NIC_DELETE_RQ, OCE_MBX_VER_V0,
2170		    &cmd, sizeof(cmd));
2171	}
2172	if (rq->cq != NULL)
2173		oce_destroy_cq(rq->cq);
2174	if (rq->ring != NULL)
2175		oce_destroy_ring(sc, rq->ring);
2176	while ((pkt = oce_pkt_get(&rq->pkt_free)) != NULL)
2177		oce_pkt_free(sc, pkt);
2178	free(rq, M_DEVBUF, 0);
2179}
2180
2181struct oce_eq *
2182oce_create_eq(struct oce_softc *sc)
2183{
2184 struct oce_eq *eq;
2185
2186 /* allocate an eq */
2187	eq = malloc(sizeof(struct oce_eq), M_DEVBUF, M_NOWAIT | M_ZERO);
2188	if (eq == NULL)
2189		return (NULL);
2190
2191	eq->ring = oce_create_ring(sc, EQ_LEN_1024, EQE_SIZE_4, 8);
2192	if (!eq->ring) {
2193		free(eq, M_DEVBUF, 0);
2194		return (NULL);
2195	}
2196
2197	eq->id = -1;
2198	eq->sc = sc;
2199	eq->nitems = EQ_LEN_1024;	/* length of event queue */
2200	eq->isize = EQE_SIZE_4;		/* size of a queue item */
2201	eq->delay = OCE_DEFAULT_EQD;	/* event queue delay */
2202
2203	if (oce_new_eq(sc, eq)) {
2204		oce_destroy_ring(sc, eq->ring);
2205		free(eq, M_DEVBUF, 0);
2206		return (NULL);
2207	}
2208
2209 return (eq);
2210}
2211
2212/**
2213 * @brief Function to arm an EQ so that it can generate events
2214 * @param eq pointer to event queue structure
2215 * @param neqe number of EQEs to arm
2216 * @param rearm rearm bit enable/disable
2217 * @param clearint bit to clear the interrupt condition because of which
2218 * EQEs are generated
2219 */
2220static inline void
2221oce_arm_eq(struct oce_eq *eq, int neqe, int rearm, int clearint)
2222{
2223	oce_write_db(eq->sc, PD_EQ_DB, eq->id | PD_EQ_DB_EVENT |
2224	    (clearint << 9) | (neqe << 16) | (rearm << 29));
2225}
2226
2227void
2228oce_drain_eq(struct oce_eq *eq)
2229{
2230 struct oce_eqe *eqe;
2231 int neqe = 0;
2232
2233	oce_dma_sync(&eq->ring->dma, BUS_DMASYNC_POSTREAD);
2234	OCE_RING_FOREACH(eq->ring, eqe, eqe->evnt != 0) {
2235		eqe->evnt = 0;
2236		neqe++;
2237	}
2238	oce_dma_sync(&eq->ring->dma, BUS_DMASYNC_PREWRITE);
2239	oce_arm_eq(eq, neqe, FALSE, TRUE);
2240}
2241
2242void
2243oce_destroy_eq(struct oce_eq *eq)
2244{
2245 struct mbx_destroy_common_eq cmd;
2246 struct oce_softc *sc = eq->sc;
2247
2248 if (eq->id >= 0) {
2249		memset(&cmd, 0, sizeof(cmd));
2250		cmd.params.req.id = htole16(eq->id);
2251		oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_DESTROY_EQ,
2252		    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
2253	}
2254	if (eq->ring != NULL)
2255		oce_destroy_ring(sc, eq->ring);
2256	free(eq, M_DEVBUF, 0);
2257}
2258
2259struct oce_mq *
2260oce_create_mq(struct oce_softc *sc, struct oce_eq *eq)
2261{
2262	struct oce_mq *mq = NULL;
2263	struct oce_cq *cq;
2264
2265	/* allocate the mq */
2266	mq = malloc(sizeof(struct oce_mq), M_DEVBUF, M_NOWAIT | M_ZERO);
2267	if (!mq)
2268		return (NULL);
2269
2270	mq->ring = oce_create_ring(sc, 128, sizeof(struct oce_mbx), 8);
2271	if (!mq->ring) {
2272		free(mq, M_DEVBUF, 0);
2273		return (NULL);
2274	}
2275
2276	cq = oce_create_cq(sc, eq, CQ_LEN_256, sizeof(struct oce_mq_cqe),
2277	    1, 0, 0);
2278	if (!cq) {
2279		oce_destroy_ring(sc, mq->ring);
2280		free(mq, M_DEVBUF, 0);
2281		return (NULL);
2282	}
2283
2284	mq->id = -1;
2285	mq->sc = sc;
2286	mq->cq = cq;
2287
2288	mq->nitems = 128;
2289
2290	if (oce_new_mq(sc, mq)) {
2291		oce_destroy_cq(mq->cq);
2292		oce_destroy_ring(sc, mq->ring);
2293		free(mq, M_DEVBUF, 0);
2294		return (NULL);
2295	}
2296
2297 eq->cq[eq->cq_valid] = cq;
2298 eq->cq_valid++;
2299 mq->cq->eq = eq;
2300 mq->cq->cb_arg = mq;
2301 mq->cq->cq_intr = oce_intr_mq;
2302
2303 return (mq);
2304}
2305
2306void
2307oce_drain_mq(struct oce_mq *mq)
2308{
2309 struct oce_cq *cq = mq->cq;
2310 struct oce_mq_cqe *cqe;
2311 int ncqe = 0;
2312
2313	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_POSTREAD);
2314	OCE_RING_FOREACH(cq->ring, cqe, MQ_CQE_VALID(cqe)) {
2315		MQ_CQE_INVALIDATE(cqe);
2316		ncqe++;
2317	}
2318	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_PREWRITE);
2319	oce_arm_cq(cq, ncqe, FALSE);
2320}
2321
2322void
2323oce_destroy_mq(struct oce_mq *mq)
2324{
2325 struct mbx_destroy_common_mq cmd;
2326 struct oce_softc *sc = mq->sc;
2327
2328 if (mq->id >= 0) {
2329		memset(&cmd, 0, sizeof(cmd));
2330		cmd.params.req.id = htole16(mq->id);
2331		oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_DESTROY_MQ,
2332		    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
2333	}
2334	if (mq->ring != NULL)
2335		oce_destroy_ring(sc, mq->ring);
2336	if (mq->cq != NULL)
2337		oce_destroy_cq(mq->cq);
2338	free(mq, M_DEVBUF, 0);
2339}
2340
2341/**
2342 * @brief Function to create a completion queue
2343 * @param sc software handle to the device
 2344 * @param eq optional eq to associate with the cq
 2345 * @param nitems length of completion queue
 2346 * @param isize size of completion queue items
 2347 * @param eventable is-eventable flag
2348 * @param nodelay no delay flag
2349 * @param ncoalesce no coalescence flag
2350 * @returns pointer to the cq created, NULL on failure
2351 */
2352struct oce_cq *
2353oce_create_cq(struct oce_softc *sc, struct oce_eq *eq, int nitems, int isize,
2354 int eventable, int nodelay, int ncoalesce)
2355{
2356	struct oce_cq *cq = NULL;
2357
2358	cq = malloc(sizeof(struct oce_cq), M_DEVBUF, M_NOWAIT | M_ZERO);
2359	if (!cq)
2360		return (NULL);
2361
2362	cq->ring = oce_create_ring(sc, nitems, isize, 4);
2363	if (!cq->ring) {
2364		free(cq, M_DEVBUF, 0);
2365		return (NULL);
2366	}
2367
2368	cq->sc = sc;
2369	cq->eq = eq;
2370	cq->nitems = nitems;
2371	cq->nodelay = nodelay;
2372	cq->ncoalesce = ncoalesce;
2373	cq->eventable = eventable;
2374
2375	if (oce_new_cq(sc, cq)) {
2376		oce_destroy_ring(sc, cq->ring);
2377		free(cq, M_DEVBUF, 0);
2378		return (NULL);
2379	}
2380
2381 sc->sc_cq[sc->sc_ncq++] = cq;
2382
2383 return (cq);
2384}
2385
2386void
2387oce_destroy_cq(struct oce_cq *cq)
2388{
2389 struct mbx_destroy_common_cq cmd;
2390 struct oce_softc *sc = cq->sc;
2391
2392 if (cq->id >= 0) {
2393		memset(&cmd, 0, sizeof(cmd));
2394		cmd.params.req.id = htole16(cq->id);
2395		oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_DESTROY_CQ,
2396		    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
2397	}
2398	if (cq->ring != NULL)
2399		oce_destroy_ring(sc, cq->ring);
2400	free(cq, M_DEVBUF, 0);
2401}
2402
2403/**
2404 * @brief Function to arm a CQ with CQEs
2405 * @param cq pointer to the completion queue structure
2406 * @param ncqe number of CQEs to arm
2407 * @param rearm rearm bit enable/disable
2408 */
2409static inline void
2410oce_arm_cq(struct oce_cq *cq, int ncqe, int rearm)
2411{
2412	oce_write_db(cq->sc, PD_CQ_DB, cq->id | (ncqe << 16) | (rearm << 29));
2413}
2414
2415void
2416oce_free_posted_rxbuf(struct oce_rq *rq)
2417{
2418 struct oce_softc *sc = rq->sc;
2419 struct oce_pkt *pkt;
2420
2421	while ((pkt = oce_pkt_get(&rq->pkt_list)) != NULL) {
2422		bus_dmamap_sync(sc->sc_dmat, pkt->map, 0, pkt->map->dm_mapsize,
2423		    BUS_DMASYNC_POSTREAD);
2424		bus_dmamap_unload(sc->sc_dmat, pkt->map);
2425		if (pkt->mbuf != NULL) {
2426			m_freem(pkt->mbuf);
2427			pkt->mbuf = NULL;
2428		}
2429		oce_pkt_put(&rq->pkt_free, pkt);
2430		if_rxr_put(&rq->rxring, 1);
2431	}
2432}
2433
2434int
2435oce_dma_alloc(struct oce_softc *sc, bus_size_t size, struct oce_dma_mem *dma)
2436{
2437 int rc;
2438
2439	memset(dma, 0, sizeof(struct oce_dma_mem));
2440
2441	dma->tag = sc->sc_dmat;
2442	rc = bus_dmamap_create(dma->tag, size, 1, size, 0, BUS_DMA_NOWAIT,
2443	    &dma->map);
2444	if (rc != 0) {
2445		printf("%s: failed to allocate DMA handle",
2446		    sc->sc_dev.dv_xname);
2447		goto fail_0;
2448	}
2449
2450	rc = bus_dmamem_alloc(dma->tag, size, PAGE_SIZE, 0, &dma->segs, 1,
2451	    &dma->nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
2452	if (rc != 0) {
2453		printf("%s: failed to allocate DMA memory",
2454		    sc->sc_dev.dv_xname);
2455		goto fail_1;
2456	}
2457
2458	rc = bus_dmamem_map(dma->tag, &dma->segs, dma->nsegs, size,
2459	    &dma->vaddr, BUS_DMA_NOWAIT);
2460	if (rc != 0) {
2461		printf("%s: failed to map DMA memory", sc->sc_dev.dv_xname);
2462		goto fail_2;
2463	}
2464
2465	rc = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size, NULL,
2466	    BUS_DMA_NOWAIT);
2467	if (rc != 0) {
2468		printf("%s: failed to load DMA memory", sc->sc_dev.dv_xname);
2469		goto fail_3;
2470	}
2471
2472	bus_dmamap_sync(dma->tag, dma->map, 0, dma->map->dm_mapsize,
2473	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2474
2475	dma->paddr = dma->map->dm_segs[0].ds_addr;
2476	dma->size = size;
2477
2478	return (0);
2479
2480fail_3:
2481	bus_dmamem_unmap(dma->tag, dma->vaddr, size);
2482fail_2:
2483	bus_dmamem_free(dma->tag, &dma->segs, dma->nsegs);
2484fail_1:
2485	bus_dmamap_destroy(dma->tag, dma->map);
2486fail_0:
2487	return (rc);
2488}
2489
2490void
2491oce_dma_free(struct oce_softc *sc, struct oce_dma_mem *dma)
2492{
2493	if (dma->tag == NULL)
2494		return;
2495
2496	if (dma->map != NULL) {
2497		oce_dma_sync(dma, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2498		bus_dmamap_unload(dma->tag, dma->map);
2499
2500		if (dma->vaddr != 0) {
2501			bus_dmamem_free(dma->tag, &dma->segs, dma->nsegs);
2502			dma->vaddr = 0;
2503		}
2504
2505		bus_dmamap_destroy(dma->tag, dma->map);
2506		dma->map = NULL;
2507		dma->tag = NULL;
2508	}
2509}
2510
2511struct oce_ring *
2512oce_create_ring(struct oce_softc *sc, int nitems, int isize, int maxsegs)
2513{
2514 struct oce_dma_mem *dma;
2515 struct oce_ring *ring;
2516 bus_size_t size = nitems * isize;
2517 int rc;
2518
2519	if (size > maxsegs * PAGE_SIZE)
2520		return (NULL);
2521
2522	ring = malloc(sizeof(struct oce_ring), M_DEVBUF, M_NOWAIT | M_ZERO);
2523	if (ring == NULL)
2524		return (NULL);
2525
2526	ring->isize = isize;
2527	ring->nitems = nitems;
2528
2529	dma = &ring->dma;
2530	dma->tag = sc->sc_dmat;
2531	rc = bus_dmamap_create(dma->tag, size, maxsegs, PAGE_SIZE, 0,
2532	    BUS_DMA_NOWAIT, &dma->map);
2533	if (rc != 0) {
2534		printf("%s: failed to allocate DMA handle",
2535		    sc->sc_dev.dv_xname);
2536		goto fail_0;
2537	}
2538
2539	rc = bus_dmamem_alloc(dma->tag, size, 0, 0, &dma->segs, maxsegs,
2540	    &dma->nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
2541	if (rc != 0) {
2542		printf("%s: failed to allocate DMA memory",
2543		    sc->sc_dev.dv_xname);
2544		goto fail_1;
2545	}
2546
2547	rc = bus_dmamem_map(dma->tag, &dma->segs, dma->nsegs, size,
2548	    &dma->vaddr, BUS_DMA_NOWAIT);
2549	if (rc != 0) {
2550		printf("%s: failed to map DMA memory", sc->sc_dev.dv_xname);
2551		goto fail_2;
2552	}
2553
2554	bus_dmamap_sync(dma->tag, dma->map, 0, dma->map->dm_mapsize,
2555	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2556
2557	dma->paddr = 0;
2558	dma->size = size;
2559
2560	return (ring);
2561
2562fail_2:
2563	bus_dmamem_free(dma->tag, &dma->segs, dma->nsegs);
2564fail_1:
2565	bus_dmamap_destroy(dma->tag, dma->map);
2566fail_0:
2567	free(ring, M_DEVBUF, 0);
2568	return (NULL);
2569}
2570
2571void
2572oce_destroy_ring(struct oce_softc *sc, struct oce_ring *ring)
2573{
2574 oce_dma_free(sc, &ring->dma);
2575 free(ring, M_DEVBUF2, 0);
2576}
2577
2578int
2579oce_load_ring(struct oce_softc *sc, struct oce_ring *ring,
2580 struct oce_pa *pa, int maxsegs)
2581{
2582 struct oce_dma_mem *dma = &ring->dma;
2583 int i;
2584
2585	if (bus_dmamap_load(dma->tag, dma->map, dma->vaddr,
2586	    ring->isize * ring->nitems, NULL, BUS_DMA_NOWAIT)) {
2587		printf("%s: failed to load a ring map\n", sc->sc_dev.dv_xname);
2588		return (0);
2589	}
2590
2591	if (dma->map->dm_nsegs > maxsegs) {
2592		printf("%s: too many segments\n", sc->sc_dev.dv_xname);
2593		return (0);
2594	}
2595
2596	bus_dmamap_sync(dma->tag, dma->map, 0, dma->map->dm_mapsize,
2597	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2598
2599 for (i = 0; i < dma->map->dm_nsegs; i++)
2600 pa[i].addr = dma->map->dm_segs[i].ds_addr;
2601
2602 return (dma->map->dm_nsegs);
2603}
2604
2605static inline void *
2606oce_ring_get(struct oce_ring *ring)
2607{
2608 int index = ring->index;
2609
2610 if (++ring->index == ring->nitems)
2611 ring->index = 0;
2612 return ((void *)(ring->dma.vaddr + index * ring->isize));
2613}
2614
2615static inline void *
2616oce_ring_first(struct oce_ring *ring)
2617{
2618 return ((void *)(ring->dma.vaddr + ring->index * ring->isize));
2619}
2620
2621static inline void *
2622oce_ring_next(struct oce_ring *ring)
2623{
2624 if (++ring->index == ring->nitems)
2625 ring->index = 0;
2626 return ((void *)(ring->dma.vaddr + ring->index * ring->isize));
2627}
2628
2629struct oce_pkt *
2630oce_pkt_alloc(struct oce_softc *sc, size_t size, int nsegs, int maxsegsz)
2631{
2632 struct oce_pkt *pkt;
2633
2634	if ((pkt = pool_get(oce_pkt_pool, PR_NOWAIT | PR_ZERO)) == NULL)
2635		return (NULL);
2636
2637	if (bus_dmamap_create(sc->sc_dmat, size, nsegs, maxsegsz, 0,
2638	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &pkt->map)) {
2639		pool_put(oce_pkt_pool, pkt);
2640		return (NULL);
2641	}
2642
2643 return (pkt);
2644}
2645
2646void
2647oce_pkt_free(struct oce_softc *sc, struct oce_pkt *pkt)
2648{
2649 if (pkt->map) {
2650		bus_dmamap_unload(sc->sc_dmat, pkt->map);
2651		bus_dmamap_destroy(sc->sc_dmat, pkt->map);
2652	}
2653 pool_put(oce_pkt_pool, pkt);
2654}
2655
2656static inline struct oce_pkt *
2657oce_pkt_get(struct oce_pkt_list *lst)
2658{
2659 struct oce_pkt *pkt;
2660
2661	pkt = SIMPLEQ_FIRST(lst);
2662	if (pkt == NULL)
2663		return (NULL);
2664
2665	SIMPLEQ_REMOVE_HEAD(lst, entry);
2666
2667 return (pkt);
2668}
2669
2670static inline void
2671oce_pkt_put(struct oce_pkt_list *lst, struct oce_pkt *pkt)
2672{
2673	SIMPLEQ_INSERT_TAIL(lst, pkt, entry);
2674}
2675
2676/**
2677 * @brief Wait for FW to become ready and reset it
2678 * @param sc software handle to the device
2679 */
2680int
2681oce_init_fw(struct oce_softc *sc)
2682{
2683 struct ioctl_common_function_reset cmd;
2684 uint32_t reg;
2685 int err = 0, tmo = 60000;
2686
2687 /* read semaphore CSR */
2688	reg = oce_read_csr(sc, MPU_EP_SEMAPHORE(sc));
2689
2690	/* if host is ready then wait for fw ready else send POST */
2691	if ((reg & MPU_EP_SEM_STAGE_MASK) <= POST_STAGE_AWAITING_HOST_RDY) {
2692		reg = (reg & ~MPU_EP_SEM_STAGE_MASK) | POST_STAGE_CHIP_RESET;
2693		oce_write_csr(sc, MPU_EP_SEMAPHORE(sc), reg);
2694	}
2695
2696	/* wait for FW to become ready */
2697	for (;;) {
2698		if (--tmo == 0)
2699			break;
2700
2701		DELAY(1000);
2702
2703		reg = oce_read_csr(sc, MPU_EP_SEMAPHORE(sc));
2704		if (reg & MPU_EP_SEM_ERROR) {
2705			printf(": POST failed: %#x\n", reg);
2706			return (ENXIO);
2707		}
2708		if ((reg & MPU_EP_SEM_STAGE_MASK) == POST_STAGE_ARMFW_READY) {
2709			/* reset FW */
2710			if (ISSET(sc->sc_flags, OCE_F_RESET_RQD)) {
2711				memset(&cmd, 0, sizeof(cmd));
2712				err = oce_cmd(sc, SUBSYS_COMMON,
2713				    OPCODE_COMMON_FUNCTION_RESET,
2714				    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
2715			}
2716			return (err);
2717		}
2718	}
2719
2720	printf(": POST timed out: %#x\n", reg);
2721
2722	return (ENXIO);
2723}
2724
2725static inline int
2726oce_mbox_wait(struct oce_softc *sc)
2727{
2728 int i;
2729
2730	for (i = 0; i < 20000; i++) {
2731		if (oce_read_db(sc, PD_MPU_MBOX_DB) & PD_MPU_MBOX_DB_READY)
2732			return (0);
2733		DELAY(100);
2734	}
2735	return (ETIMEDOUT);
2736}
2737
2738/**
2739 * @brief Mailbox dispatch
2740 * @param sc software handle to the device
2741 */
2742int
2743oce_mbox_dispatch(struct oce_softc *sc)
2744{
2745 uint32_t pa, reg;
2746 int err;
2747
2748	pa = (uint32_t)((uint64_t)OCE_MEM_DVA(&sc->sc_mbx) >> 34);
2749	reg = PD_MPU_MBOX_DB_HI | (pa << PD_MPU_MBOX_DB_ADDR_SHIFT);
2750
2751	if ((err = oce_mbox_wait(sc)) != 0)
2752		goto out;
2753
2754	oce_write_db(sc, PD_MPU_MBOX_DB, reg);
2755
2756	pa = (uint32_t)((uint64_t)OCE_MEM_DVA(&sc->sc_mbx) >> 4) & 0x3fffffff;
2757	reg = pa << PD_MPU_MBOX_DB_ADDR_SHIFT;
2758
2759	if ((err = oce_mbox_wait(sc)) != 0)
2760		goto out;
2761
2762	oce_write_db(sc, PD_MPU_MBOX_DB, reg);
2763
2764	oce_dma_sync(&sc->sc_mbx, BUS_DMASYNC_POSTWRITE);
2765
2766	if ((err = oce_mbox_wait(sc)) != 0)
2767		goto out;
2768
2769out:
2770	oce_dma_sync(&sc->sc_mbx, BUS_DMASYNC_PREREAD);
2771	return (err);
2772}
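
Editorial note: oce_mbox_dispatch() hands the 16-byte-aligned DMA address of the bootstrap mailbox to the hardware in two doorbell writes, first bits 63:34 tagged with PD_MPU_MBOX_DB_HI and then bits 33:4 (the low four bits are implied by the alignment). The standalone sketch below, using a made-up example address, only illustrates that bit split; it is not part of the driver.

	#include <stdint.h>
	#include <stdio.h>

	#define PD_MPU_MBOX_DB_HI		(1 << 1)
	#define PD_MPU_MBOX_DB_ADDR_SHIFT	2

	int
	main(void)
	{
		uint64_t paddr = 0x00000012345678f0ULL;	/* example address */
		uint32_t hi = (uint32_t)(paddr >> 34);	/* bits 63:34 */
		uint32_t lo = (uint32_t)(paddr >> 4) & 0x3fffffff; /* bits 33:4 */

		printf("first write:  %#x\n",
		    PD_MPU_MBOX_DB_HI | (hi << PD_MPU_MBOX_DB_ADDR_SHIFT));
		printf("second write: %#x\n", lo << PD_MPU_MBOX_DB_ADDR_SHIFT);
		return (0);
	}
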
2773
2774/**
2775 * @brief Function to initialize the hw with host endian information
2776 * @param sc software handle to the device
2777 * @returns 0 on success, ETIMEDOUT on failure
2778 */
2779int
2780oce_mbox_init(struct oce_softc *sc)
2781{
2782	struct oce_bmbx *bmbx = OCE_MEM_KVA(&sc->sc_mbx);
2783	uint8_t *ptr = (uint8_t *)&bmbx->mbx;
2784
2785	if (!ISSET(sc->sc_flags, OCE_F_MBOX_ENDIAN_RQD))
2786 return (0);
2787
2788 /* Endian Signature */
2789 *ptr++ = 0xff;
2790 *ptr++ = 0x12;
2791 *ptr++ = 0x34;
2792 *ptr++ = 0xff;
2793 *ptr++ = 0xff;
2794 *ptr++ = 0x56;
2795 *ptr++ = 0x78;
2796 *ptr = 0xff;
2797
2798 return (oce_mbox_dispatch(sc));
2799}
2800
2801int
2802oce_cmd(struct oce_softc *sc, int subsys, int opcode, int version,
2803 void *payload, int length)
2804{
2805	struct oce_bmbx *bmbx = OCE_MEM_KVA(&sc->sc_mbx);
2806	struct oce_mbx *mbx = &bmbx->mbx;
2807	struct mbx_hdr *hdr;
2808	caddr_t epayload = NULL;
2809	int err;
2810
2811	if (length > OCE_MBX_PAYLOAD)
2812		epayload = OCE_MEM_KVA(&sc->sc_pld);
2813	if (length > OCE_MAX_PAYLOAD)
2814		return (EINVAL);
2815
2816	oce_dma_sync(&sc->sc_mbx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2817
2818	memset(mbx, 0, sizeof(struct oce_mbx));
2819
2820	mbx->payload_length = length;
2821
2822	if (epayload) {
2823		mbx->flags = OCE_MBX_F_SGE;
2824		oce_dma_sync(&sc->sc_pld, BUS_DMASYNC_PREREAD);
2825		memcpy(epayload, payload, length);
2826		mbx->pld.sgl[0].addr = OCE_MEM_DVA(&sc->sc_pld);
2827		mbx->pld.sgl[0].length = length;
2828		hdr = (struct mbx_hdr *)epayload;
2829	} else {
2830		mbx->flags = OCE_MBX_F_EMBED;
2831		memcpy(mbx->pld.data, payload, length);
2832		hdr = (struct mbx_hdr *)&mbx->pld.data;
2833	}
2834
2835	hdr->subsys = subsys;
2836	hdr->opcode = opcode;
2837	hdr->version = version;
2838	hdr->length = length - sizeof(*hdr);
2839	if (opcode == OPCODE_COMMON_FUNCTION_RESET)
2840		hdr->timeout = 2 * OCE_MBX_TIMEOUT;
2841	else
2842		hdr->timeout = OCE_MBX_TIMEOUT;
2843
2844	if (epayload)
2845		oce_dma_sync(&sc->sc_pld, BUS_DMASYNC_PREWRITE);
2846
2847	err = oce_mbox_dispatch(sc);
2848	if (err == 0) {
2849		if (epayload) {
2850			oce_dma_sync(&sc->sc_pld, BUS_DMASYNC_POSTWRITE);
2851			memcpy(payload, epayload, length);
2852		} else
2853			memcpy(payload, &mbx->pld.data, length);
2854	} else
2855		printf("%s: mailbox timeout, subsys %d op %d ver %d "
2856		    "%spayload length %d\n", sc->sc_dev.dv_xname, subsys,
2857		    opcode, version, epayload ? "ext " : "",
2858		    length);
2859	return (err);
2860}
2861
2862/**
 2863 * @brief Firmware sends gratuitous async notifications during
 2864 * attach only after the first MCC command has been sent. We
 2865 * use the MCC queue only for receiving async notifications and
 2866 * the mailbox for sending commands, so send at least one dummy
 2867 * command on the MCC queue to start receiving notifications.
2868 */
2869void
2870oce_first_mcc(struct oce_softc *sc)
2871{
2872 struct oce_mbx *mbx;
2873 struct oce_mq *mq = sc->sc_mq;
2874 struct mbx_hdr *hdr;
2875 struct mbx_get_common_fw_version *cmd;
2876
2877 mbx = oce_ring_get(mq->ring);
2878	memset(mbx, 0, sizeof(struct oce_mbx));
2879
2880	cmd = (struct mbx_get_common_fw_version *)&mbx->pld.data;
2881
2882	hdr = &cmd->hdr;
2883	hdr->subsys = SUBSYS_COMMON;
2884	hdr->opcode = OPCODE_COMMON_GET_FW_VERSION;
2885	hdr->version = OCE_MBX_VER_V0;
2886	hdr->timeout = OCE_MBX_TIMEOUT;
2887	hdr->length = sizeof(*cmd) - sizeof(*hdr);
2888
2889	mbx->flags = OCE_MBX_F_EMBED;
2890	mbx->payload_length = sizeof(*cmd);
2891	oce_dma_sync(&mq->ring->dma, BUS_DMASYNC_PREREAD |
2892	    BUS_DMASYNC_PREWRITE);
2893	oce_write_db(sc, PD_MQ_DB, mq->id | (1 << 16));
2894}
2895
2896int
2897oce_get_fw_config(struct oce_softc *sc)
2898{
2899 struct mbx_common_query_fw_config cmd;
2900 int err;
2901
2902	memset(&cmd, 0, sizeof(cmd));
2903
2904	err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_QUERY_FIRMWARE_CONFIG,
2905	    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
2906 if (err)
2907 return (err);
2908
2909 sc->sc_port = cmd.params.rsp.port_id;
2910 sc->sc_fmode = cmd.params.rsp.function_mode;
2911
2912 return (0);
2913}
2914
2915int
2916oce_check_native_mode(struct oce_softc *sc)
2917{
2918 struct mbx_common_set_function_cap cmd;
2919 int err;
2920
2921	memset(&cmd, 0, sizeof(cmd));
2922
2923	cmd.params.req.valid_capability_flags = CAP_SW_TIMESTAMPS |
2924	    CAP_BE3_NATIVE_ERX_API;
2925	cmd.params.req.capability_flags = CAP_BE3_NATIVE_ERX_API;
2926
2927	err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_SET_FUNCTIONAL_CAPS,
2928	    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
2929	if (err)
2930		return (err);
2931
2932	if (cmd.params.rsp.capability_flags & CAP_BE3_NATIVE_ERX_API)
2933		SET(sc->sc_flags, OCE_F_BE3_NATIVE);
2934
2935 return (0);
2936}
2937
2938/**
2939 * @brief Function for creating a network interface.
2940 * @param sc software handle to the device
2941 * @returns 0 on success, error otherwise
2942 */
2943int
2944oce_create_iface(struct oce_softc *sc, uint8_t *macaddr)
2945{
2946 struct mbx_create_common_iface cmd;
2947 uint32_t caps, caps_en;
2948 int err = 0;
2949
2950 /* interface capabilities to give device when creating interface */
2951	caps = MBX_RX_IFACE_BROADCAST | MBX_RX_IFACE_UNTAGGED |
2952	    MBX_RX_IFACE_PROMISC | MBX_RX_IFACE_MCAST_PROMISC |
2953	    MBX_RX_IFACE_RSS;
2954
2955	/* capabilities to enable by default (others set dynamically) */
2956	caps_en = MBX_RX_IFACE_BROADCAST | MBX_RX_IFACE_UNTAGGED;
2957
2958	if (!IS_XE201(sc)) {
2959		/* LANCER A0 workaround */
2960		caps |= MBX_RX_IFACE_PASS_L3L4_ERR;
2961		caps_en |= MBX_RX_IFACE_PASS_L3L4_ERR;
2962	}
2963
2964	/* enable capabilities controlled via driver startup parameters */
2965	if (sc->sc_rss_enable)
2966		caps_en |= MBX_RX_IFACE_RSS;
2967
2968	memset(&cmd, 0, sizeof(cmd));
2969
2970	cmd.params.req.version = 0;
2971	cmd.params.req.cap_flags = htole32(caps);
2972	cmd.params.req.enable_flags = htole32(caps_en);
2973	if (macaddr != NULL) {
2974		memcpy(&cmd.params.req.mac_addr[0], macaddr, ETHER_ADDR_LEN);
2975		cmd.params.req.mac_invalid = 0;
2976	} else
2977		cmd.params.req.mac_invalid = 1;
2978
2979	err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_CREATE_IFACE,
2980	    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
2981	if (err)
2982		return (err);
2983
2984	sc->sc_if_id = letoh32(cmd.params.rsp.if_id);
2985
2986	if (macaddr != NULL)
2987		sc->sc_pmac_id = letoh32(cmd.params.rsp.pmac_id);
2988
2989 return (0);
2990}
2991
2992/**
2993 * @brief Function to send the mbx command to configure vlan
2994 * @param sc software handle to the device
2995 * @param vtags array of vlan tags
2996 * @param nvtags number of elements in array
 2997 * @param untagged boolean TRUE/FALSE
2998 * @param promisc flag to enable/disable VLAN promiscuous mode
2999 * @returns 0 on success, EIO on failure
3000 */
3001int
3002oce_config_vlan(struct oce_softc *sc, struct normal_vlan *vtags, int nvtags,
3003 int untagged, int promisc)
3004{
3005 struct mbx_common_config_vlan cmd;
3006
3007	memset(&cmd, 0, sizeof(cmd));
3008
3009	cmd.params.req.if_id = sc->sc_if_id;
3010	cmd.params.req.promisc = promisc;
3011	cmd.params.req.untagged = untagged;
3012	cmd.params.req.num_vlans = nvtags;
3013
3014	if (!promisc)
3015		memcpy(cmd.params.req.tags.normal_vlans, vtags,
3016		    nvtags * sizeof(struct normal_vlan));
3017
3018	return (oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_CONFIG_IFACE_VLAN,
3019	    OCE_MBX_VER_V0, &cmd, sizeof(cmd)));
3020}
3021
3022/**
3023 * @brief Function to set flow control capability in the hardware
3024 * @param sc software handle to the device
3025 * @param flags flow control flags to set
3026 * @returns 0 on success, EIO on failure
3027 */
3028int
3029oce_set_flow_control(struct oce_softc *sc, uint64_t flags)
3030{
3031 struct mbx_common_get_set_flow_control cmd;
3032 int err;
3033
3034	memset(&cmd, 0, sizeof(cmd));
3035
3036	cmd.rx_flow_control = flags & IFM_ETH_RXPAUSE ? 1 : 0;
3037	cmd.tx_flow_control = flags & IFM_ETH_TXPAUSE ? 1 : 0;
3038
3039	err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_SET_FLOW_CONTROL,
3040	    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
3041	if (err)
3042		return (err);
3043
3044	memset(&cmd, 0, sizeof(cmd));
3045
3046	err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_GET_FLOW_CONTROL,
3047	    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
3048	if (err)
3049		return (err);
3050
3051	sc->sc_fc = cmd.rx_flow_control ? IFM_ETH_RXPAUSE : 0;
3052	sc->sc_fc |= cmd.tx_flow_control ? IFM_ETH_TXPAUSE : 0;
3053
3054 return (0);
3055}
3056
3057#ifdef OCE_RSS
3058/**
 3059 * @brief Function to enable/disable RSS on the interface
3060 * @param sc software handle to the device
3061 * @param enable 0=disable, OCE_RSS_xxx flags otherwise
3062 * @returns 0 on success, EIO on failure
3063 */
3064int
3065oce_config_rss(struct oce_softc *sc, int enable)
3066{
3067 struct mbx_config_nic_rss cmd;
3068 uint8_t *tbl = &cmd.params.req.cputable;
3069 int i, j;
3070
3071	memset(&cmd, 0, sizeof(cmd));
3072
3073	if (enable)
3074		cmd.params.req.enable_rss = RSS_ENABLE_IPV4 | RSS_ENABLE_IPV6 |
3075		    RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_TCP_IPV6;
3076	cmd.params.req.flush = OCE_FLUSH;
3077	cmd.params.req.if_id = htole32(sc->sc_if_id);
3078
3079	arc4random_buf(cmd.params.req.hash, sizeof(cmd.params.req.hash));
3080
3081	/*
3082	 * Initialize the RSS CPU indirection table.
3083	 *
3084	 * The table is used to choose the queue to place incoming packets.
3085	 * Incoming packets are hashed. The lowest bits in the hash result
3086	 * are used as the index into the CPU indirection table.
3087	 * Each entry in the table contains the RSS CPU-ID returned by the NIC
3088	 * create. Based on the CPU ID, the receive completion is routed to
3089	 * the corresponding RSS CQs. (Non-RSS packets are always completed
3090	 * on the default (0) CQ).
3091	 */
3092	for (i = 0, j = 0; j < sc->sc_nrq; j++) {
3093		if (sc->sc_rq[j]->cfg.is_rss_queue)
3094			tbl[i++] = sc->sc_rq[j]->rss_cpuid;
3095	}
3096	if (i > 0)
3097		cmd.params.req.cpu_tbl_sz_log2 = htole16(ilog2(i));
3098	else
3099		return (ENXIO);
3100
3101	return (oce_cmd(sc, SUBSYS_NIC, OPCODE_NIC_CONFIG_RSS, OCE_MBX_VER_V0,
3102	    &cmd, sizeof(cmd)));
3103}
3104#endif /* OCE_RSS */
3105
3106/**
3107 * @brief Function for hardware update multicast filter
3108 * @param sc software handle to the device
3109 * @param multi table of multicast addresses
3110 * @param naddr number of multicast addresses in the table
3111 */
3112int
3113oce_update_mcast(struct oce_softc *sc,
3114    uint8_t multi[][ETHER_ADDR_LEN], int naddr)
3115{
3116 struct mbx_set_common_iface_multicast cmd;
3117
3118	memset(&cmd, 0, sizeof(cmd));
3119
3120	memcpy(&cmd.params.req.mac[0], &multi[0], naddr * ETHER_ADDR_LEN);
3121	cmd.params.req.num_mac = htole16(naddr);
3122	cmd.params.req.if_id = sc->sc_if_id;
3123
3124	return (oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_SET_IFACE_MULTICAST,
3125	    OCE_MBX_VER_V0, &cmd, sizeof(cmd)));
3126}
3127
3128/**
3129 * @brief RXF function to enable/disable device promiscuous mode
3130 * @param sc software handle to the device
3131 * @param enable enable/disable flag
3132 * @returns 0 on success, EIO on failure
3133 * @note
 3134 * The OPCODE_NIC_CONFIG_PROMISCUOUS command is deprecated for Lancer.
3135 * This function uses the COMMON_SET_IFACE_RX_FILTER command instead.
3136 */
3137int
3138oce_set_promisc(struct oce_softc *sc, int enable)
3139{
3140 struct mbx_set_common_iface_rx_filter cmd;
3141 struct iface_rx_filter_ctx *req;
3142
3143	memset(&cmd, 0, sizeof(cmd));
3144
3145	req = &cmd.params.req;
3146	req->if_id = sc->sc_if_id;
3147
3148	if (enable)
3149		req->iface_flags = req->iface_flags_mask =
3150		    MBX_RX_IFACE_PROMISC | MBX_RX_IFACE_VLAN_PROMISC;
3151
3152	return (oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_SET_IFACE_RX_FILTER,
3153	    OCE_MBX_VER_V0, &cmd, sizeof(cmd)));
3154}
3155
3156/**
3157 * @brief Function to query the link status from the hardware
3158 * @param sc software handle to the device
 3159 * Updates sc->sc_link_up and sc->sc_link_speed in the softc.
3160 * @returns 0 on success, EIO on failure
3161 */
3162int
3163oce_get_link_status(struct oce_softc *sc)
3164{
3165 struct mbx_query_common_link_config cmd;
3166 int err;
3167
3168	memset(&cmd, 0, sizeof(cmd));
3169
3170	err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_QUERY_LINK_CONFIG,
3171	    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
3172	if (err)
3173		return (err);
3174
3175	sc->sc_link_up = (letoh32(cmd.params.rsp.logical_link_status) ==
3176	    NTWK_LOGICAL_LINK_UP);
3177
3178	if (cmd.params.rsp.mac_speed < 5)
3179		sc->sc_link_speed = cmd.params.rsp.mac_speed;
3180	else
3181		sc->sc_link_speed = 0;
3182
3183 return (0);
3184}
3185
3186void
3187oce_macaddr_set(struct oce_softc *sc)
3188{
3189 uint32_t old_pmac_id = sc->sc_pmac_id;
3190 int status = 0;
3191
3192	if (!memcmp(sc->sc_macaddr, sc->sc_ac.ac_enaddr, ETHER_ADDR_LEN))
3193		return;
3194
3195	status = oce_macaddr_add(sc, sc->sc_ac.ac_enaddr, &sc->sc_pmac_id);
3196	if (!status)
3197		status = oce_macaddr_del(sc, old_pmac_id);
	Value stored to 'status' is never read
3198	else
3199		printf("%s: failed to set MAC address\n", sc->sc_dev.dv_xname);
3200}
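
Editorial note on the warning: when oce_macaddr_add() succeeds, the return value of oce_macaddr_del() is stored into 'status' but never examined again, so a failure to release the old permanent MAC entry goes unreported. Below is a minimal sketch of one possible fix; it is an editorial suggestion using only functions already present in this file, not the committed upstream change.

	void
	oce_macaddr_set(struct oce_softc *sc)
	{
		uint32_t old_pmac_id = sc->sc_pmac_id;

		if (!memcmp(sc->sc_macaddr, sc->sc_ac.ac_enaddr,
		    ETHER_ADDR_LEN))
			return;

		if (oce_macaddr_add(sc, sc->sc_ac.ac_enaddr,
		    &sc->sc_pmac_id)) {
			printf("%s: failed to set MAC address\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		/* check the result that the original code discarded */
		if (oce_macaddr_del(sc, old_pmac_id))
			printf("%s: failed to delete previous MAC address\n",
			    sc->sc_dev.dv_xname);
	}

Alternatively, if a deletion failure is considered harmless here, discarding the result explicitly with '(void)oce_macaddr_del(sc, old_pmac_id);' documents the intent and silences the dead-store checker without changing behaviour.
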
3201
3202int
3203oce_macaddr_get(struct oce_softc *sc, uint8_t *macaddr)
3204{
3205 struct mbx_query_common_iface_mac cmd;
3206 int err;
3207
3208	memset(&cmd, 0, sizeof(cmd));
3209
3210	cmd.params.req.type = MAC_ADDRESS_TYPE_NETWORK;
3211	cmd.params.req.permanent = 1;
3212
3213	err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_QUERY_IFACE_MAC,
3214	    OCE_MBX_VER_V0, &cmd, sizeof(cmd));
3215	if (err == 0)
3216		memcpy(macaddr, &cmd.params.rsp.mac.mac_addr[0],
3217		    ETHER_ADDR_LEN);
3218 return (err);
3219}
3220
3221int
3222oce_macaddr_add(struct oce_softc *sc, uint8_t *enaddr, uint32_t *pmac)
3223{
3224 struct mbx_add_common_iface_mac cmd;
3225 int err;
3226
3227 memset(&cmd, 0, sizeof(cmd));
3228
3229 cmd.params.req.if_id = htole16(sc->sc_if_id);
3230 memcpy(cmd.params.req.mac_address, enaddr, ETHER_ADDR_LEN);
3231
3232 err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_ADD_IFACE_MAC,
3233 OCE_MBX_VER_V0, &cmd, sizeof(cmd));
3234 if (err == 0)
3235 *pmac = letoh32(cmd.params.rsp.pmac_id);
3236 return (err);
3237}
3238
3239int
3240oce_macaddr_del(struct oce_softc *sc, uint32_t pmac)
3241{
3242 struct mbx_del_common_iface_mac cmd;
3243
3244 memset(&cmd, 0, sizeof(cmd));
3245
3246 cmd.params.req.if_id = htole16(sc->sc_if_id);
3247 cmd.params.req.pmac_id = htole32(pmac);
3248
3249 return (oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_DEL_IFACE_MAC,
3250 OCE_MBX_VER_V0, &cmd, sizeof(cmd)));
3251}
3252
3253int
3254oce_new_rq(struct oce_softc *sc, struct oce_rq *rq)
3255{
3256 struct mbx_create_nic_rq cmd;
3257 int err, npages;
3258
3259 memset(&cmd, 0, sizeof(cmd));
3260
3261 npages = oce_load_ring(sc, rq->ring, &cmd.params.req.pages[0],
3262 nitems(cmd.params.req.pages));
3263 if (!npages) {
3264 printf("%s: failed to load the rq ring\n", __func__);
3265 return (1);
3266 }
3267
3268 if (IS_XE201(sc)) {
3269 cmd.params.req.frag_size = rq->fragsize / 2048;
3270 cmd.params.req.page_size = 1;
3271 } else
3272 cmd.params.req.frag_size = ilog2(rq->fragsize);
3273 cmd.params.req.num_pages = npages;
3274 cmd.params.req.cq_id = rq->cq->id;
3275 cmd.params.req.if_id = htole32(sc->sc_if_id);
3276 cmd.params.req.max_frame_size = htole16(rq->mtu);
3277 cmd.params.req.is_rss_queue = htole32(rq->rss);
3278
3279 err = oce_cmd(sc, SUBSYS_NIC, OPCODE_NIC_CREATE_RQ,
3280 IS_XE201(sc) ? OCE_MBX_VER_V1 : OCE_MBX_VER_V0, &cmd,
3281 sizeof(cmd));
3282 if (err)
3283 return (err);
3284
3285 rq->id = letoh16(cmd.params.rsp.rq_id);
3286 rq->rss_cpuid = cmd.params.rsp.rss_cpuid;
3287
3288 return (0);
3289}
3290
3291int
3292oce_new_wq(struct oce_softc *sc, struct oce_wq *wq)
3293{
3294 struct mbx_create_nic_wq cmd;
3295 int err, npages;
3296
3297 memset(&cmd, 0, sizeof(cmd));
3298
3299 npages = oce_load_ring(sc, wq->ring, &cmd.params.req.pages[0],
3300 nitems(cmd.params.req.pages));
3301 if (!npages) {
3302 printf("%s: failed to load the wq ring\n", __func__);
3303 return (1);
3304 }
3305
3306 if (IS_XE201(sc))
3307 cmd.params.req.if_id = sc->sc_if_id;
3308 cmd.params.req.nic_wq_type = NIC_WQ_TYPE_STANDARD;
3309 cmd.params.req.num_pages = npages;
3310 cmd.params.req.wq_size = ilog2(wq->nitems) + 1;
3311 cmd.params.req.cq_id = htole16(wq->cq->id);
3312 cmd.params.req.ulp_num = 1;
3313
3314 err = oce_cmd(sc, SUBSYS_NIC, OPCODE_NIC_CREATE_WQ,
3315 IS_XE201(sc) ? OCE_MBX_VER_V1 : OCE_MBX_VER_V0, &cmd,
3316 sizeof(cmd));
3317 if (err)
3318 return (err);
3319
3320 wq->id = letoh16(cmd.params.rsp.wq_id);
3321
3322 return (0);
3323}
3324
3325int
3326oce_new_mq(struct oce_softc *sc, struct oce_mq *mq)
3327{
3328 struct mbx_create_common_mq_ex cmd;
3329 union oce_mq_ext_ctx *ctx;
3330 int err, npages;
3331
3332 memset(&cmd, 0, sizeof(cmd));
3333
3334 npages = oce_load_ring(sc, mq->ring, &cmd.params.req.pages[0],
3335 nitems(cmd.params.req.pages));
3336 if (!npages) {
3337 printf("%s: failed to load the mq ring\n", __func__);
3338 return (-1);
3339 }
3340
3341 ctx = &cmd.params.req.context;
3342 ctx->v0.num_pages = npages;
3343 ctx->v0.cq_id = mq->cq->id;
3344 ctx->v0.ring_size = ilog2(mq->nitems) + 1;
3345 ctx->v0.valid = 1;
3346 /* Subscribe to Link State and Group 5 Events (bits 1 and 5 set) */
3347 ctx->v0.async_evt_bitmap = 0xffffffff;
3348
3349 err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_CREATE_MQ_EXT,
3350 OCE_MBX_VER_V0, &cmd, sizeof(cmd));
3351 if (err)
3352 return (err);
3353
3354 mq->id = letoh16(cmd.params.rsp.mq_id);
3355
3356 return (0);
3357}
3358
3359int
3360oce_new_eq(struct oce_softc *sc, struct oce_eq *eq)
3361{
3362 struct mbx_create_common_eq cmd;
3363 int err, npages;
3364
3365 memset(&cmd, 0, sizeof(cmd));
3366
3367 npages = oce_load_ring(sc, eq->ring, &cmd.params.req.pages[0],
3368 nitems(cmd.params.req.pages));
3369 if (!npages) {
3370 printf("%s: failed to load the eq ring\n", __func__);
3371 return (-1);
3372 }
3373
3374 cmd.params.req.ctx.num_pages = htole16(npages);
3375 cmd.params.req.ctx.valid = 1;
3376 cmd.params.req.ctx.size = (eq->isize == 4) ? 0 : 1;
3377 cmd.params.req.ctx.count = ilog2(eq->nitems / 256);
3378 cmd.params.req.ctx.armed = 0;
3379 cmd.params.req.ctx.delay_mult = htole32(eq->delay);
3380
3381 err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_CREATE_EQ,
3382 OCE_MBX_VER_V0, &cmd, sizeof(cmd));
3383 if (err)
3384 return (err);
3385
3386 eq->id = letoh16(cmd.params.rsp.eq_id);
3387
3388 return (0);
3389}
3390
3391int
3392oce_new_cq(struct oce_softc *sc, struct oce_cq *cq)
3393{
3394 struct mbx_create_common_cq cmd;
3395 union oce_cq_ctx *ctx;
3396 int err, npages;
3397
3398 memset(&cmd, 0, sizeof(cmd));
3399
3400 npages = oce_load_ring(sc, cq->ring, &cmd.params.req.pages[0],
3401 nitems(cmd.params.req.pages));
3402 if (!npages) {
3403 printf("%s: failed to load the cq ring\n", __func__);
3404 return (-1);
3405 }
3406
3407 ctx = &cmd.params.req.cq_ctx;
3408
3409 if (IS_XE201(sc)) {
3410 ctx->v2.num_pages = htole16(npages);
3411 ctx->v2.page_size = 1; /* for 4K */
3412 ctx->v2.eventable = cq->eventable;
3413 ctx->v2.valid = 1;
3414 ctx->v2.count = ilog2(cq->nitems / 256);
3415 ctx->v2.nodelay = cq->nodelay;
3416 ctx->v2.coalesce_wm = cq->ncoalesce;
3417 ctx->v2.armed = 0;
3418 ctx->v2.eq_id = cq->eq->id;
3419 if (ctx->v2.count == 3) {
3420 if (cq->nitems > (4*1024)-1)
3421 ctx->v2.cqe_count = (4*1024)-1;
3422 else
3423 ctx->v2.cqe_count = cq->nitems;
3424 }
3425 } else {
3426 ctx->v0.num_pages = htole16(npages);
3427 ctx->v0.eventable = cq->eventable;
3428 ctx->v0.valid = 1;
3429 ctx->v0.count = ilog2(cq->nitems / 256);
3430 ctx->v0.nodelay = cq->nodelay;
3431 ctx->v0.coalesce_wm = cq->ncoalesce;
3432 ctx->v0.armed = 0;
3433 ctx->v0.eq_id = cq->eq->id;
3434 }
3435
3436 err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_CREATE_CQ,
3437 IS_XE201(sc) ? OCE_MBX_VER_V2 : OCE_MBX_VER_V0, &cmd,
3438 sizeof(cmd));
3439 if (err)
3440 return (err);
3441
3442 cq->id = letoh16(cmd.params.rsp.cq_id);
3443
3444 return (0);
3445}
3446
3447int
3448oce_init_stats(struct oce_softc *sc)
3449{
3450 union cmd {
3451 struct mbx_get_nic_stats_v0 _be2;
3452 struct mbx_get_nic_stats _be3;
3453 struct mbx_get_pport_stats _xe201;
3454 };
3455
3456 sc->sc_statcmd = malloc(sizeof(union cmd), M_DEVBUF, M_ZERO | M_NOWAIT);
3457 if (sc->sc_statcmd == NULL) {
3458 printf("%s: failed to allocate statistics command block\n",
3459 sc->sc_dev.dv_xname);
3460 return (-1);
3461 }
3462 return (0);
3463}
3464
3465int
3466oce_update_stats(struct oce_softc *sc)
3467{
3468 struct ifnet *ifp = &sc->sc_ac.ac_if;
3469 uint64_t rxe, txe;
3470 int err;
3471
3472 if (ISSET(sc->sc_flags, OCE_F_BE2))
3473 err = oce_stats_be2(sc, &rxe, &txe);
3474 else if (ISSET(sc->sc_flags, OCE_F_BE3))
3475 err = oce_stats_be3(sc, &rxe, &txe);
3476 else
3477 err = oce_stats_xe(sc, &rxe, &txe);
3478 if (err)
3479 return (err);
3480
3481 ifp->if_ierrors += (rxe > sc->sc_rx_errors) ?
3482 rxe - sc->sc_rx_errors : sc->sc_rx_errors - rxe;
3483 sc->sc_rx_errors = rxe;
3484 ifp->if_oerrors += (txe > sc->sc_tx_errors) ?
3485 txe - sc->sc_tx_errors : sc->sc_tx_errors - txe;
3486 sc->sc_tx_errors = txe;
3487
3488 return (0);
3489}
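
The two accumulations above add the absolute difference between the fresh hardware count and the cached one, so the ifnet counters keep increasing even if a hardware counter resets or moves backwards. The same pattern as a standalone helper, purely for illustration:

/* Sketch of the delta pattern used above (illustrative helper only). */
static uint64_t
oce_counter_delta(uint64_t cur, uint64_t prev)
{
	/* Unsigned-safe absolute difference. */
	return (cur > prev ? cur - prev : prev - cur);
}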
3490
3491int
3492oce_stats_be2(struct oce_softc *sc, uint64_t *rxe, uint64_t *txe)
3493{
3494 struct mbx_get_nic_stats_v0 *cmd = sc->sc_statcmd;
3495 struct oce_pmem_stats *ms;
3496 struct oce_rxf_stats_v0 *rs;
3497 struct oce_port_rxf_stats_v0 *ps;
3498 int err;
3499
3500 memset(cmd, 0, sizeof(*cmd));
3501
3502 err = oce_cmd(sc, SUBSYS_NIC, OPCODE_NIC_GET_STATS, OCE_MBX_VER_V0,
3503 cmd, sizeof(*cmd));
3504 if (err)
3505 return (err);
3506
3507 ms = &cmd->params.rsp.stats.pmem;
3508 rs = &cmd->params.rsp.stats.rxf;
3509 ps = &rs->port[sc->sc_port];
3510
3511 *rxe = ps->rx_crc_errors + ps->rx_in_range_errors +
3512 ps->rx_frame_too_long + ps->rx_dropped_runt +
3513 ps->rx_ip_checksum_errs + ps->rx_tcp_checksum_errs +
3514 ps->rx_udp_checksum_errs + ps->rxpp_fifo_overflow_drop +
3515 ps->rx_dropped_tcp_length + ps->rx_dropped_too_small +
3516 ps->rx_dropped_too_short + ps->rx_out_range_errors +
3517 ps->rx_dropped_header_too_small + ps->rx_input_fifo_overflow_drop +
3518 ps->rx_alignment_symbol_errors;
3519 if (sc->sc_if_id)
3520 *rxe += rs->port1_jabber_events;
3521 else
3522 *rxe += rs->port0_jabber_events;
3523 *rxe += ms->eth_red_drops;
3524
3525 *txe = 0; /* hardware doesn't provide any extra tx error statistics */
3526
3527 return (0);
3528}
3529
3530int
3531oce_stats_be3(struct oce_softc *sc, uint64_t *rxe, uint64_t *txe)
3532{
3533 struct mbx_get_nic_stats *cmd = sc->sc_statcmd;
3534 struct oce_pmem_stats *ms;
3535 struct oce_rxf_stats_v1 *rs;
3536 struct oce_port_rxf_stats_v1 *ps;
3537 int err;
3538
3539 memset(cmd, 0, sizeof(*cmd));
3540
3541 err = oce_cmd(sc, SUBSYS_NIC, OPCODE_NIC_GET_STATS, OCE_MBX_VER_V1,
3542 cmd, sizeof(*cmd));
3543 if (err)
3544 return (err);
3545
3546 ms = &cmd->params.rsp.stats.pmem;
3547 rs = &cmd->params.rsp.stats.rxf;
3548 ps = &rs->port[sc->sc_port];
3549
3550 *rxe = ps->rx_crc_errors + ps->rx_in_range_errors +
3551 ps->rx_frame_too_long + ps->rx_dropped_runt +
3552 ps->rx_ip_checksum_errs + ps->rx_tcp_checksum_errs +
3553 ps->rx_udp_checksum_errs + ps->rxpp_fifo_overflow_drop +
3554 ps->rx_dropped_tcp_length + ps->rx_dropped_too_small +
3555 ps->rx_dropped_too_short + ps->rx_out_range_errors +
3556 ps->rx_dropped_header_too_small + ps->rx_input_fifo_overflow_drop +
3557 ps->rx_alignment_symbol_errors + ps->jabber_events;
3558 *rxe += ms->eth_red_drops;
3559
3560 *txe = 0; /* hardware doesn't provide any extra tx error statistics */
3561
3562 return (0);
3563}
3564
3565int
3566oce_stats_xe(struct oce_softc *sc, uint64_t *rxe, uint64_t *txe)
3567{
3568 struct mbx_get_pport_stats *cmd = sc->sc_statcmd;
3569 struct oce_pport_stats *pps;
3570 int err;
3571
3572 memset(cmd, 0, sizeof(*cmd));
3573
3574 cmd->params.req.reset_stats = 0;
3575 cmd->params.req.port_number = sc->sc_if_id;
3576
3577 err = oce_cmd(sc, SUBSYS_NIC, OPCODE_NIC_GET_PPORT_STATS,
3578 OCE_MBX_VER_V0, cmd, sizeof(*cmd));
3579 if (err)
3580 return (err);
3581
3582 pps = &cmd->params.rsp.pps;
3583
3584 *rxe = pps->rx_discards + pps->rx_errors + pps->rx_crc_errors +
3585 pps->rx_alignment_errors + pps->rx_symbol_errors +
3586 pps->rx_frames_too_long + pps->rx_internal_mac_errors +
3587 pps->rx_undersize_pkts + pps->rx_oversize_pkts + pps->rx_jabbers +
3588 pps->rx_control_frames_unknown_opcode + pps->rx_in_range_errors +
3589 pps->rx_out_of_range_errors + pps->rx_ip_checksum_errors +
3590 pps->rx_tcp_checksum_errors + pps->rx_udp_checksum_errors +
3591 pps->rx_fifo_overflow + pps->rx_input_fifo_overflow +
3592 pps->rx_drops_too_many_frags + pps->rx_drops_mtu;
3593
3594 *txe = pps->tx_discards + pps->tx_errors + pps->tx_internal_mac_errors;
3595
3596 return (0);
3597}