Bug Summary

File: dev/pv/if_xnf.c
Warning: line 649, column 25
Dereference of null pointer: the analyzer finds a path through xnf_encap() on which every mbuf in the chain has zero length, so the loop frees the whole chain without ever assigning a descriptor, and 'txd' is still NULL when line 649 clears the chunk flag.

Annotated Source Code


clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name if_xnf.c -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/lib/clang/13.0.0 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -D CONFIG_DRM_AMD_DC_DCN3_0 -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant -fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free 
-fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /usr/obj/sys/arch/amd64/compile/GENERIC.MP/scan-build/2022-01-12-131800-47421-1 -x c /usr/src/sys/dev/pv/if_xnf.c
1/* $OpenBSD: if_xnf.c,v 1.67 2022/01/09 05:42:58 jsg Exp $ */
2
3/*
4 * Copyright (c) 2015, 2016 Mike Belopuhov
5 *
6 * Permission to use, copy, modify, and distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18
19#include "bpfilter.h"
20#include "vlan.h"
21#include "xen.h"
22
23#include <sys/param.h>
24#include <sys/systm.h>
25#include <sys/atomic.h>
26#include <sys/device.h>
27#include <sys/kernel.h>
28#include <sys/malloc.h>
29#include <sys/mbuf.h>
30#include <sys/pool.h>
31#include <sys/queue.h>
32#include <sys/socket.h>
33#include <sys/sockio.h>
34#include <sys/task.h>
35#include <sys/timeout.h>
36
37#include <machine/bus.h>
38
39#include <dev/pv/xenreg.h>
40#include <dev/pv/xenvar.h>
41
42#include <net/if.h>
43#include <net/if_media.h>
44
45#include <netinet/in.h>
46#include <netinet/if_ether.h>
47
48#ifdef INET6
49#include <netinet/ip6.h>
50#endif
51
52#if NBPFILTER > 0
53#include <net/bpf.h>
54#endif
55
56/* #define XNF_DEBUG */
57
58#ifdef XNF_DEBUG
59#define DPRINTF(x...) printf(x)
60#else
61#define DPRINTF(x...)
62#endif
63
64/*
65 * Rx ring
66 */
67
68struct xnf_rx_req {
69 uint16_t rxq_id;
70 uint16_t rxq_pad;
71 uint32_t rxq_ref;
72} __packed;
73
74struct xnf_rx_rsp {
75 uint16_t rxp_id;
76 uint16_t rxp_offset;
77 uint16_t rxp_flags;
78#define XNF_RXF_CSUM_VALID 0x0001
79#define XNF_RXF_CSUM_BLANK 0x0002
80#define XNF_RXF_CHUNK 0x0004
81#define XNF_RXF_MGMT 0x0008
82 int16_t rxp_status;
83} __packed;
84
85union xnf_rx_desc {
86 struct xnf_rx_req rxd_req;
87 struct xnf_rx_rsp rxd_rsp;
88} __packed;
89
90#define XNF_RX_DESC 256
91#define XNF_MCLEN PAGE_SIZE
92#define XNF_RX_MIN 32
93
94struct xnf_rx_ring {
95 volatile uint32_t rxr_prod;
96 volatile uint32_t rxr_prod_event;
97 volatile uint32_t rxr_cons;
98 volatile uint32_t rxr_cons_event;
99 uint32_t rxr_reserved[12];
100 union xnf_rx_desc rxr_desc[XNF_RX_DESC];
101} __packed;
102
103
104/*
105 * Tx ring
106 */
107
108struct xnf_tx_req {
109 uint32_t txq_ref;
110 uint16_t txq_offset;
111 uint16_t txq_flags;
112#define XNF_TXF_CSUM_BLANK 0x0001
113#define XNF_TXF_CSUM_VALID 0x0002
114#define XNF_TXF_CHUNK 0x0004
115#define XNF_TXF_ETXRA 0x0008
116 uint16_t txq_id;
117 uint16_t txq_size;
118} __packed;
119
120struct xnf_tx_rsp {
121 uint16_t txp_id;
122 int16_t txp_status;
123} __packed;
124
125union xnf_tx_desc {
126 struct xnf_tx_req txd_req;
127 struct xnf_tx_rsp txd_rsp;
128} __packed;
129
130#define XNF_TX_DESC 256
131#define XNF_TX_FRAG 18
132
133struct xnf_tx_ring {
134 volatile uint32_t txr_prod;
135 volatile uint32_t txr_prod_event;
136 volatile uint32_t txr_cons;
137 volatile uint32_t txr_cons_event;
138 uint32_t txr_reserved[12];
139 union xnf_tx_desc txr_desc[XNF_TX_DESC];
140} __packed;
141
142struct xnf_tx_buf {
143 uint32_t txb_ndesc;
144 bus_dmamap_t txb_dmap;
145 struct mbuf *txb_mbuf;
146};
147
148/* Management frame, "extra info" in Xen parlance */
149struct xnf_mgmt {
150 uint8_t mg_type;
151#define XNF_MGMT_MCAST_ADD 2
152#define XNF_MGMT_MCAST_DEL 3
153 uint8_t mg_flags;
154 union {
155 uint8_t mgu_mcaddr[ETHER_ADDR_LEN];
156 uint16_t mgu_pad[3];
157 } u;
158#define mg_mcaddr u.mgu_mcaddr
159} __packed;
160
161
162struct xnf_softc {
163 struct device sc_dev;
164 struct device *sc_parent;
165 char sc_node[XEN_MAX_NODE_LEN];
166 char sc_backend[XEN_MAX_BACKEND_LEN];
167 bus_dma_tag_t sc_dmat;
168 int sc_domid;
169
170 struct arpcom sc_ac;
171 struct ifmedia sc_media;
172
173 xen_intr_handle_t sc_xih;
174
175 int sc_caps;
176#define XNF_CAP_SG 0x0001
177#define XNF_CAP_CSUM4 0x0002
178#define XNF_CAP_CSUM6 0x0004
179#define XNF_CAP_MCAST 0x0008
180#define XNF_CAP_SPLIT 0x0010
181#define XNF_CAP_MULTIQ 0x0020
182
183 /* Rx ring */
184 struct xnf_rx_ring *sc_rx_ring;
185 bus_dmamap_t sc_rx_rmap; /* map for the ring */
186 bus_dma_segment_t sc_rx_seg;
187 uint32_t sc_rx_ref; /* grant table ref */
188 uint32_t sc_rx_cons;
189 struct mbuf *sc_rx_buf[XNF_RX_DESC];
190 bus_dmamap_t sc_rx_dmap[XNF_RX_DESC]; /* maps for packets */
191 struct mbuf *sc_rx_cbuf[2]; /* chain handling */
192
193 /* Tx ring */
194 struct xnf_tx_ring *sc_tx_ring;
195 bus_dmamap_t sc_tx_rmap; /* map for the ring */
196 bus_dma_segment_t sc_tx_seg;
197 uint32_t sc_tx_ref; /* grant table ref */
198 uint32_t sc_tx_cons;
199 int sc_tx_frags;
200 uint32_t sc_tx_next; /* next buffer */
201 volatile unsigned int sc_tx_avail;
202 struct xnf_tx_buf sc_tx_buf[XNF_TX_DESC];
203};
204
205int xnf_match(struct device *, void *, void *);
206void xnf_attach(struct device *, struct device *, void *);
207int xnf_detach(struct device *, int);
208int xnf_lladdr(struct xnf_softc *);
209int xnf_ioctl(struct ifnet *, u_long, caddr_t);
210int xnf_media_change(struct ifnet *);
211void xnf_media_status(struct ifnet *, struct ifmediareq *);
212int xnf_iff(struct xnf_softc *);
213void xnf_init(struct xnf_softc *);
214void xnf_stop(struct xnf_softc *);
215void xnf_start(struct ifqueue *);
216int xnf_encap(struct xnf_softc *, struct mbuf *, uint32_t *);
217void xnf_intr(void *);
218void xnf_watchdog(struct ifnet *);
219void xnf_txeof(struct xnf_softc *);
220void xnf_rxeof(struct xnf_softc *);
221int xnf_rx_ring_fill(struct xnf_softc *);
222int xnf_rx_ring_create(struct xnf_softc *);
223void xnf_rx_ring_drain(struct xnf_softc *);
224void xnf_rx_ring_destroy(struct xnf_softc *);
225int xnf_tx_ring_create(struct xnf_softc *);
226void xnf_tx_ring_drain(struct xnf_softc *);
227void xnf_tx_ring_destroy(struct xnf_softc *);
228int xnf_capabilities(struct xnf_softc *sc);
229int xnf_init_backend(struct xnf_softc *);
230
231struct cfdriver xnf_cd = {
232 NULL, "xnf", DV_IFNET
233};
234
235const struct cfattach xnf_ca = {
236 sizeof(struct xnf_softc), xnf_match, xnf_attach, xnf_detach
237};
238
239int
240xnf_match(struct device *parent, void *match, void *aux)
241{
242 struct xen_attach_args *xa = aux;
243
244 if (strcmp("vif", xa->xa_name))
245 return (0);
246
247 return (1);
248}
249
250void
251xnf_attach(struct device *parent, struct device *self, void *aux)
252{
253 struct xen_attach_args *xa = aux;
254 struct xnf_softc *sc = (struct xnf_softc *)self;
255 struct ifnet *ifp = &sc->sc_ac.ac_if;
256
257 sc->sc_parent = parent;
258 sc->sc_dmat = xa->xa_dmat;
259 sc->sc_domid = xa->xa_domid;
260
261 memcpy(sc->sc_node, xa->xa_node, XEN_MAX_NODE_LEN);
262 memcpy(sc->sc_backend, xa->xa_backend, XEN_MAX_BACKEND_LEN);
263
264 strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);
265
266 if (xnf_lladdr(sc)) {
267 printf(": failed to obtain MAC address\n");
268 return;
269 }
270
271 if (xen_intr_establish(0, &sc->sc_xih, sc->sc_domid, xnf_intr, sc,
272 ifp->if_xname)) {
273 printf(": failed to establish an interrupt\n");
274 return;
275 }
276 xen_intr_mask(sc->sc_xih);
277
278 printf(" backend %d channel %u: address %s\n", sc->sc_domid,
279 sc->sc_xih, ether_sprintf(sc->sc_ac.ac_enaddr));
280
281 if (xnf_capabilities(sc)) {
282 xen_intr_disestablish(sc->sc_xih);
283 return;
284 }
285
286 if (sc->sc_caps & XNF_CAP_SG)
287 ifp->if_hardmtu = 9000;
288
289 if (xnf_rx_ring_create(sc)) {
290 xen_intr_disestablish(sc->sc_xih);
291 return;
292 }
293 if (xnf_tx_ring_create(sc)) {
294 xen_intr_disestablish(sc->sc_xih);
295 xnf_rx_ring_destroy(sc);
296 return;
297 }
298 if (xnf_init_backend(sc)) {
299 xen_intr_disestablish(sc->sc_xih);
300 xnf_rx_ring_destroy(sc);
301 xnf_tx_ring_destroy(sc);
302 return;
303 }
304
305 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
306 ifp->if_xflags = IFXF_MPSAFE;
307 ifp->if_ioctl = xnf_ioctl;
308 ifp->if_qstart = xnf_start;
309 ifp->if_watchdog = xnf_watchdog;
310 ifp->if_softc = sc;
311
312 ifp->if_capabilities = IFCAP_VLAN_MTU;
313 if (sc->sc_caps & XNF_CAP_CSUM4)
314 ifp->if_capabilities |= IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;
315 if (sc->sc_caps & XNF_CAP_CSUM6)
316 ifp->if_capabilities |= IFCAP_CSUM_TCPv6 | IFCAP_CSUM_UDPv6;
317
318 ifq_set_maxlen(&ifp->if_snd, XNF_TX_DESC - 1);
319
320 ifmedia_init(&sc->sc_media, IFM_IMASK, xnf_media_change,
321 xnf_media_status);
322 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_MANUAL, 0, NULL);
323 ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_MANUAL);
324
325 if_attach(ifp);
326 ether_ifattach(ifp);
327
328 /* Kick out emulated em's and re's */
329 xen_unplug_emulated(parent, XEN_UNPLUG_NIC);
330}
331
332int
333xnf_detach(struct device *self, int flags)
334{
335 struct xnf_softc *sc = (struct xnf_softc *)self;
336 struct ifnet *ifp = &sc->sc_ac.ac_if;
337
338 xnf_stop(sc);
339
340 ether_ifdetach(ifp);
341 if_detach(ifp);
342
343 xen_intr_disestablish(sc->sc_xih);
344
345 if (sc->sc_tx_ring)
346 xnf_tx_ring_destroy(sc);
347 if (sc->sc_rx_ring)
348 xnf_rx_ring_destroy(sc);
349
350 return (0);
351}
352
353static int
354nibble(int ch)
355{
356 if (ch >= '0' && ch <= '9')
357 return (ch - '0');
358 if (ch >= 'A' && ch <= 'F')
359 return (10 + ch - 'A');
360 if (ch >= 'a' && ch <= 'f')
361 return (10 + ch - 'a');
362 return (-1);
363}
364
365int
366xnf_lladdr(struct xnf_softc *sc)
367{
368 char enaddr[ETHER_ADDR_LEN];
369 char mac[32];
370 int i, j, lo, hi;
371
372 if (xs_getprop(sc->sc_parent, sc->sc_backend, "mac", mac, sizeof(mac)))
373 return (-1);
374
375 for (i = 0, j = 0; j < ETHER_ADDR_LEN; i += 3, j++) {
376 if ((hi = nibble(mac[i])) == -1 ||
377 (lo = nibble(mac[i+1])) == -1)
378 return (-1);
379 enaddr[j] = hi << 4 | lo;
380 }
381
382 memcpy(sc->sc_ac.ac_enaddr, enaddr, ETHER_ADDR_LEN);
383 return (0);
384}
385
386int
387xnf_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
388{
389 struct xnf_softc *sc = ifp->if_softc;
390 struct ifreq *ifr = (struct ifreq *)data;
391 int s, error = 0;
392
393 s = splnet();
394
395 switch (command) {
396 case SIOCSIFADDR:
397 ifp->if_flags |= IFF_UP;
398 if (!(ifp->if_flags & IFF_RUNNING))
399 xnf_init(sc);
400 break;
401 case SIOCSIFFLAGS:
402 if (ifp->if_flags & IFF_UP) {
403 if (ifp->if_flags & IFF_RUNNING)
404 error = ENETRESET;
405 else
406 xnf_init(sc);
407 } else {
408 if (ifp->if_flags & IFF_RUNNING)
409 xnf_stop(sc);
410 }
411 break;
412 case SIOCGIFMEDIA:
413 case SIOCSIFMEDIA:
414 error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, command);
415 break;
416 default:
417 error = ether_ioctl(ifp, &sc->sc_ac, command, data);
418 break;
419 }
420
421 if (error == ENETRESET) {
422 if (ifp->if_flags & IFF_RUNNING)
423 xnf_iff(sc);
424 error = 0;
425 }
426
427 splx(s);
428
429 return (error);
430}
431
432int
433xnf_media_change(struct ifnet *ifp)
434{
435 return (0);
436}
437
438void
439xnf_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
440{
441 ifmr->ifm_status = IFM_ACTIVE | IFM_AVALID;
442 ifmr->ifm_active = IFM_ETHER | IFM_MANUAL;
443}
444
445int
446xnf_iff(struct xnf_softc *sc)
447{
448 return (0);
449}
450
451void
452xnf_init(struct xnf_softc *sc)
453{
454 struct ifnet *ifp = &sc->sc_ac.ac_if;
455
456 xnf_stop(sc);
457
458 xnf_iff(sc);
459
460 xnf_rx_ring_fill(sc);
461
462 if (xen_intr_unmask(sc->sc_xih)) {
463 printf("%s: failed to enable interrupts\n", ifp->if_xname);
464 xnf_stop(sc);
465 return;
466 }
467
468 ifp->if_flags |= IFF_RUNNING;
469 ifq_clr_oactive(&ifp->if_snd);
470}
471
472void
473xnf_stop(struct xnf_softc *sc)
474{
475 struct ifnet *ifp = &sc->sc_ac.ac_if;
476
477 ifp->if_flags &= ~IFF_RUNNING;
478
479 xen_intr_mask(sc->sc_xih);
480
481 ifp->if_timer = 0;
482
483 ifq_barrier(&ifp->if_snd);
484 xen_intr_barrier(sc->sc_xih);
485
486 ifq_clr_oactive(&ifp->if_snd);
487
488 if (sc->sc_tx_ring)
489 xnf_tx_ring_drain(sc);
490 if (sc->sc_rx_ring)
491 xnf_rx_ring_drain(sc);
492}
493
494void
495xnf_start(struct ifqueue *ifq)
496{
497 struct ifnet *ifp = ifq->ifq_if;
498 struct xnf_softc *sc = ifp->if_softc;
499 struct xnf_tx_ring *txr = sc->sc_tx_ring;
500 struct mbuf *m;
501 int pkts = 0;
502 uint32_t prod, oprod;
503
504 bus_dmamap_sync(sc->sc_dmat, sc->sc_tx_rmap, 0, 0,
505 BUS_DMASYNC_POSTREAD);
506
507 prod = oprod = txr->txr_prod;
508
509 for (;;) {
1
Loop condition is true. Entering loop body
510 if (((XNF_TX_DESC - (prod - sc->sc_tx_cons)) <
2
Assuming the condition is false
4
Taking false branch
511 sc->sc_tx_frags) || !sc->sc_tx_avail) {
3
Assuming field 'sc_tx_avail' is not equal to 0
512 /* transient */
513 ifq_set_oactive(ifq);
514 break;
515 }
516
517 m = ifq_dequeue(ifq);
518 if (m == NULL)
5
Assuming 'm' is not equal to NULL
6
Taking false branch
519 break;
520
521 if (xnf_encap(sc, m, &prod)) {
7
Calling 'xnf_encap'
522 /* the chain is too large */
523 ifp->if_oerrors++;
524 m_freem(m);
525 continue;
526 }
527
528#if NBPFILTER > 0
529 if (ifp->if_bpf)
530 bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT);
531#endif
532 pkts++;
533 }
534 if (pkts > 0) {
535 txr->txr_prod = prod;
536 if (txr->txr_cons_event <= txr->txr_cons)
537 txr->txr_cons_event = txr->txr_cons +
538 ((txr->txr_prod - txr->txr_cons) >> 1) + 1;
539 bus_dmamap_sync(sc->sc_dmat, sc->sc_tx_rmap, 0, 0,
540 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
541 if (prod - txr->txr_prod_event < prod - oprod)
542 xen_intr_signal(sc->sc_xih);
543 ifp->if_timer = 5;
544 }
545}
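The notify test on line 541 is the Xen ring event-index idiom: signal the backend only if it asked to be woken somewhere inside the window of requests this call just published. A small numeric sketch with assumed ring indices (illustration only, not values from this report):

/*
 * Assumed example: oprod = 100 before this call, prod = 104 after
 * queueing four descriptors, txr_prod_event = 102 (the backend wants
 * an event once the producer moves past 102).  Then
 *
 *	prod - txr_prod_event = 2  <  prod - oprod = 4
 *
 * so the event index falls inside the newly published window
 * (100, 104] and xen_intr_signal() is called.  With
 * txr_prod_event = 98 the left side is 6, and with 105 the unsigned
 * subtraction wraps to a huge value; in both cases the test is false
 * and no signal is sent.
 */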
546
547static inline int
548xnf_fragcount(struct mbuf *m_head)
549{
550 struct mbuf *m;
551 vaddr_t va, va0;
552 int n = 0;
553
554 for (m = m_head; m != NULL; m = m->m_next) {
555 if (m->m_len == 0)
556 continue;
557 /* start of the buffer */
558 for (va0 = va = mtod(m, vaddr_t);
559 /* does the buffer end on this page? */
560 va + (PAGE_SIZE - (va & PAGE_MASK)) < va0 + m->m_len;
561 /* move on to the next page */
562 va += PAGE_SIZE - (va & PAGE_MASK))
563 n++;
564 n++;
565 }
566 return (n);
567}
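The loop in xnf_fragcount() counts one Tx descriptor per page that each mbuf's data touches. A standalone userland sketch of the same arithmetic; the page size and example offsets are assumptions for illustration, not values from this report:

#include <stdio.h>

#define PG_SIZE	4096UL
#define PG_MASK	(PG_SIZE - 1)

/*
 * Mirror of the xnf_fragcount() inner loop: count how many pages a
 * single buffer touches, one descriptor being needed per page.
 */
static int
pages_touched(unsigned long va, unsigned long len)
{
	unsigned long va0 = va;
	int n = 0;

	/* advance to the next page while the buffer extends past it */
	for (; va + (PG_SIZE - (va & PG_MASK)) < va0 + len;
	    va += PG_SIZE - (va & PG_MASK))
		n++;
	return (n + 1);
}

int
main(void)
{
	/* 200 bytes starting 100 bytes before a page boundary: 2 */
	printf("%d\n", pages_touched(PG_SIZE - 100, 200));
	/* 200 bytes at the start of a page: 1 */
	printf("%d\n", pages_touched(0, 200));
	return (0);
}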
568
569int
570xnf_encap(struct xnf_softc *sc, struct mbuf *m_head, uint32_t *prod)
571{
572 struct xnf_tx_ring *txr = sc->sc_tx_ring;
573 struct xnf_tx_buf *txb = NULL;
574 union xnf_tx_desc *txd = NULL;
8
'txd' initialized to a null pointer value
575 struct mbuf *m, **next;
576 uint32_t oprod = *prod;
577 uint16_t id;
578 int i, flags, n, used = 0;
579
580 if ((xnf_fragcount(m_head) > sc->sc_tx_frags) &&
9
Assuming the condition is false
581 m_defrag(m_head, M_DONTWAIT))
582 return (ENOBUFS);
583
584 flags = (sc->sc_domid << 16) | BUS_DMA_WRITE | BUS_DMA_NOWAIT;
585
586 next = &m_head->m_next;
587 for (m = m_head; m != NULL; m = *next) {
9.1
'm' is not equal to NULL
10
Loop condition is true. Entering loop body
14
Assuming 'm' is equal to NULL
15
Loop condition is false. Execution continues on line 649
588 /* Unlink and free zero length nodes. */
589 if (m->m_len == 0) {
11
Assuming field 'mh_len' is equal to 0
12
Taking true branch
590 *next = m->m_next;
591 m_free(m);
592 continue;
13
Execution continues on line 587
593 }
594 next = &m->m_next;
595
596 i = *prod & (XNF_TX_DESC - 1);
597 txd = &txr->txr_desc[i];
598
599 /*
600 * Find an unused TX buffer. We're guaranteed to find one
601 * because xnf_encap cannot be called with sc_tx_avail == 0.
602 */
603 do {
604 id = sc->sc_tx_next++ & (XNF_TX_DESC - 1);
605 txb = &sc->sc_tx_buf[id];
606 } while (txb->txb_mbuf);
607
608 if (bus_dmamap_load(sc->sc_dmat, txb->txb_dmap, m->m_data,
609 m->m_len, NULL, flags)) {
610 DPRINTF("%s: failed to load %u bytes @%lu\n",
611 sc->sc_dev.dv_xname, m->m_len,
612 mtod(m, vaddr_t) & PAGE_MASK);
613 goto unroll;
614 }
615
616 for (n = 0; n < txb->txb_dmap->dm_nsegs; n++) {
617 i = *prod & (XNF_TX_DESC - 1);
618 txd = &txr->txr_desc[i];
619
620 if (m == m_head && n == 0) {
621 if (m->m_pkthdr.csum_flags &
622 (M_TCP_CSUM_OUT | M_UDP_CSUM_OUT))
623 txd->txd_req.txq_flags =
624 XNF_TXF_CSUM_BLANK |
625 XNF_TXF_CSUM_VALID;
626 txd->txd_req.txq_size = m->m_pkthdr.len;
627 } else {
628 txd->txd_req.txq_size =
629 txb->txb_dmap->dm_segs[n].ds_len;
630 }
631 txd->txd_req.txq_ref =
632 txb->txb_dmap->dm_segs[n].ds_addr;
633 if (n == 0)
634 txd->txd_req.txq_offset =
635 mtod(m, vaddr_t) & PAGE_MASK;
636 /* The chunk flag will be removed from the last one */
637 txd->txd_req.txq_flags |= XNF_TXF_CHUNK;
638 txd->txd_req.txq_id = id;
639
640 txb->txb_ndesc++;
641 (*prod)++;
642 }
643
644 txb->txb_mbuf = m;
645 used++;
646 }
647
648 /* Clear the chunk flag from the last segment */
649 txd->txd_req.txq_flags &= ~XNF_TXF_CHUNK;
16
Dereference of null pointer
650 bus_dmamap_sync(sc->sc_dmat, sc->sc_tx_rmap, 0, 0,
651 BUS_DMASYNC_PREWRITE);
652
653 KASSERT(sc->sc_tx_avail > used);
654 atomic_sub_int(&sc->sc_tx_avail, used);
655
656 return (0);
657
658 unroll:
659 DPRINTF("%s: unrolling from %u to %u\n", sc->sc_dev.dv_xname,
660 *prod, oprod);
661 for (; *prod != oprod; (*prod)--) {
662 i = (*prod - 1) & (XNF_TX_DESC - 1);
663 txd = &txr->txr_desc[i];
664 id = txd->txd_req.txq_id;
665 txb = &sc->sc_tx_buf[id];
666
667 memset(txd, 0, sizeof(*txd));
668
669 if (txb->txb_mbuf) {
670 bus_dmamap_sync(sc->sc_dmat, txb->txb_dmap, 0, 0,
671 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
672 bus_dmamap_unload(sc->sc_dmat, txb->txb_dmap);
673
674 txb->txb_mbuf = NULL;
675 txb->txb_ndesc = 0;
676 }
677 }
678 return (ENOBUFS);
679}
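The path the analyzer reports (notes 8 through 16) needs a chain in which every mbuf has m_len == 0: the loop then frees all of them without ever reaching the descriptor-filling code, and txd keeps the NULL it was initialized with on line 574 when line 649 dereferences it. A post-loop 'used == 0' bail-out is not enough on its own, because xnf_start() calls m_freem() on the mbuf when xnf_encap() fails, and by that point the chain has already been freed here; any guard has to run before the loop starts unlinking. A minimal sketch of one such guard, an illustration rather than the fix that was actually committed, assuming m_pkthdr.len equals the sum of the chain's m_len values:

	/*
	 * Hypothetical guard near the top of xnf_encap(), before the
	 * chain walk: a packet whose total length is zero is the only
	 * chain that can skip the loop body entirely and leave txd
	 * NULL at line 649.  Rejecting it before any mbuf is unlinked
	 * lets the caller's m_freem() release the intact chain once.
	 */
	if (m_head->m_pkthdr.len == 0)
		return (EINVAL);

Whether a real packet can present such a chain is a separate question; even if the path is unreachable in practice, the guard documents the invariant the flag-clearing statement relies on.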
680
681void
682xnf_intr(void *arg)
683{
684 struct xnf_softc *sc = arg;
685 struct ifnet *ifp = &sc->sc_ac.ac_if;
686
687 if (ifp->if_flags & IFF_RUNNING) {
688 xnf_txeof(sc);
689 xnf_rxeof(sc);
690 }
691}
692
693void
694xnf_watchdog(struct ifnet *ifp)
695{
696 struct xnf_softc *sc = ifp->if_softc;
697 struct xnf_tx_ring *txr = sc->sc_tx_ring;
698
699 printf("%s: tx stuck: prod %u cons %u,%u evt %u,%u\n",
700 ifp->if_xname, txr->txr_prod, txr->txr_cons, sc->sc_tx_cons,
701 txr->txr_prod_event, txr->txr_cons_event);
702}
703
704void
705xnf_txeof(struct xnf_softc *sc)
706{
707 struct ifnet *ifp = &sc->sc_ac.ac_if;
708 struct xnf_tx_ring *txr = sc->sc_tx_ring;
709 struct xnf_tx_buf *txb;
710 union xnf_tx_desc *txd;
711 uint done = 0;
712 uint32_t cons;
713 uint16_t id;
714 int i;
715
716 bus_dmamap_sync(sc->sc_dmat, sc->sc_tx_rmap, 0, 0,
717 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
718
719 for (cons = sc->sc_tx_cons; cons != txr->txr_cons; cons++) {
720 i = cons & (XNF_TX_DESC - 1);
721 txd = &txr->txr_desc[i];
722 id = txd->txd_rsp.txp_id;
723 txb = &sc->sc_tx_buf[id];
724
725 KASSERT(txb->txb_ndesc > 0);
726 if (--txb->txb_ndesc == 0) {
727 bus_dmamap_sync(sc->sc_dmat, txb->txb_dmap, 0, 0,
728 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
729 bus_dmamap_unload(sc->sc_dmat, txb->txb_dmap);
730
731 m_free(txb->txb_mbuf);
732 txb->txb_mbuf = NULL;
733 done++;
734 }
735
736 memset(txd, 0, sizeof(*txd));
737 }
738
739 sc->sc_tx_cons = cons;
740 txr->txr_cons_event = sc->sc_tx_cons +
741 ((txr->txr_prod - sc->sc_tx_cons) >> 1) + 1;
742 bus_dmamap_sync(sc->sc_dmat, sc->sc_tx_rmap, 0, 0,
743 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
744
745 atomic_add_int(&sc->sc_tx_avail, done);
746
747 if (sc->sc_tx_cons == txr->txr_prod)
748 ifp->if_timer = 0;
749 if (ifq_is_oactive(&ifp->if_snd))
750 ifq_restart(&ifp->if_snd);
751}
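The txr_cons_event update on lines 740-741 asks the backend for the next completion interrupt only after roughly half of the still-outstanding descriptors have been retired. A worked example with assumed indices:

/*
 * Assumed example: txr_prod = 10 and sc_tx_cons = 4 after the loop,
 * so 6 descriptors are still outstanding.  Then
 *
 *	txr_cons_event = 4 + (6 >> 1) + 1 = 8
 *
 * i.e. the backend should raise the next event once its consumer
 * passes index 8, halfway through the remaining work, instead of
 * interrupting for every completed packet.
 */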
752
753void
754xnf_rxeof(struct xnf_softc *sc)
755{
756 struct ifnet *ifp = &sc->sc_ac.ac_if;
757 struct xnf_rx_ring *rxr = sc->sc_rx_ring;
758 union xnf_rx_desc *rxd;
759 struct mbuf_list ml = MBUF_LIST_INITIALIZER();
760 struct mbuf *fmp = sc->sc_rx_cbuf[0];
761 struct mbuf *lmp = sc->sc_rx_cbuf[1];
762 struct mbuf *m;
763 bus_dmamap_t dmap;
764 uint32_t cons;
765 uint16_t id;
766 int i, flags, len, offset;
767
768 bus_dmamap_sync(sc->sc_dmat, sc->sc_rx_rmap, 0, 0,
769 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
770
771 for (cons = sc->sc_rx_cons; cons != rxr->rxr_cons; cons++) {
772 i = cons & (XNF_RX_DESC - 1);
773 rxd = &rxr->rxr_desc[i];
774
775 id = rxd->rxd_rsp.rxp_id;
776 len = rxd->rxd_rsp.rxp_status;
777 flags = rxd->rxd_rsp.rxp_flags;
778 offset = rxd->rxd_rsp.rxp_offset;
779
780 dmap = sc->sc_rx_dmap[id];
781 bus_dmamap_sync(sc->sc_dmat, dmap, 0, 0,
782 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
783 bus_dmamap_unload(sc->sc_dmat, dmap);
784
785 m = sc->sc_rx_buf[id];
786 KASSERT(m != NULL);
787 sc->sc_rx_buf[id] = NULL;
788
789 if (flags & XNF_RXF_MGMT) {
790 printf("%s: management data present\n",
791 ifp->if_xname);
792 m_freem(m);
793 continue;
794 }
795
796 if (flags & XNF_RXF_CSUM_VALID)
797 m->m_pkthdr.csum_flags = M_TCP_CSUM_IN_OK |
798 M_UDP_CSUM_IN_OK;
799
800 if (len < 0 || (len + offset > PAGE_SIZE)) {
801 ifp->if_ierrors++;
802 m_freem(m);
803 continue;
804 }
805
806 m->m_len = len;
807 m->m_data += offset;
808
809 if (fmp == NULL) {
810 m->m_pkthdr.len = len;
811 fmp = m;
812 } else {
813 m->m_flags &= ~M_PKTHDR;
814 lmp->m_next = m;
815 fmp->m_pkthdr.len += m->m_len;
816 }
817 lmp = m;
818
819 if (flags & XNF_RXF_CHUNK) {
820 sc->sc_rx_cbuf[0] = fmp;
821 sc->sc_rx_cbuf[1] = lmp;
822 continue;
823 }
824
825 m = fmp;
826
827 ml_enqueue(&ml, m);
828 sc->sc_rx_cbuf[0] = sc->sc_rx_cbuf[1] = fmp = lmp = NULL;
829
830 memset(rxd, 0, sizeof(*rxd));
831 rxd->rxd_req.rxq_id = id;
832 }
833
834 sc->sc_rx_cons = cons;
835 rxr->rxr_cons_event = sc->sc_rx_cons + 1;
836 bus_dmamap_sync(sc->sc_dmat, sc->sc_rx_rmap, 0, 0,
837 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
838
839 if_input(ifp, &ml);
840
841 if (xnf_rx_ring_fill(sc) || (sc->sc_rx_cons != rxr->rxr_cons))
842 xen_intr_schedule(sc->sc_xih);
843}
844
845int
846xnf_rx_ring_fill(struct xnf_softc *sc)
847{
848 struct ifnet *ifp = &sc->sc_ac.ac_if;
849 struct xnf_rx_ring *rxr = sc->sc_rx_ring;
850 union xnf_rx_desc *rxd;
851 bus_dmamap_t dmap;
852 struct mbuf *m;
853 uint32_t cons, prod, oprod;
854 uint16_t id;
855 int i, flags, resched = 0;
856
857 cons = rxr->rxr_cons;
858 prod = oprod = rxr->rxr_prod;
859
860 while (prod - cons < XNF_RX_DESC) {
861 i = prod & (XNF_RX_DESC - 1);
862 rxd = &rxr->rxr_desc[i];
863
864 id = rxd->rxd_rsp.rxp_id;
865 if (sc->sc_rx_buf[id])
866 break;
867 m = MCLGETL(NULL, M_DONTWAIT, XNF_MCLEN);
868 if (m == NULL)
869 break;
870 m->m_len = m->m_pkthdr.len = XNF_MCLEN;
871 dmap = sc->sc_rx_dmap[id];
872 flags = (sc->sc_domid << 16) | BUS_DMA_READ | BUS_DMA_NOWAIT;
873 if (bus_dmamap_load_mbuf(sc->sc_dmat, dmap, m, flags)) {
874 m_freem(m);
875 break;
876 }
877 sc->sc_rx_buf[id] = m;
878 rxd->rxd_req.rxq_ref = dmap->dm_segs[0].ds_addr;
879 bus_dmamap_sync(sc->sc_dmat, dmap, 0, 0, BUS_DMASYNC_PREWRITE);
880 prod++;
881 }
882
883 rxr->rxr_prod = prod;
884 bus_dmamap_sync(sc->sc_dmat, sc->sc_rx_rmap, 0, 0,
885 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
886
887 if ((prod - cons < XNF_RX_MIN) && (ifp->if_flags & IFF_RUNNING))
888 resched = 1;
889 if (prod - rxr->rxr_prod_event < prod - oprod)
890 xen_intr_signal(sc->sc_xih);
891
892 return (resched);
893}
894
895int
896xnf_rx_ring_create(struct xnf_softc *sc)
897{
898 int i, flags, rsegs;
899
900 /* Allocate a page of memory for the ring */
901 if (bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
902 &sc->sc_rx_seg, 1, &rsegs, BUS_DMA_ZERO | BUS_DMA_NOWAIT)) {
903 printf("%s: failed to allocate memory for the rx ring\n",
904 sc->sc_dev.dv_xname);
905 return (-1);
906 }
907 /* Map in the allocated memory into the ring structure */
908 if (bus_dmamem_map(sc->sc_dmat, &sc->sc_rx_seg, 1, PAGE_SIZE,
909 (caddr_t *)(&sc->sc_rx_ring), BUS_DMA_NOWAIT)) {
910 printf("%s: failed to map memory for the rx ring\n",
911 sc->sc_dev.dv_xname);
912 goto errout;
913 }
914 /* Create a map to load the ring memory into */
915 if (bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
916 BUS_DMA_NOWAIT, &sc->sc_rx_rmap)) {
917 printf("%s: failed to create a memory map for the rx ring\n",
918 sc->sc_dev.dv_xname);
919 goto errout;
920 }
921 /* Load the ring into the ring map to extract the PA */
922 flags = (sc->sc_domid << 16) | BUS_DMA_NOWAIT;
923 if (bus_dmamap_load(sc->sc_dmat, sc->sc_rx_rmap, sc->sc_rx_ring,
924 PAGE_SIZE, NULL, flags)) {
925 printf("%s: failed to load the rx ring map\n",
926 sc->sc_dev.dv_xname);
927 goto errout;
928 }
929 sc->sc_rx_ref = sc->sc_rx_rmap->dm_segs[0].ds_addr;
930
931 sc->sc_rx_ring->rxr_prod_event = sc->sc_rx_ring->rxr_cons_event = 1;
932
933 for (i = 0; i < XNF_RX_DESC; i++) {
934 if (bus_dmamap_create(sc->sc_dmat, XNF_MCLEN, 1, XNF_MCLEN,
935 PAGE_SIZE, BUS_DMA_NOWAIT, &sc->sc_rx_dmap[i])) {
936 printf("%s: failed to create a memory map for the"
937 " rx slot %d\n", sc->sc_dev.dv_xname, i);
938 goto errout;
939 }
940 sc->sc_rx_ring->rxr_desc[i].rxd_req.rxq_id = i;
941 }
942
943 return (0);
944
945 errout:
946 xnf_rx_ring_destroy(sc);
947 return (-1);
948}
949
950void
951xnf_rx_ring_drain(struct xnf_softc *sc)
952{
953 struct xnf_rx_ring *rxr = sc->sc_rx_ring;
954
955 if (sc->sc_rx_cons != rxr->rxr_cons)
956 xnf_rxeof(sc);
957}
958
959void
960xnf_rx_ring_destroy(struct xnf_softc *sc)
961{
962 int i;
963
964 for (i = 0; i < XNF_RX_DESC; i++) {
965 if (sc->sc_rx_buf[i] == NULL)
966 continue;
967 bus_dmamap_sync(sc->sc_dmat, sc->sc_rx_dmap[i], 0, 0,
968 BUS_DMASYNC_POSTREAD);
969 bus_dmamap_unload(sc->sc_dmat, sc->sc_rx_dmap[i]);
970 m_freem(sc->sc_rx_buf[i]);
971 sc->sc_rx_buf[i] = NULL;
972 }
973
974 for (i = 0; i < XNF_RX_DESC; i++) {
975 if (sc->sc_rx_dmap[i] == NULL)
976 continue;
977 bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_dmap[i]);
978 sc->sc_rx_dmap[i] = NULL;
979 }
980 if (sc->sc_rx_rmap) {
981 bus_dmamap_sync(sc->sc_dmat, sc->sc_rx_rmap, 0, 0,
982 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
983 bus_dmamap_unload(sc->sc_dmat, sc->sc_rx_rmap);
984 bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_rmap);
985 }
986 if (sc->sc_rx_ring) {
987 bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_rx_ring,
988 PAGE_SIZE);
989 bus_dmamem_free(sc->sc_dmat, &sc->sc_rx_seg, 1);
990 }
991 sc->sc_rx_ring = NULL;
992 sc->sc_rx_rmap = NULL;
993 sc->sc_rx_cons = 0;
994}
995
996int
997xnf_tx_ring_create(struct xnf_softc *sc)
998{
999 struct ifnet *ifp = &sc->sc_ac.ac_if;
1000 int i, flags, nsegs, rsegs;
1001 bus_size_t segsz;
1002
1003 sc->sc_tx_frags = sc->sc_caps & XNF_CAP_SG ? XNF_TX_FRAG : 1;
1004
1005 /* Allocate a page of memory for the ring */
1006 if (bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
1007 &sc->sc_tx_seg, 1, &rsegs, BUS_DMA_ZERO | BUS_DMA_NOWAIT)) {
1008 printf("%s: failed to allocate memory for the tx ring\n",
1009 sc->sc_dev.dv_xname);
1010 return (-1);
1011 }
1012 /* Map in the allocated memory into the ring structure */
1013 if (bus_dmamem_map(sc->sc_dmat, &sc->sc_tx_seg, 1, PAGE_SIZE,
1014 (caddr_t *)&sc->sc_tx_ring, BUS_DMA_NOWAIT)) {
1015 printf("%s: failed to map memory for the tx ring\n",
1016 sc->sc_dev.dv_xname);
1017 goto errout;
1018 }
1019 /* Create a map to load the ring memory into */
1020 if (bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
1021 BUS_DMA_NOWAIT, &sc->sc_tx_rmap)) {
1022 printf("%s: failed to create a memory map for the tx ring\n",
1023 sc->sc_dev.dv_xname);
1024 goto errout;
1025 }
1026 /* Load the ring into the ring map to extract the PA */
1027 flags = (sc->sc_domid << 16) | BUS_DMA_NOWAIT;
1028 if (bus_dmamap_load(sc->sc_dmat, sc->sc_tx_rmap, sc->sc_tx_ring,
1029 PAGE_SIZE, NULL, flags)) {
1030 printf("%s: failed to load the tx ring map\n",
1031 sc->sc_dev.dv_xname);
1032 goto errout;
1033 }
1034 sc->sc_tx_ref = sc->sc_tx_rmap->dm_segs[0].ds_addr;
1035
1036 sc->sc_tx_ring->txr_prod_event = sc->sc_tx_ring->txr_cons_event = 1;
1037
1038 if (sc->sc_caps & XNF_CAP_SG) {
1039 nsegs = roundup(ifp->if_hardmtu, XNF_MCLEN) / XNF_MCLEN + 1;
1040 segsz = nsegs * XNF_MCLEN;
1041 } else {
1042 nsegs = 1;
1043 segsz = XNF_MCLEN;
1044 }
1045 for (i = 0; i < XNF_TX_DESC; i++) {
1046 if (bus_dmamap_create(sc->sc_dmat, segsz, nsegs, XNF_MCLEN,
1047 PAGE_SIZE, BUS_DMA_NOWAIT, &sc->sc_tx_buf[i].txb_dmap)) {
1048 printf("%s: failed to create a memory map for the"
1049 " tx slot %d\n", sc->sc_dev.dv_xname, i);
1050 goto errout;
1051 }
1052 }
1053
1054 sc->sc_tx_avail = XNF_TX_DESC;
1055 sc->sc_tx_next = 0;
1056
1057 return (0);
1058
1059 errout:
1060 xnf_tx_ring_destroy(sc);
1061 return (-1);
1062}
1063
1064void
1065xnf_tx_ring_drain(struct xnf_softc *sc)
1066{
1067 struct xnf_tx_ring *txr = sc->sc_tx_ring;
1068
1069 if (sc->sc_tx_cons != txr->txr_cons)
1070 xnf_txeof(sc);
1071}
1072
1073void
1074xnf_tx_ring_destroy(struct xnf_softc *sc)
1075{
1076 int i;
1077
1078 for (i = 0; i < XNF_TX_DESC; i++) {
1079 if (sc->sc_tx_buf[i].txb_dmap == NULL)
1080 continue;
1081 bus_dmamap_sync(sc->sc_dmat, sc->sc_tx_buf[i].txb_dmap, 0, 0,
1082 BUS_DMASYNC_POSTWRITE);
1083 bus_dmamap_unload(sc->sc_dmat, sc->sc_tx_buf[i].txb_dmap);
1084 bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_buf[i].txb_dmap);
1085 sc->sc_tx_buf[i].txb_dmap = NULL;
1086 if (sc->sc_tx_buf[i].txb_mbuf == NULL)
1087 continue;
1088 m_free(sc->sc_tx_buf[i].txb_mbuf);
1089 sc->sc_tx_buf[i].txb_mbuf = NULL;
1090 sc->sc_tx_buf[i].txb_ndesc = 0;
1091 }
1092 if (sc->sc_tx_rmap) {
1093 bus_dmamap_sync(sc->sc_dmat, sc->sc_tx_rmap, 0, 0,
1094 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1095 bus_dmamap_unload(sc->sc_dmat, sc->sc_tx_rmap);
1096 bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_rmap);
1097 }
1098 if (sc->sc_tx_ring) {
1099 bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_tx_ring,
1100 PAGE_SIZE);
1101 bus_dmamem_free(sc->sc_dmat, &sc->sc_tx_seg, 1);
1102 }
1103 sc->sc_tx_ring = NULL;
1104 sc->sc_tx_rmap = NULL;
1105 sc->sc_tx_avail = XNF_TX_DESC;
1106 sc->sc_tx_next = 0;
1107}
1108
1109int
1110xnf_capabilities(struct xnf_softc *sc)
1111{
1112 unsigned long long res;
1113 const char *prop;
1114 int error;
1115
1116 /* Query scatter-gather capability */
1117 prop = "feature-sg";
1118 if ((error = xs_getnum(sc->sc_parent, sc->sc_backend, prop, &res)) != 0
1119 && error != ENOENT)
1120 goto errout;
1121 if (error == 0 && res == 1)
1122 sc->sc_caps |= XNF_CAP_SG;
1123
1124#if 0
1125 /* Query IPv4 checksum offloading capability, enabled by default */
1126 sc->sc_caps |= XNF_CAP_CSUM4;
1127 prop = "feature-no-csum-offload";
1128 if ((error = xs_getnum(sc->sc_parent, sc->sc_backend, prop, &res)) != 0
1129 && error != ENOENT)
1130 goto errout;
1131 if (error == 0 && res == 1)
1132 sc->sc_caps &= ~XNF_CAP_CSUM4;
1133
1134 /* Query IPv6 checksum offloading capability */
1135 prop = "feature-ipv6-csum-offload";
1136 if ((error = xs_getnum(sc->sc_parent, sc->sc_backend, prop, &res)) != 0
1137 && error != ENOENT)
1138 goto errout;
1139 if (error == 0 && res == 1)
1140 sc->sc_caps |= XNF_CAP_CSUM6;
1141#endif
1142
1143 /* Query multicast traffic control capability */
1144 prop = "feature-multicast-control";
1145 if ((error = xs_getnum(sc->sc_parent, sc->sc_backend, prop, &res)) != 0
1146 && error != ENOENT)
1147 goto errout;
1148 if (error == 0 && res == 1)
1149 sc->sc_caps |= XNF_CAP_MCAST;
1150
1151 /* Query split Rx/Tx event channel capability */
1152 prop = "feature-split-event-channels";
1153 if ((error = xs_getnum(sc->sc_parent, sc->sc_backend, prop, &res)) != 0
1154 && error != ENOENT)
1155 goto errout;
1156 if (error == 0 && res == 1)
1157 sc->sc_caps |= XNF_CAP_SPLIT;
1158
1159 /* Query multiqueue capability */
1160 prop = "multi-queue-max-queues";
1161 if ((error = xs_getnum(sc->sc_parent, sc->sc_backend, prop, &res)) != 0
1162 && error != ENOENT)
1163 goto errout;
1164 if (error == 0)
1165 sc->sc_caps |= XNF_CAP_MULTIQ;
1166
1167 DPRINTF("%s: capabilities %b\n", sc->sc_dev.dv_xname, sc->sc_caps,
1168 "\20\006MULTIQ\005SPLIT\004MCAST\003CSUM6\002CSUM4\001SG");
1169 return (0);
1170
1171 errout:
1172 printf("%s: failed to read \"%s\" property\n", sc->sc_dev.dv_xname,
1173 prop);
1174 return (-1);
1175}
1176
1177int
1178xnf_init_backend(struct xnf_softc *sc)
1179{
1180 const char *prop;
1181
1182 /* Plumb the Rx ring */
1183 prop = "rx-ring-ref";
1184 if (xs_setnum(sc->sc_parent, sc->sc_node, prop, sc->sc_rx_ref))
1185 goto errout;
1186 /* Enable "copy" mode */
1187 prop = "request-rx-copy";
1188 if (xs_setnum(sc->sc_parent, sc->sc_node, prop, 1))
1189 goto errout;
1190 /* Enable notify mode */
1191 prop = "feature-rx-notify";
1192 if (xs_setnum(sc->sc_parent, sc->sc_node, prop, 1))
1193 goto errout;
1194
1195 /* Plumb the Tx ring */
1196 prop = "tx-ring-ref";
1197 if (xs_setnum(sc->sc_parent, sc->sc_node, prop, sc->sc_tx_ref))
1198 goto errout;
1199 /* Enable scatter-gather mode */
1200 if (sc->sc_tx_frags > 1) {
1201 prop = "feature-sg";
1202 if (xs_setnum(sc->sc_parent, sc->sc_node, prop, 1))
1203 goto errout;
1204 }
1205
1206 /* Disable IPv4 checksum offloading */
1207 if (!(sc->sc_caps & XNF_CAP_CSUM4)) {
1208 prop = "feature-no-csum-offload";
1209 if (xs_setnum(sc->sc_parent, sc->sc_node, prop, 1))
1210 goto errout;
1211 }
1212
1213 /* Enable IPv6 checksum offloading */
1214 if (sc->sc_caps & XNF_CAP_CSUM6) {
1215 prop = "feature-ipv6-csum-offload";
1216 if (xs_setnum(sc->sc_parent, sc->sc_node, prop, 1))
1217 goto errout;
1218 }
1219
1220 /* Plumb the event channel port */
1221 prop = "event-channel";
1222 if (xs_setnum(sc->sc_parent, sc->sc_node, prop, sc->sc_xih))
1223 goto errout;
1224
1225 /* Connect the device */
1226 prop = "state";
1227 if (xs_setprop(sc->sc_parent, sc->sc_node, prop, XEN_STATE_CONNECTED,
1228 strlen(XEN_STATE_CONNECTED)))
1229 goto errout;
1230
1231 return (0);
1232
1233 errout:
1234 printf("%s: failed to set \"%s\" property\n", sc->sc_dev.dv_xname, prop);
1235 return (-1);
1236}