Bug Summary

File: dev/pv/virtio.c
Warning: line 388, column 3
Value stored to 'r' is never read
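
The flagged store is in virtio_alloc_vq() (see line 388 in the listing below): when the vq_entries allocation fails, r is set to ENOMEM immediately before goto err, but the err: label unconditionally returns -1, so the stored value is never read. The following is only a simplified, hypothetical sketch of that pattern, not the actual OpenBSD code or fix; either dropping the assignment or returning r from the error path would silence the warning.

	#include <errno.h>
	#include <stddef.h>

	/*
	 * Hypothetical stand-in for the allocation path in virtio_alloc_vq():
	 * the error label returns a fixed -1, so the ENOMEM stored in 'r'
	 * just before the goto is a dead store.
	 */
	static int
	alloc_sketch(void *entries)
	{
		int r = 0;

		if (entries == NULL) {
			r = ENOMEM;	/* "Value stored to 'r' is never read" */
			goto err;
		}
		return 0;
	err:
		/* Possible fixes: drop the assignment above, or "return r;" here. */
		return -1;
	}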

Annotated Source Code


clang -cc1 -cc1 -triple amd64-unknown-openbsd7.4 -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name virtio.c -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -ffp-contract=on -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -target-feature +retpoline-external-thunk -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/llvm16/lib/clang/16 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/legacy-dpm -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu13 -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/inc -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc/pmfw_if -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D SUSPEND -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant 
-fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fcf-protection=branch -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /home/ben/Projects/scan/2024-01-11-110808-61670-1 -x c /usr/src/sys/dev/pv/virtio.c
1/* $OpenBSD: virtio.c,v 1.24 2023/12/02 10:01:35 sf Exp $ */
2/* $NetBSD: virtio.c,v 1.3 2011/11/02 23:05:52 njoly Exp $ */
3
4/*
5 * Copyright (c) 2012 Stefan Fritsch, Alexander Fiveg.
6 * Copyright (c) 2010 Minoura Makoto.
7 * All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29
30#include <sys/param.h>
31#include <sys/systm.h>
32#include <sys/kernel.h>
33#include <sys/device.h>
34#include <sys/mutex.h>
35#include <sys/atomic.h>
36#include <sys/malloc.h>
37
38#include <dev/pv/virtioreg.h>
39#include <dev/pv/virtiovar.h>
40
41#if VIRTIO_DEBUG
42#define VIRTIO_ASSERT(x) KASSERT(x)
43#else
44#define VIRTIO_ASSERT(x)
45#endif
46
47void virtio_init_vq(struct virtio_softc *,
48 struct virtqueue *);
49void vq_free_entry(struct virtqueue *, struct vq_entry *);
50struct vq_entry *vq_alloc_entry(struct virtqueue *);
51
52struct cfdriver virtio_cd = {
53 NULL, "virtio", DV_DULL
54};
55
56static const char * const virtio_device_name[] = {
57 "Unknown (0)", /* 0 */
58 "Network", /* 1 */
59 "Block", /* 2 */
60 "Console", /* 3 */
61 "Entropy", /* 4 */
62 "Memory Balloon", /* 5 */
63 "IO Memory", /* 6 */
64 "Rpmsg", /* 7 */
65 "SCSI host", /* 8 */
66 "9P Transport", /* 9 */
67 "mac80211 wlan", /* 10 */
68 NULL, /* 11 */
69 NULL, /* 12 */
70 NULL, /* 13 */
71 NULL, /* 14 */
72 NULL, /* 15 */
73 "GPU", /* 16 */
74};
75#define NDEVNAMES (sizeof(virtio_device_name)/sizeof(char*))
76
77const char *
78virtio_device_string(int id)
79{
80 return id < NDEVNAMES ? virtio_device_name[id] : "Unknown";
81}
82
83#if VIRTIO_DEBUG
84static const struct virtio_feature_name transport_feature_names[] = {
85 { VIRTIO_F_NOTIFY_ON_EMPTY, "NotifyOnEmpty"},
86 { VIRTIO_F_RING_INDIRECT_DESC, "RingIndirectDesc"},
87 { VIRTIO_F_RING_EVENT_IDX, "RingEventIdx"},
88 { VIRTIO_F_BAD_FEATURE, "BadFeature"},
89 { VIRTIO_F_VERSION_1, "Version1"},
90 { 0, NULL}
91};
92
93void
94virtio_log_features(uint64_t host, uint64_t neg,
95 const struct virtio_feature_name *guest_feature_names)
96{
97 const struct virtio_feature_name *namep;
98 int i;
99 char c;
100 uint64_t bit;
101
102 for (i = 0; i < 64; i++) {
103 if (i == 30) {
104 /*
105 * VIRTIO_F_BAD_FEATURE is only used for
106 * checking correct negotiation
107 */
108 continue;
109 }
110 bit = 1ULL << i;
111 if ((host&bit) == 0)
112 continue;
113 namep = guest_feature_names;
114 while (namep->bit && namep->bit != bit)
115 namep++;
116 if (namep->name == NULL) {
117 namep = transport_feature_names;
118 while (namep->bit && namep->bit != bit)
119 namep++;
120 }
121 c = (neg&bit) ? '+' : '-';
122 if (namep->name)
123 printf(" %c%s", c, namep->name);
124 else
125 printf(" %cUnknown(%d)", c, i);
126 }
127}
128#endif
129
130/*
131 * Reset the device.
132 */
133/*
134 * To reset the device to a known state, do following:
135 * virtio_reset(sc); // this will stop the device activity
136 * <dequeue finished requests>; // virtio_dequeue() still can be called
137 * <revoke pending requests in the vqs if any>;
138 * virtio_reinit_start(sc); // dequeue prohibited
139 * <some other initialization>;
140 * virtio_reinit_end(sc); // device activated; enqueue allowed
141 * Once attached, features are assumed to not change again.
142 */
143void
144virtio_reset(struct virtio_softc *sc)
145{
146 virtio_device_reset(sc);
147 sc->sc_active_features = 0;
148}
149
150void
151virtio_reinit_start(struct virtio_softc *sc)
152{
153 int i;
154
155 virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_ACK);
156 virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER);
157 virtio_negotiate_features(sc, NULL);
158 for (i = 0; i < sc->sc_nvqs; i++) {
159 int n;
160 struct virtqueue *vq = &sc->sc_vqs[i];
161 n = virtio_read_queue_size(sc, vq->vq_index);
162 if (n == 0) /* vq disappeared */
163 continue;
164 if (n != vq->vq_num) {
165 panic("%s: virtqueue size changed, vq index %d",
166 sc->sc_dev.dv_xname, vq->vq_index);
167 }
168 virtio_init_vq(sc, vq);
169 virtio_setup_queue(sc, vq, vq->vq_dmamap->dm_segs[0].ds_addr);
170 }
171}
172
173void
174virtio_reinit_end(struct virtio_softc *sc)
175{
176 virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER_OK);
177}
178
179/*
180 * dmamap sync operations for a virtqueue.
181 */
182static inline void
183vq_sync_descs(struct virtio_softc *sc, struct virtqueue *vq, int ops)
184{
185 /* availoffset == sizeof(vring_desc)*vq_num */
186 bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap, 0, vq->vq_availoffset,
187 ops);
188}
189
190static inline void
191vq_sync_aring(struct virtio_softc *sc, struct virtqueue *vq, int ops)
192{
193 bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap, vq->vq_availoffset,
194 offsetof(struct vring_avail, ring) + vq->vq_num * sizeof(uint16_t),
195 ops);
196}
197
198static inline void
199vq_sync_uring(struct virtio_softc *sc, struct virtqueue *vq, int ops)
200{
201 bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap, vq->vq_usedoffset,
202 offsetof(struct vring_used, ring) + vq->vq_num *
203 sizeof(struct vring_used_elem), ops);
204}
205
206static inline void
207vq_sync_indirect(struct virtio_softc *sc, struct virtqueue *vq, int slot,
208 int ops)
209{
210 int offset = vq->vq_indirectoffset +
211 sizeof(struct vring_desc) * vq->vq_maxnsegs * slot;
212
213 bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap, offset,
214 sizeof(struct vring_desc) * vq->vq_maxnsegs, ops);
215}
216
217/*
218 * Scan vq, bus_dmamap_sync for the vqs (not for the payload),
219 * and calls (*vq_done)() if some entries are consumed.
220 * For use in transport specific irq handlers.
221 */
222int
223virtio_check_vqs(struct virtio_softc *sc)
224{
225 int i, r = 0;
226
227 /* going backwards is better for if_vio */
228 for (i = sc->sc_nvqs - 1; i >= 0; i--)
229 r |= virtio_check_vq(sc, &sc->sc_vqs[i]);
230
231 return r;
232}
233
234int
235virtio_check_vq(struct virtio_softc *sc, struct virtqueue *vq)
236{
237 if (vq->vq_queued) {
238 vq->vq_queued = 0;
239 vq_sync_aring(sc, vq, BUS_DMASYNC_POSTWRITE);
240 }
241 vq_sync_uring(sc, vq, BUS_DMASYNC_POSTREAD);
242 if (vq->vq_used_idx != vq->vq_used->idx) {
243 if (vq->vq_done)
244 return (vq->vq_done)(vq);
245 }
246
247 return 0;
248}
249
250/*
251 * Initialize vq structure.
252 */
253void
254virtio_init_vq(struct virtio_softc *sc, struct virtqueue *vq)
255{
256 int i, j;
257 int vq_size = vq->vq_num;
258
259 memset(vq->vq_vaddr, 0, vq->vq_bytesize);
260
261 /* build the indirect descriptor chain */
262 if (vq->vq_indirect != NULL) {
263 struct vring_desc *vd;
264
265 for (i = 0; i < vq_size; i++) {
266 vd = vq->vq_indirect;
267 vd += vq->vq_maxnsegs * i;
268 for (j = 0; j < vq->vq_maxnsegs-1; j++)
269 vd[j].next = j + 1;
270 }
271 }
272
273 /* free slot management */
274 SLIST_INIT(&vq->vq_freelist);
275 /*
276 * virtio_enqueue_trim needs monotonically increasing entries, therefore
277 * initialize in reverse order
278 */
279 for (i = vq_size - 1; i >= 0; i--) {
280 SLIST_INSERT_HEAD(&vq->vq_freelist, &vq->vq_entries[i],
281 qe_list);
282 vq->vq_entries[i].qe_index = i;
283 }
284
285 /* enqueue/dequeue status */
286 vq->vq_avail_idx = 0;
287 vq->vq_used_idx = 0;
288 vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
289 vq_sync_uring(sc, vq, BUS_DMASYNC_PREREAD);
290 vq->vq_queued = 1;
291}
292
293/*
294 * Allocate/free a vq.
295 *
296 * maxnsegs denotes how much space should be allocated for indirect
297 * descriptors. maxnsegs == 1 can be used to disable the use of indirect
298 * descriptors for this queue.
299 */
300int
301virtio_alloc_vq(struct virtio_softc *sc, struct virtqueue *vq, int index,
302 int maxsegsize, int maxnsegs, const char *name)
303{
304 int vq_size, allocsize1, allocsize2, allocsize3, allocsize = 0;
305 int rsegs, r, hdrlen;
306#define VIRTQUEUE_ALIGN(n) (((n)+(VIRTIO_PAGE_SIZE-1))& \
307 ~(VIRTIO_PAGE_SIZE-1))
308
309 memset(vq, 0, sizeof(*vq));
310
311 vq_size = virtio_read_queue_size(sc, index);
312 if (vq_size == 0) {
313 printf("virtqueue not exist, index %d for %s\n", index, name);
314 goto err;
315 }
316 if (((vq_size - 1) & vq_size) != 0)
317 panic("vq_size not power of two: %d", vq_size);
318
319 hdrlen = virtio_has_feature(sc, VIRTIO_F_RING_EVENT_IDX) ? 3 : 2;
320
321 /* allocsize1: descriptor table + avail ring + pad */
322 allocsize1 = VIRTQUEUE_ALIGN(sizeof(struct vring_desc) * vq_size
323 + sizeof(uint16_t) * (hdrlen + vq_size));
324 /* allocsize2: used ring + pad */
325 allocsize2 = VIRTQUEUE_ALIGN(sizeof(uint16_t) * hdrlen
326 + sizeof(struct vring_used_elem) * vq_size);
327 /* allocsize3: indirect table */
328 if (sc->sc_indirect && maxnsegs > 1)
329 allocsize3 = sizeof(struct vring_desc) * maxnsegs * vq_size;
330 else
331 allocsize3 = 0;
332 allocsize = allocsize1 + allocsize2 + allocsize3;
333
334 /* alloc and map the memory */
335 r = bus_dmamem_alloc(sc->sc_dmat, allocsize, VIRTIO_PAGE_SIZE, 0,
336 &vq->vq_segs[0], 1, &rsegs, BUS_DMA_NOWAIT);
337 if (r != 0) {
338 printf("virtqueue %d for %s allocation failed, error %d\n",
339 index, name, r);
340 goto err;
341 }
342 r = bus_dmamem_map(sc->sc_dmat, &vq->vq_segs[0], 1, allocsize,
343 (caddr_t*)&vq->vq_vaddr, BUS_DMA_NOWAIT);
344 if (r != 0) {
345 printf("virtqueue %d for %s map failed, error %d\n", index,
346 name, r);
347 goto err;
348 }
349 r = bus_dmamap_create(sc->sc_dmat, allocsize, 1, allocsize, 0,
350 BUS_DMA_NOWAIT, &vq->vq_dmamap);
351 if (r != 0) {
352 printf("virtqueue %d for %s dmamap creation failed, "
353 "error %d\n", index, name, r);
354 goto err;
355 }
356 r = bus_dmamap_load(sc->sc_dmat, vq->vq_dmamap, vq->vq_vaddr,
357 allocsize, NULL, BUS_DMA_NOWAIT);
358 if (r != 0) {
359 printf("virtqueue %d for %s dmamap load failed, error %d\n",
360 index, name, r);
361 goto err;
362 }
363
364 /* remember addresses and offsets for later use */
365 vq->vq_owner = sc;
366 vq->vq_num = vq_size;
367 vq->vq_mask = vq_size - 1;
368 vq->vq_index = index;
369 vq->vq_desc = vq->vq_vaddr;
370 vq->vq_availoffset = sizeof(struct vring_desc)*vq_size;
371 vq->vq_avail = (struct vring_avail*)(((char*)vq->vq_desc) +
372 vq->vq_availoffset);
373 vq->vq_usedoffset = allocsize1;
374 vq->vq_used = (struct vring_used*)(((char*)vq->vq_desc) +
375 vq->vq_usedoffset);
376 if (allocsize3 > 0) {
377 vq->vq_indirectoffset = allocsize1 + allocsize2;
378 vq->vq_indirect = (void*)(((char*)vq->vq_desc)
379 + vq->vq_indirectoffset);
380 }
381 vq->vq_bytesize = allocsize;
382 vq->vq_maxnsegs = maxnsegs;
383
384 /* free slot management */
385 vq->vq_entries = mallocarray(vq_size, sizeof(struct vq_entry),
386 M_DEVBUF, M_NOWAIT | M_ZERO);
387 if (vq->vq_entries == NULL) {
388 r = ENOMEM;
Value stored to 'r' is never read
389 goto err;
390 }
391
392 virtio_init_vq(sc, vq);
393 virtio_setup_queue(sc, vq, vq->vq_dmamap->dm_segs[0].ds_addr);
394
395#if VIRTIO_DEBUG
396 printf("\nallocated %u byte for virtqueue %d for %s, size %d\n",
397 allocsize, index, name, vq_size);
398 if (allocsize3 > 0)
399 printf("using %d byte (%d entries) indirect descriptors\n",
400 allocsize3, maxnsegs * vq_size);
401#endif
402 return 0;
403
404err:
405 if (vq->vq_dmamap)
406 bus_dmamap_destroy(sc->sc_dmat, vq->vq_dmamap);
407 if (vq->vq_vaddr)
408 bus_dmamem_unmap(sc->sc_dmat, vq->vq_vaddr, allocsize);
409 if (vq->vq_segs[0].ds_addr)
410 bus_dmamem_free(sc->sc_dmat, &vq->vq_segs[0], 1);
411 memset(vq, 0, sizeof(*vq));
412
413 return -1;
414}
415
416int
417virtio_free_vq(struct virtio_softc *sc, struct virtqueue *vq)
418{
419 struct vq_entry *qe;
420 int i = 0;
421
422 /* device must be already deactivated */
423 /* confirm the vq is empty */
424 SLIST_FOREACH(qe, &vq->vq_freelist, qe_list) {
425 i++;
426 }
427 if (i != vq->vq_num) {
428 printf("%s: freeing non-empty vq, index %d\n",
429 sc->sc_dev.dv_xname, vq->vq_index);
430 return EBUSY;
431 }
432
433 /* tell device that there's no virtqueue any longer */
434 virtio_setup_queue(sc, vq, 0);
435
436 free(vq->vq_entries, M_DEVBUF, 0);
437 bus_dmamap_unload(sc->sc_dmat, vq->vq_dmamap);
438 bus_dmamap_destroy(sc->sc_dmat, vq->vq_dmamap);
439 bus_dmamem_unmap(sc->sc_dmat, vq->vq_vaddr, vq->vq_bytesize);
440 bus_dmamem_free(sc->sc_dmat, &vq->vq_segs[0], 1);
441 memset(vq, 0, sizeof(*vq));
442
443 return 0;
444}
445
446/*
447 * Free descriptor management.
448 */
449struct vq_entry *
450vq_alloc_entry(struct virtqueue *vq)
451{
452 struct vq_entry *qe;
453
454 if (SLIST_EMPTY(&vq->vq_freelist))
455 return NULL;
456 qe = SLIST_FIRST(&vq->vq_freelist);
457 SLIST_REMOVE_HEAD(&vq->vq_freelist, qe_list);
458
459 return qe;
460}
461
462void
463vq_free_entry(struct virtqueue *vq, struct vq_entry *qe)
464{
465 SLIST_INSERT_HEAD(&vq->vq_freelist, qe, qe_list);
466}
467
468/*
469 * Enqueue several dmamaps as a single request.
470 */
471/*
472 * Typical usage:
473 * <queue size> number of followings are stored in arrays
474 * - command blocks (in dmamem) should be pre-allocated and mapped
475 * - dmamaps for command blocks should be pre-allocated and loaded
476 * - dmamaps for payload should be pre-allocated
477 * r = virtio_enqueue_prep(sc, vq, &slot); // allocate a slot
478 * if (r) // currently 0 or EAGAIN
479 * return r;
480 * r = bus_dmamap_load(dmat, dmamap_payload[slot], data, count, ..);
481 * if (r) {
482 * virtio_enqueue_abort(sc, vq, slot);
483 * bus_dmamap_unload(dmat, dmamap_payload[slot]);
484 * return r;
485 * }
486 * r = virtio_enqueue_reserve(sc, vq, slot,
487 * dmamap_payload[slot]->dm_nsegs+1);
488 * // ^ +1 for command
489 * if (r) { // currently 0 or EAGAIN
490 * bus_dmamap_unload(dmat, dmamap_payload[slot]);
491 * return r; // do not call abort()
492 * }
493 * <setup and prepare commands>
494 * bus_dmamap_sync(dmat, dmamap_cmd[slot],... BUS_DMASYNC_PREWRITE);
495 * bus_dmamap_sync(dmat, dmamap_payload[slot],...);
496 * virtio_enqueue(sc, vq, slot, dmamap_cmd[slot], 0);
497 * virtio_enqueue(sc, vq, slot, dmamap_payload[slot], iswrite);
498 * virtio_enqueue_commit(sc, vq, slot, 1);
499 *
500 * Alternative usage with statically allocated slots:
501 * <during initialization>
502 * // while not out of slots, do
503 * virtio_enqueue_prep(sc, vq, &slot); // allocate a slot
504 * virtio_enqueue_reserve(sc, vq, slot, max_segs); // reserve all slots
505 * that may ever be needed
506 *
507 * <when enqueuing a request>
508 * // Don't call virtio_enqueue_prep()
509 * bus_dmamap_load(dmat, dmamap_payload[slot], data, count, ..);
510 * bus_dmamap_sync(dmat, dmamap_cmd[slot],... BUS_DMASYNC_PREWRITE);
511 * bus_dmamap_sync(dmat, dmamap_payload[slot],...);
512 * virtio_enqueue_trim(sc, vq, slot, num_segs_needed);
513 * virtio_enqueue(sc, vq, slot, dmamap_cmd[slot], 0);
514 * virtio_enqueue(sc, vq, slot, dmamap_payload[slot], iswrite);
515 * virtio_enqueue_commit(sc, vq, slot, 1);
516 *
517 * <when dequeuing>
518 * // don't call virtio_dequeue_commit()
519 */
520
521/*
522 * enqueue_prep: allocate a slot number
523 */
524int
525virtio_enqueue_prep(struct virtqueue *vq, int *slotp)
526{
527 struct vq_entry *qe1;
528
529 VIRTIO_ASSERT(slotp != NULL);
530
531 qe1 = vq_alloc_entry(vq);
532 if (qe1 == NULL)
533 return EAGAIN;
534 /* next slot is not allocated yet */
535 qe1->qe_next = -1;
536 *slotp = qe1->qe_index;
537
538 return 0;
539}
540
541/*
542 * enqueue_reserve: allocate remaining slots and build the descriptor chain.
543 * Calls virtio_enqueue_abort() on failure.
544 */
545int
546virtio_enqueue_reserve(struct virtqueue *vq, int slot, int nsegs)
547{
548 struct vq_entry *qe1 = &vq->vq_entries[slot];
549
550 VIRTIO_ASSERT(qe1->qe_next == -1);
551 VIRTIO_ASSERT(1 <= nsegs && nsegs <= vq->vq_num);
552
553 if (vq->vq_indirect != NULL && nsegs > 1 && nsegs <= vq->vq_maxnsegs) {
554 struct vring_desc *vd;
555 int i;
556
557 qe1->qe_indirect = 1;
558
559 vd = &vq->vq_desc[qe1->qe_index];
560 vd->addr = vq->vq_dmamap->dm_segs[0].ds_addr +
561 vq->vq_indirectoffset;
562 vd->addr += sizeof(struct vring_desc) * vq->vq_maxnsegs *
563 qe1->qe_index;
564 vd->len = sizeof(struct vring_desc) * nsegs;
565 vd->flags = VRING_DESC_F_INDIRECT;
566
567 vd = vq->vq_indirect;
568 vd += vq->vq_maxnsegs * qe1->qe_index;
569 qe1->qe_desc_base = vd;
570
571 for (i = 0; i < nsegs-1; i++)
572 vd[i].flags = VRING_DESC_F_NEXT;
573 vd[i].flags = 0;
574 qe1->qe_next = 0;
575
576 return 0;
577 } else {
578 struct vring_desc *vd;
579 struct vq_entry *qe;
580 int i, s;
581
582 qe1->qe_indirect = 0;
583
584 vd = &vq->vq_desc[0];
585 qe1->qe_desc_base = vd;
586 qe1->qe_next = qe1->qe_index;
587 s = slot;
588 for (i = 0; i < nsegs - 1; i++) {
589 qe = vq_alloc_entry(vq);
590 if (qe == NULL) {
591 vd[s].flags = 0;
592 virtio_enqueue_abort(vq, slot);
593 return EAGAIN;
594 }
595 vd[s].flags = VRING_DESC_F_NEXT;
596 vd[s].next = qe->qe_index;
597 s = qe->qe_index;
598 }
599 vd[s].flags = 0;
600
601 return 0;
602 }
603}
604
605/*
606 * enqueue: enqueue a single dmamap.
607 */
608int
609virtio_enqueue(struct virtqueue *vq, int slot, bus_dmamap_t dmamap, int write)
610{
611 struct vq_entry *qe1 = &vq->vq_entries[slot];
612 struct vring_desc *vd = qe1->qe_desc_base;
613 int i;
614 int s = qe1->qe_next;
615
616 VIRTIO_ASSERT(s >= 0);
617 VIRTIO_ASSERT(dmamap->dm_nsegs > 0);
618 if (dmamap->dm_nsegs > vq->vq_maxnsegs) {
619#if VIRTIO_DEBUG
620 for (i = 0; i < dmamap->dm_nsegs; i++) {
621 printf(" %d (%d): %p %lx \n", i, write,
622 (void *)dmamap->dm_segs[i].ds_addr,
623 dmamap->dm_segs[i].ds_len);
624 }
625#endif
626 panic("dmamap->dm_nseg %d > vq->vq_maxnsegs %d",
627 dmamap->dm_nsegs, vq->vq_maxnsegs);
628 }
629
630 for (i = 0; i < dmamap->dm_nsegs; i++) {
631 vd[s].addr = dmamap->dm_segs[i].ds_addr;
632 vd[s].len = dmamap->dm_segs[i].ds_len;
633 if (!write)
634 vd[s].flags |= VRING_DESC_F_WRITE;
635 s = vd[s].next;
636 }
637 qe1->qe_next = s;
638
639 return 0;
640}
641
642int
643virtio_enqueue_p(struct virtqueue *vq, int slot, bus_dmamap_t dmamap,
644 bus_addr_t start, bus_size_t len, int write)
645{
646 struct vq_entry *qe1 = &vq->vq_entries[slot];
647 struct vring_desc *vd = qe1->qe_desc_base;
648 int s = qe1->qe_next;
649
650 VIRTIO_ASSERT(s >= 0);
651 /* XXX todo: handle more segments */
652 VIRTIO_ASSERT(dmamap->dm_nsegs == 1);
653 VIRTIO_ASSERT((dmamap->dm_segs[0].ds_len > start) &&
654 (dmamap->dm_segs[0].ds_len >= start + len));
655
656 vd[s].addr = dmamap->dm_segs[0].ds_addr + start;
657 vd[s].len = len;
658 if (!write)
659 vd[s].flags |= VRING_DESC_F_WRITE;
660 qe1->qe_next = vd[s].next;
661
662 return 0;
663}
664
665static void
666publish_avail_idx(struct virtio_softc *sc, struct virtqueue *vq)
667{
668 vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
669
670 virtio_membar_producer();
671 vq->vq_avail->idx = vq->vq_avail_idx;
672 vq_sync_aring(sc, vq, BUS_DMASYNC_POSTWRITE);
673 vq->vq_queued = 1;
674}
675
676/*
677 * enqueue_commit: add it to the aring.
678 */
679void
680virtio_enqueue_commit(struct virtio_softc *sc, struct virtqueue *vq, int slot,
681 int notifynow)
682{
683 struct vq_entry *qe1;
684
685 if (slot < 0)
686 goto notify;
687 vq_sync_descs(sc, vq, BUS_DMASYNC_PREWRITE);
688 qe1 = &vq->vq_entries[slot];
689 if (qe1->qe_indirect)
690 vq_sync_indirect(sc, vq, slot, BUS_DMASYNC_PREWRITE);
691 vq->vq_avail->ring[(vq->vq_avail_idx++) & vq->vq_mask] = slot;
692
693notify:
694 if (notifynow) {
695 if (virtio_has_feature(vq->vq_owner, VIRTIO_F_RING_EVENT_IDX)) {
696 uint16_t o = vq->vq_avail->idx;
697 uint16_t n = vq->vq_avail_idx;
698 uint16_t t;
699 publish_avail_idx(sc, vq);
700
701 virtio_membar_sync();
702 t = VQ_AVAIL_EVENT(vq) + 1;
703 if ((uint16_t)(n - t) < (uint16_t)(n - o))
704 sc->sc_ops->kick(sc, vq->vq_index);
705 } else {
706 publish_avail_idx(sc, vq);
707
708 virtio_membar_sync();
709 if (!(vq->vq_used->flags & VRING_USED_F_NO_NOTIFY))
710 sc->sc_ops->kick(sc, vq->vq_index);
711 }
712 }
713}
714
715/*
716 * enqueue_abort: rollback.
717 */
718int
719virtio_enqueue_abort(struct virtqueue *vq, int slot)
720{
721 struct vq_entry *qe = &vq->vq_entries[slot];
722 struct vring_desc *vd;
723 int s;
724
725 if (qe->qe_next < 0) {
726 vq_free_entry(vq, qe);
727 return 0;
728 }
729
730 s = slot;
731 vd = &vq->vq_desc[0];
732 while (vd[s].flags & VRING_DESC_F_NEXT) {
733 s = vd[s].next;
734 vq_free_entry(vq, qe);
735 qe = &vq->vq_entries[s];
736 }
737 vq_free_entry(vq, qe);
738 return 0;
739}
740
741/*
742 * enqueue_trim: adjust buffer size to given # of segments, a.k.a.
743 * descriptors.
744 */
745void
746virtio_enqueue_trim(struct virtqueue *vq, int slot, int nsegs)
747{
748 struct vq_entry *qe1 = &vq->vq_entries[slot];
749 struct vring_desc *vd = &vq->vq_desc[0];
750 int i;
751
752 if ((vd[slot].flags & VRING_DESC_F_INDIRECT) == 0) {
753 qe1->qe_next = qe1->qe_index;
754 /*
755 * N.B.: the vq_entries are ASSUMED to be a contiguous
756 * block with slot being the index to the first one.
757 */
758 } else {
759 qe1->qe_next = 0;
760 vd = &vq->vq_desc[qe1->qe_index];
761 vd->len = sizeof(struct vring_desc) * nsegs;
762 vd = qe1->qe_desc_base;
763 slot = 0;
764 }
765
766 for (i = 0; i < nsegs -1 ; i++) {
767 vd[slot].flags = VRING_DESC_F_NEXT;
768 slot++;
769 }
770 vd[slot].flags = 0;
771}
772
773/*
774 * Dequeue a request.
775 */
776/*
777 * dequeue: dequeue a request from uring; dmamap_sync for uring is
778 * already done in the interrupt handler.
779 */
780int
781virtio_dequeue(struct virtio_softc *sc, struct virtqueue *vq,
782 int *slotp, int *lenp)
783{
784 uint16_t slot, usedidx;
785 struct vq_entry *qe;
786
787 if (vq->vq_used_idx == vq->vq_used->idx)
788 return ENOENT;
789 usedidx = vq->vq_used_idx++;
790 usedidx &= vq->vq_mask;
791
792 virtio_membar_consumer();
793 slot = vq->vq_used->ring[usedidx].id;
794 qe = &vq->vq_entries[slot];
795
796 if (qe->qe_indirect)
797 vq_sync_indirect(sc, vq, slot, BUS_DMASYNC_POSTWRITE);
798
799 if (slotp)
800 *slotp = slot;
801 if (lenp)
802 *lenp = vq->vq_used->ring[usedidx].len;
803
804 return 0;
805}
806
807/*
808 * dequeue_commit: complete dequeue; the slot is recycled for future use.
809 * if you forget to call this the slot will be leaked.
810 *
811 * Don't call this if you use statically allocated slots
812 * and virtio_dequeue_trim().
813 */
814int
815virtio_dequeue_commit(struct virtqueue *vq, int slot)
816{
817 struct vq_entry *qe = &vq->vq_entries[slot];
818 struct vring_desc *vd = &vq->vq_desc[0];
819 int s = slot;
820
821 while (vd[s].flags & VRING_DESC_F_NEXT) {
822 s = vd[s].next;
823 vq_free_entry(vq, qe);
824 qe = &vq->vq_entries[s];
825 }
826 vq_free_entry(vq, qe);
827
828 return 0;
829}
830
831/*
832 * Increase the event index in order to delay interrupts.
833 * Returns 0 on success; returns 1 if the used ring has already advanced
834 * too far, and the caller must process the queue again (otherwise, no
835 * more interrupts will happen).
836 */
837int
838virtio_postpone_intr(struct virtqueue *vq, uint16_t nslots)
839{
840 uint16_t idx;
841
842 idx = vq->vq_used_idx + nslots;
843
844 /* set the new event index: avail_ring->used_event = idx */
845 VQ_USED_EVENT(vq) = idx;
846 virtio_membar_sync();
847
848 vq_sync_aring(vq->vq_owner, vq, BUS_DMASYNC_PREWRITE);
849 vq->vq_queued++;
850
851 if (nslots < virtio_nused(vq))
852 return 1;
853
854 return 0;
855}
856
857/*
858 * Postpone interrupt until 3/4 of the available descriptors have been
859 * consumed.
860 */
861int
862virtio_postpone_intr_smart(struct virtqueue *vq)
863{
864 uint16_t nslots;
865
866 nslots = (uint16_t)(vq->vq_avail->idx - vq->vq_used_idx) * 3 / 4;
867
868 return virtio_postpone_intr(vq, nslots);
869}
870
871/*
872 * Postpone interrupt until all of the available descriptors have been
873 * consumed.
874 */
875int
876virtio_postpone_intr_far(struct virtqueue *vq)
877{
878 uint16_t nslots;
879
880 nslots = (uint16_t)(vq->vq_avail->idx - vq->vq_used_idx);
881
882 return virtio_postpone_intr(vq, nslots);
883}
884
885
886/*
887 * Start/stop vq interrupt. No guarantee.
888 */
889void
890virtio_stop_vq_intr(struct virtio_softc *sc, struct virtqueue *vq)
891{
892 if (virtio_has_feature(sc, VIRTIO_F_RING_EVENT_IDX)) {
893 /*
894 * No way to disable the interrupt completely with
895 * RingEventIdx. Instead advance used_event by half
896 * the possible value. This won't happen soon and
897 * is far enough in the past to not trigger a spurious
898 * interrupt.
899 */
900 VQ_USED_EVENT(vq) = vq->vq_used_idx + 0x8000;
901 } else {
902 vq->vq_avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
903 }
904 vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
905 vq->vq_queued++;
906}
907
908int
909virtio_start_vq_intr(struct virtio_softc *sc, struct virtqueue *vq)
910{
911 /*
912 * If event index feature is negotiated, enabling
913 * interrupts is done through setting the latest
914 * consumed index in the used_event field
915 */
916 if (virtio_has_feature(sc, VIRTIO_F_RING_EVENT_IDX))
917 VQ_USED_EVENT(vq) = vq->vq_used_idx;
918 else
919 vq->vq_avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
920
921 virtio_membar_sync();
922
923 vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
924 vq->vq_queued++;
925
926 if (vq->vq_used_idx != vq->vq_used->idx)
927 return 1;
928
929 return 0;
930}
931
932/*
933 * Returns a number of slots in the used ring available to
934 * be supplied to the avail ring.
935 */
936int
937virtio_nused(struct virtqueue *vq)
938{
939 uint16_t n;
940
941 n = (uint16_t)(vq->vq_used->idx - vq->vq_used_idx);
942 VIRTIO_ASSERT(n <= vq->vq_num);
943
944 return n;
945}
946
947#if VIRTIO_DEBUG
948void
949virtio_vq_dump(struct virtqueue *vq)
950{
951 /* Common fields */
952 printf(" + vq num: %d\n", vq->vq_num);
953 printf(" + vq mask: 0x%X\n", vq->vq_mask);
954 printf(" + vq index: %d\n", vq->vq_index);
955 printf(" + vq used idx: %d\n", vq->vq_used_idx);
956 printf(" + vq avail idx: %d\n", vq->vq_avail_idx);
957 printf(" + vq queued: %d\n",vq->vq_queued);
958 /* Avail ring fields */
959 printf(" + avail flags: 0x%X\n", vq->vq_avail->flags);
960 printf(" + avail idx: %d\n", vq->vq_avail->idx);
961 printf(" + avail event: %d\n", VQ_AVAIL_EVENT(vq)(*(uint16_t*)(&(vq)->vq_used->ring[(vq)->vq_num]
))
);
962 /* Used ring fields */
963 printf(" + used flags: 0x%X\n",vq->vq_used->flags);
964 printf(" + used idx: %d\n",vq->vq_used->idx);
965 printf(" + used event: %d\n", VQ_USED_EVENT(vq)(*(uint16_t*)(&(vq)->vq_avail->ring[(vq)->vq_num
]))
);
966 printf(" +++++++++++++++++++++++++++\n");
967}
968#endif