Bug Summary

File: dev/pv/xenstore.c
Warning: line 1041, column 34
Access to field 'iov_base' results in a dereference of a null pointer (loaded from variable 'iovp')
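
The defect in brief: xs_cmpprop() initializes 'iovp' to NULL and passes &iovp to xs_cmd(). On the XS_ERROR branch xs_cmd() returns without ever writing to '*iov' (step 26 below), and the analyzer assumes that return value may be zero (step 28), so the strcmp() at line 1041 dereferences a null pointer. Condensed from the annotated path (standalone sketch, not the driver code):

    struct iovec *iovp = NULL;      /* step 2 */
    int iov_cnt = 0, error;

    error = xs_cmd(&xst, XS_READ, key, &iovp, &iov_cnt);
    /* steps 10-27: xs_cmd() takes the XS_ERROR branch and returns
     * with '*iov' still untouched (step 26) */
    if (error != 0)                 /* step 28: assumed false */
            return (error);

    *result = strcmp(value, (char *)iovp->iov_base);    /* step 30 */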

Annotated Source Code


clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name xenstore.c -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/lib/clang/13.0.0 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -D CONFIG_DRM_AMD_DC_DCN3_0 -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant -fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free 
-fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /usr/obj/sys/arch/amd64/compile/GENERIC.MP/scan-build/2022-01-12-131800-47421-1 -x c /usr/src/sys/dev/pv/xenstore.c
1/* $OpenBSD: xenstore.c,v 1.46 2022/01/09 05:42:58 jsg Exp $ */
2
3/*
4 * Copyright (c) 2015 Mike Belopuhov
5 *
6 * Permission to use, copy, modify, and distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18
19#include <sys/param.h>
20#include <sys/systm.h>
21#include <sys/atomic.h>
22#include <sys/kernel.h>
23#include <sys/malloc.h>
24#include <sys/device.h>
25#include <sys/mutex.h>
26#include <sys/rwlock.h>
27#include <sys/ioctl.h>
28#include <sys/task.h>
29
30#include <machine/bus.h>
31
32#include <uvm/uvm_extern.h>
33
34#include <dev/pv/pvvar.h>
35#include <dev/pv/xenreg.h>
36#include <dev/pv/xenvar.h>
37
38/* #define XS_DEBUG */
39
40#ifdef XS_DEBUG
41#define DPRINTF(x...) printf(x)
42#else
43#define DPRINTF(x...)
44#endif
45
46/*
47 * The XenStore interface is a simple storage system that is a means of
48 * communicating state and configuration data between the Xen Domain 0
49 * and the various guest domains. All configuration data other than
50 * a small amount of essential information required during the early
51 * boot process of launching a Xen aware guest, is managed using the
52 * XenStore.
53 *
54 * The XenStore is ASCII string based, and has a structure and semantics
55 * similar to a filesystem. There are files and directories that are
56 * able to contain files or other directories. The depth of the hierarchy
57 * is only limited by the XenStore's maximum path length.
58 *
59 * The communication channel between the XenStore service and other
60 * domains is via two, guest specific, ring buffers in a shared memory
61 * area. One ring buffer is used for communicating in each direction.
62 * The grant table references for this shared memory are given to the
63 * guest via HVM hypercalls.
64 *
65 * The XenStore communication relies on an event channel and thus
66 * interrupts. Several Xen services depend on the XenStore, most
67 * notably the XenBus used to discover and manage Xen devices.
68 */
69
70const struct {
71 const char *xse_errstr;
72 int xse_errnum;
73} xs_errors[] = {
74 { "EINVAL", EINVAL22 },
75 { "EACCES", EACCES13 },
76 { "EEXIST", EEXIST17 },
77 { "EISDIR", EISDIR21 },
78 { "ENOENT", ENOENT2 },
79 { "ENOMEM", ENOMEM12 },
80 { "ENOSPC", ENOSPC28 },
81 { "EIO", EIO5 },
82 { "ENOTEMPTY", ENOTEMPTY66 },
83 { "ENOSYS", ENOSYS78 },
84 { "EROFS", EROFS30 },
85 { "EBUSY", EBUSY16 },
86 { "EAGAIN", EAGAIN35 },
87 { "EISCONN", EISCONN56 },
88 { NULL((void *)0), -1 },
89};
90
91struct xs_msghdr {
92 /* Message type */
93 uint32_t xmh_type;
94 /* Request identifier, echoed in daemon's response. */
95 uint32_t xmh_rid;
96 /* Transaction id (0 if not related to a transaction). */
97 uint32_t xmh_tid;
98 /* Length of data following this. */
99 uint32_t xmh_len;
100 /* Generally followed by nul-terminated string(s). */
101} __packed;
102
103/*
104 * A minimum output buffer size needed to store an error string.
105 */
106#define XS_ERR_PAYLOAD 16
107
108/*
109 * Although the Xen source code implies that the limit is 4k,
110 * in practice it turns out that we can only send 2k bytes of
111 * payload before receiving a ENOSPC. We set it to an even
112 * smaller value however, because there's no real need to use
113 * large buffers for anything.
114 */
115#define XS_MAX_PAYLOAD 1024
116
117struct xs_msg {
118 struct xs_msghdr xsm_hdr;
119 uint32_t xsm_read;
120 uint32_t xsm_dlen;
121 uint8_t *xsm_data;
122 TAILQ_ENTRY(xs_msg) xsm_link;
123};
124TAILQ_HEAD(xs_msgq, xs_msg);
125
126#define XS_RING_SIZE 1024
127
128struct xs_ring {
129 uint8_t xsr_req[XS_RING_SIZE];
130 uint8_t xsr_rsp[XS_RING_SIZE];
131 uint32_t xsr_req_cons;
132 uint32_t xsr_req_prod;
133 uint32_t xsr_rsp_cons;
134 uint32_t xsr_rsp_prod;
135} __packed;
136
137#define XST_DELAY 1 /* in seconds */
138
139#define XSW_TOKLEN (sizeof(void *) * 2 + 1)
140
141struct xs_watch {
142 TAILQ_ENTRY(xs_watch) xsw_entry;
143 uint8_t xsw_token[XSW_TOKLEN];
144 struct task *xsw_task;
145};
146
147/*
148 * Container for all XenStore related state.
149 */
150struct xs_softc {
151 struct xen_softc *xs_sc;
152
153 evtchn_port_t xs_port;
154 xen_intr_handle_t xs_ih;
155
156 struct xs_ring *xs_ring;
157
158 struct xs_msg xs_msgs[10];
159 struct xs_msg *xs_rmsg;
160
161 struct xs_msgq xs_free;
162 struct xs_msgq xs_reqs;
163 struct xs_msgq xs_rsps;
164
165 volatile uint xs_rid;
166
167 const char *xs_wchan;
168 const char *xs_rchan;
169
170 struct mutex xs_reqlck; /* request queue mutex */
171 struct mutex xs_rsplck; /* response queue mutex */
172 struct mutex xs_frqlck; /* free queue mutex */
173
174 TAILQ_HEAD(, xs_watch) xs_watches;
175 struct mutex xs_watchlck;
176 struct xs_msg xs_emsg;
177 struct taskq *xs_watchtq;
178
179 struct rwlock xs_rnglck;
180};
181
182struct xs_msg *
183 xs_get_msg(struct xs_softc *, int);
184void xs_put_msg(struct xs_softc *, struct xs_msg *);
185int xs_ring_get(struct xs_softc *, void *, size_t);
186int xs_ring_put(struct xs_softc *, void *, size_t);
187void xs_intr(void *);
188void xs_poll(struct xs_softc *, int);
189int xs_output(struct xs_transaction *, uint8_t *, int);
190int xs_start(struct xs_transaction *, struct xs_msg *, struct iovec *, int);
191struct xs_msg *
192 xs_reply(struct xs_transaction *, uint);
193int xs_parse(struct xs_transaction *, struct xs_msg *, struct iovec **,
194 int *);
195int xs_event(struct xs_softc *, struct xs_msg *);
196
197int
198xs_attach(struct xen_softc *sc)
199{
200 struct xen_hvm_param xhv;
201 struct xs_softc *xs;
202 paddr_t pa;
203 int i;
204
205 if ((xs = malloc(sizeof(*xs), M_DEVBUF, M_NOWAIT | M_ZERO)) == NULL) {
206 printf(": failed to allocate xenstore softc\n");
207 return (-1);
208 }
209 sc->sc_xs = xs;
210 xs->xs_sc = sc;
211
212 /* Fetch event channel port */
213 memset(&xhv, 0, sizeof(xhv));
214 xhv.domid = DOMID_SELF;
215 xhv.index = HVM_PARAM_STORE_EVTCHN;
216 if (xen_hypercall(sc, XC_HVM, 2, HVMOP_get_param, &xhv)) {
217 printf(": failed to obtain a xenstore event channel\n");
218 goto fail_1;
219 }
220 xs->xs_port = xhv.value;
221
222 printf(", event channel %u\n", xs->xs_port);
223
224 /* Fetch a frame number (PA) of a shared xenstore page */
225 memset(&xhv, 0, sizeof(xhv));
226 xhv.domid = DOMID_SELF;
227 xhv.index = HVM_PARAM_STORE_PFN;
228 if (xen_hypercall(sc, XC_HVM, 2, HVMOP_get_param, &xhv))
229 goto fail_1;
230 pa = ptoa(xhv.value);
231 /* Allocate a page of virtual memory */
232 xs->xs_ring = km_alloc(PAGE_SIZE, &kv_any, &kp_none, &kd_nowait);
233 if (xs->xs_ring == NULL)
234 goto fail_1;
235 /* Map in the xenstore page into our KVA */
236 pa |= PMAP_NOCACHE;
237 pmap_kenter_pa((vaddr_t)xs->xs_ring, pa, PROT_READ | PROT_WRITE);
238 pmap_update(pmap_kernel());
239
240 if (xen_intr_establish(xs->xs_port, &xs->xs_ih, 0, xs_intr, xs,
241 sc->sc_dev.dv_xname))
242 goto fail_2;
243
244 xs->xs_wchan = "xswrite";
245 xs->xs_rchan = "xsread";
246
247 TAILQ_INIT(&xs->xs_free);
248 TAILQ_INIT(&xs->xs_reqs);
249 TAILQ_INIT(&xs->xs_rsps);
250 for (i = 0; i < nitems(xs->xs_msgs); i++)
251 TAILQ_INSERT_TAIL(&xs->xs_free, &xs->xs_msgs[i], xsm_link);
252
253 mtx_init(&xs->xs_reqlck, IPL_NET);
254 mtx_init(&xs->xs_rsplck, IPL_NET);
255 mtx_init(&xs->xs_frqlck, IPL_NET);
256
257 rw_init(&xs->xs_rnglck, "xsrnglck");
258
259 xs->xs_watchtq = taskq_create("xenwatch", 1, IPL_NET, 0);
260
261 mtx_init(&xs->xs_watchlck, IPL_NET);
262 TAILQ_INIT(&xs->xs_watches);
263
264 xs->xs_emsg.xsm_data = malloc(XS_MAX_PAYLOAD, M_DEVBUF,
265 M_ZERO | M_NOWAIT);
266 if (xs->xs_emsg.xsm_data == NULL)
267 goto fail_2;
268 xs->xs_emsg.xsm_dlen = XS_MAX_PAYLOAD;
269
270 return (0);
271
272 fail_2:
273 pmap_kremove((vaddr_t)xs->xs_ring, PAGE_SIZE);
274 pmap_update(pmap_kernel());
275 km_free(xs->xs_ring, PAGE_SIZE, &kv_any, &kp_none);
276 xs->xs_ring = NULL;
277 fail_1:
278 free(xs, sizeof(*xs), M_DEVBUF);
279 sc->sc_xs = NULL;
280 return (-1);
281}
282
283struct xs_msg *
284xs_get_msg(struct xs_softc *xs, int waitok)
285{
286 static const char *chan = "xsalloc";
287 struct xs_msg *xsm;
288
289 mtx_enter(&xs->xs_frqlck);
290 for (;;) {
291 xsm = TAILQ_FIRST(&xs->xs_free);
292 if (xsm != NULL) {
293 TAILQ_REMOVE(&xs->xs_free, xsm, xsm_link);
294 break;
295 }
296 if (!waitok) {
297 mtx_leave(&xs->xs_frqlck);
298 delay(XST_DELAY * 1000 >> 2);
299 mtx_enter(&xs->xs_frqlck);
300 } else
301 msleep_nsec(chan, &xs->xs_frqlck, PRIBIO, chan,
302 SEC_TO_NSEC(XST_DELAY) >> 2);
303 }
304 mtx_leave(&xs->xs_frqlck);
305 return (xsm);
306}
307
308void
309xs_put_msg(struct xs_softc *xs, struct xs_msg *xsm)
310{
311 memset(xsm, 0, sizeof(*xsm));
312 mtx_enter(&xs->xs_frqlck);
313 TAILQ_INSERT_TAIL(&xs->xs_free, xsm, xsm_link);
314 mtx_leave(&xs->xs_frqlck);
315}
316
317int
318xs_geterror(struct xs_msg *xsm)
319{
320 int i;
321
322 for (i = 0; i < nitems(xs_errors); i++)
323 if (strcmp(xs_errors[i].xse_errstr, xsm->xsm_data) == 0)
324 return (xs_errors[i].xse_errnum);
325 return (EOPNOTSUPP);
326}
327
328static inline uint32_t
329xs_ring_avail(struct xs_ring *xsr, int req)
330{
331 uint32_t cons = req ? xsr->xsr_req_cons : xsr->xsr_rsp_cons;
332 uint32_t prod = req ? xsr->xsr_req_prod : xsr->xsr_rsp_prod;
333
334 KASSERT(prod - cons <= XS_RING_SIZE);
335 return (req ? XS_RING_SIZE - (prod - cons) : prod - cons);
336}
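
A standalone sketch of the free-running index arithmetic used above (illustration only, not part of the driver): the producer and consumer counters grow without bound, so unsigned subtraction yields the number of occupied bytes even after the 32-bit counters wrap; masking with XS_RING_SIZE - 1 happens only when a slot index is needed.

    #include <assert.h>
    #include <stdint.h>

    #define RING_SIZE 1024                          /* must be a power of two */

    int
    main(void)
    {
            uint32_t cons = UINT32_MAX - 10;        /* about to wrap */
            uint32_t prod = cons + 100;             /* wrapped past zero */
            uint32_t used = prod - cons;            /* == 100 despite the wrap */
            uint32_t room = RING_SIZE - used;       /* request-ring headroom */

            assert(used == 100 && room == 924);
            assert((prod & (RING_SIZE - 1)) < RING_SIZE);   /* slot index */
            return 0;
    }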
337
338void
339xs_poll(struct xs_softc *xs, int nosleep)
340{
341 int s;
342
343 if (nosleep) {
344 delay(XST_DELAY * 1000 >> 2);
345 s = splnet();
346 xs_intr(xs);
347 splx(s);
348 } else {
349 tsleep_nsec(xs->xs_wchan, PRIBIO, xs->xs_wchan,
350 SEC_TO_NSEC(XST_DELAY) >> 2);
351 }
352}
353
354int
355xs_output(struct xs_transaction *xst, uint8_t *bp, int len)
356{
357 struct xs_softc *xs = xst->xst_cookie;
358 int chunk;
359
360 while (len > 0) {
361 chunk = xs_ring_put(xs, bp, MIN(len, XS_RING_SIZE));
362 if (chunk < 0)
363 return (-1);
364 if (chunk > 0) {
365 len -= chunk;
366 bp += chunk;
367 if (xs_ring_avail(xs->xs_ring, 1) > 0)
368 continue;
369 }
370 /* Squeaky wheel gets the kick */
371 xen_intr_signal(xs->xs_ih);
372 /*
373 * chunk == 0: we need to wait for hv to consume
374 * what has already been written;
375 *
376 * Alternatively we have managed to fill the ring
377 * and must wait for HV to collect the data.
378 */
379 while (xs->xs_ring->xsr_req_prod != xs->xs_ring->xsr_req_cons)
380 xs_poll(xs, 1);
381 }
382 return (0);
383}
384
385int
386xs_start(struct xs_transaction *xst, struct xs_msg *xsm, struct iovec *iov,
387 int iov_cnt)
388{
389 struct xs_softc *xs = xst->xst_cookie;
390 int i;
391
392 rw_enter_write(&xs->xs_rnglck);
393
394 /* Header */
395 if (xs_output(xst, (uint8_t *)&xsm->xsm_hdr,
396 sizeof(xsm->xsm_hdr)) == -1) {
397 printf("%s: failed to write the header\n", __func__);
398 rw_exit_write(&xs->xs_rnglck);
399 return (-1);
400 }
401
402 /* Data loop */
403 for (i = 0; i < iov_cnt; i++) {
404 if (xs_output(xst, iov[i].iov_base, iov[i].iov_len) == -1) {
405 printf("%s: failed on iovec #%d len %lu\n", __func__,
406 i, iov[i].iov_len);
407 rw_exit_write(&xs->xs_rnglck);
408 return (-1);
409 }
410 }
411
412 mtx_enter(&xs->xs_reqlck);
413 TAILQ_INSERT_TAIL(&xs->xs_reqs, xsm, xsm_link);
414 mtx_leave(&xs->xs_reqlck);
415
416 xen_intr_signal(xs->xs_ih);
417
418 rw_exit_write(&xs->xs_rnglck);
419
420 return (0);
421}
422
423struct xs_msg *
424xs_reply(struct xs_transaction *xst, uint rid)
425{
426 struct xs_softc *xs = xst->xst_cookie;
427 struct xs_msg *xsm;
428 int s;
429
430 mtx_enter(&xs->xs_rsplck);
431 for (;;) {
432 TAILQ_FOREACH(xsm, &xs->xs_rsps, xsm_link) {
433 if (xsm->xsm_hdr.xmh_tid == xst->xst_id &&
434 xsm->xsm_hdr.xmh_rid == rid)
435 break;
436 }
437 if (xsm != NULL) {
438 TAILQ_REMOVE(&xs->xs_rsps, xsm, xsm_link);
439 break;
440 }
441 if (cold) {
442 mtx_leave(&xs->xs_rsplck);
443 delay(XST_DELAY * 1000 >> 2);
444 s = splnet();
445 xs_intr(xs);
446 splx(s);
447 mtx_enter(&xs->xs_rsplck);
448 } else
449 msleep_nsec(xs->xs_rchan, &xs->xs_rsplck, PRIBIO,
450 xs->xs_rchan, SEC_TO_NSEC(XST_DELAY) >> 2);
451 }
452 mtx_leave(&xs->xs_rsplck);
453 return (xsm);
454}
455
456int
457xs_ring_put(struct xs_softc *xs, void *src, size_t size)
458{
459 struct xs_ring *xsr = xs->xs_ring;
460 uint32_t prod = xsr->xsr_req_prod & (XS_RING_SIZE - 1);
461 uint32_t avail = xs_ring_avail(xsr, 1);
462 size_t left;
463
464 if (size > XS_RING_SIZE)
465 return (-1);
466 if (avail == 0)
467 return (0);
468
469 /* Bound the size by the number of available slots */
470 size = MIN(size, avail);
471 /* How many contiguous bytes can we memcpy... */
472 left = XS_RING_SIZE - prod;
473 /* ...bounded by how much we need to write? */
474 left = MIN(left, size);
475
476 memcpy(&xsr->xsr_req[prod], src, left);
477 memcpy(&xsr->xsr_req[0], (caddr_t)src + left, size - left);
478 virtio_membar_sync();
479 xsr->xsr_req_prod += size;
480 return (size);
481}
482
483int
484xs_ring_get(struct xs_softc *xs, void *dst, size_t size)
485{
486 struct xs_ring *xsr = xs->xs_ring;
487 uint32_t cons = xsr->xsr_rsp_cons & (XS_RING_SIZE - 1);
488 uint32_t avail = xs_ring_avail(xsr, 0);
489 size_t left;
490
491 if (size > XS_RING_SIZE)
492 return (-1);
493 if (avail == 0)
494 return (0);
495
496 /* Bound the size by the number of available slots */
497 size = MIN(size, avail);
498 /* How many contiguous bytes can we memcpy... */
499 left = XS_RING_SIZE - cons;
500 /* ...bounded by how much we need to read? */
501 left = MIN(left, size);
502
503 memcpy(dst, &xsr->xsr_rsp[cons], left);
504 memcpy((caddr_t)dst + left, &xsr->xsr_rsp[0], size - left);
505 virtio_membar_sync();
506 xsr->xsr_rsp_cons += size;
507 return (size);
508}
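
Both ring copies above handle wrap-around with a pair of memcpy() calls: 'left' bytes go up to the end of the buffer and the remainder continues at index 0. A user-space sketch of the same split (assumptions: a power-of-two ring and a single producer, as in this driver):

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    #define RING_SIZE 8

    static void
    ring_copy_in(uint8_t *ring, uint32_t prod, const void *src, size_t size)
    {
            uint32_t off = prod & (RING_SIZE - 1);
            size_t left = RING_SIZE - off;          /* contiguous tail space */

            if (left > size)
                    left = size;
            memcpy(&ring[off], src, left);          /* up to the end... */
            memcpy(&ring[0], (const uint8_t *)src + left, size - left);
    }

    int
    main(void)
    {
            uint8_t ring[RING_SIZE] = { 0 };

            ring_copy_in(ring, 6, "abcd", 4);       /* lands at 6, 7, 0, 1 */
            assert(ring[6] == 'a' && ring[7] == 'b');
            assert(ring[0] == 'c' && ring[1] == 'd');
            return 0;
    }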
509
510void
511xs_intr(void *arg)
512{
513 struct xs_softc *xs = arg;
514 struct xs_ring *xsr = xs->xs_ring;
515 struct xen_softc *sc = xs->xs_sc;
516 struct xs_msg *xsm = xs->xs_rmsg;
517 struct xs_msghdr xmh;
518 uint32_t avail;
519 int len;
520
521 virtio_membar_sync();
522
523 if (xsr->xsr_rsp_cons == xsr->xsr_rsp_prod)
524 return;
525
526 avail = xs_ring_avail(xsr, 0);
527
528 /* Response processing */
529
530 again:
531 if (xs->xs_rmsg == NULL) {
532 if (avail < sizeof(xmh)) {
533 DPRINTF("%s: incomplete header: %u\n",
534 sc->sc_dev.dv_xname, avail);
535 goto out;
536 }
537 avail -= sizeof(xmh);
538
539 if ((len = xs_ring_get(xs, &xmh, sizeof(xmh))) != sizeof(xmh)) {
540 printf("%s: message too short: %d\n",
541 sc->sc_dev.dv_xname, len);
542 goto out;
543 }
544
545 if (xmh.xmh_type == XS_EVENT) {
546 xsm = &xs->xs_emsg;
547 xsm->xsm_read = 0;
548 } else {
549 mtx_enter(&xs->xs_reqlck);
550 TAILQ_FOREACH(xsm, &xs->xs_reqs, xsm_link) {
551 if (xsm->xsm_hdr.xmh_rid == xmh.xmh_rid) {
552 TAILQ_REMOVE(&xs->xs_reqs, xsm,
553 xsm_link);
554 break;
555 }
556 }
557 mtx_leave(&xs->xs_reqlck);
558 if (xsm == NULL) {
559 printf("%s: unexpected message id %u\n",
560 sc->sc_dev.dv_xname, xmh.xmh_rid);
561 goto out;
562 }
563 }
564 memcpy(&xsm->xsm_hdr, &xmh, sizeof(xmh));
565 xs->xs_rmsg = xsm;
566 }
567
568 if (xsm->xsm_hdr.xmh_len > xsm->xsm_dlen)
569 panic("message too large: %d vs %d for type %d, rid %u",
570 xsm->xsm_hdr.xmh_len, xsm->xsm_dlen, xsm->xsm_hdr.xmh_type,
571 xsm->xsm_hdr.xmh_rid);
572
573 len = MIN(xsm->xsm_hdr.xmh_len - xsm->xsm_read, avail);
574 if (len) {
575 /* Get data if reply is not empty */
576 if ((len = xs_ring_get(xs,
577 &xsm->xsm_data[xsm->xsm_read], len)) <= 0) {
578 printf("%s: read failure %d\n", sc->sc_dev.dv_xname,
579 len);
580 goto out;
581 }
582 xsm->xsm_read += len;
583 }
584
585 /* Notify reader that we've managed to read the whole message */
586 if (xsm->xsm_read == xsm->xsm_hdr.xmh_len) {
587 xs->xs_rmsg = NULL;
588 if (xsm->xsm_hdr.xmh_type == XS_EVENT) {
589 xs_event(xs, xsm);
590 } else {
591 mtx_enter(&xs->xs_rsplck);
592 TAILQ_INSERT_TAIL(&xs->xs_rsps, xsm, xsm_link);
593 mtx_leave(&xs->xs_rsplck);
594 wakeup(xs->xs_rchan);
595 }
596 }
597
598 if ((avail = xs_ring_avail(xsr, 0)) > 0)
599 goto again;
600
601 out:
602 /* Wakeup sleeping writes (if any) */
603 wakeup(xs->xs_wchan);
604 xen_intr_signal(xs->xs_ih);
605}
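
xs_intr() reassembles replies that arrive piecemeal: a partially read message is parked in xs_rmsg with xsm_read recording progress, and the next interrupt resumes where the previous one stopped. A hypothetical two-interrupt trace (the 16-byte figure is sizeof(struct xs_msghdr)):

    /*
     * Interrupt 1: ring holds the 16-byte header plus 10 of 24
     * payload bytes.
     *   xs_ring_get(&xmh, 16)  -> xsm looked up on xs_reqs by xmh_rid
     *   xs_ring_get(..., 10)   -> xsm_read = 10, parked in xs_rmsg
     *
     * Interrupt 2: the remaining 14 bytes have been produced.
     *   xs_rmsg != NULL        -> header parsing is skipped
     *   xs_ring_get(..., 14)   -> xsm_read = 24 == xmh_len
     *   reply moved to xs_rsps, wakeup(xs_rchan)
     */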
606
607static inline int
608xs_get_buf(struct xs_transaction *xst, struct xs_msg *xsm, int len)
609{
610 unsigned char *buf;
611
612 buf = malloc(len, M_DEVBUF, M_ZERO | (cold ? M_NOWAIT : M_WAITOK));
613 if (buf == NULL)
614 return (-1);
615 xsm->xsm_dlen = len;
616 xsm->xsm_data = buf;
617 return (0);
618}
619
620static inline void
621xs_put_buf(struct xs_transaction *xst, struct xs_msg *xsm)
622{
623 free(xsm->xsm_data, M_DEVBUF, xsm->xsm_dlen);
624 xsm->xsm_data = NULL;
625}
626
627void
628xs_resfree(struct xs_transaction *xst, struct iovec *iov, int iov_cnt)
629{
630 int i;
631
632 for (i = 0; i < iov_cnt; i++)
633 free(iov[i].iov_base, M_DEVBUF, iov[i].iov_len);
634 free(iov, M_DEVBUF, sizeof(struct iovec) * iov_cnt);
635}
636
637int
638xs_parse(struct xs_transaction *xst, struct xs_msg *xsm, struct iovec **iov,
639 int *iov_cnt)
640{
641 char *bp, *cp;
642 uint32_t dlen;
643 int i, flags;
644
645 /* If the response size is zero, we return an empty string */
646 dlen = MAX(xsm->xsm_hdr.xmh_len, 1);
647 flags = M_ZERO | (cold ? M_NOWAIT : M_WAITOK);
648
649 *iov_cnt = 0;
650 /* Make sure that the data is NUL terminated */
651 if (xsm->xsm_data[dlen - 1] != '\0') {
652 /*
653 * The XS_READ operation always returns length without
654 * the trailing NUL so we have to adjust the length.
655 */
656 dlen = MIN(dlen + 1, xsm->xsm_dlen);
657 xsm->xsm_data[dlen - 1] = '\0';
658 }
659 for (i = 0; i < dlen; i++)
660 if (xsm->xsm_data[i] == '\0')
661 (*iov_cnt)++;
662 *iov = mallocarray(*iov_cnt, sizeof(struct iovec), M_DEVBUF, flags);
663 if (*iov == NULL)
664 goto cleanup;
665 bp = xsm->xsm_data;
666 for (i = 0; i < *iov_cnt; i++) {
667 cp = bp;
668 while (cp - (caddr_t)xsm->xsm_data < dlen && *cp != '\0')
669 cp++;
670 (*iov)[i].iov_len = cp - bp + 1;
671 (*iov)[i].iov_base = malloc((*iov)[i].iov_len, M_DEVBUF, flags);
672 if (!(*iov)[i].iov_base) {
673 xs_resfree(xst, *iov, *iov_cnt);
674 goto cleanup;
675 }
676 memcpy((*iov)[i].iov_base, bp, (*iov)[i].iov_len);
677 bp = ++cp;
678 }
679 return (0);
680
681 cleanup:
682 *iov = NULL;
683 *iov_cnt = 0;
684 return (ENOMEM);
685}
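
A worked example of the slicing xs_parse() performs (hypothetical reply, illustration only): an XS_LIST response carrying "backend\0frontend\0" with xmh_len == 17 contains two NUL terminators, so *iov_cnt becomes 2 and the result is

    iov[0].iov_base = "backend",  iov[0].iov_len = 8   /* 7 chars + NUL */
    iov[1].iov_base = "frontend", iov[1].iov_len = 9   /* 8 chars + NUL */

with each iov_base individually allocated and later released through xs_resfree().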
686
687int
688xs_event(struct xs_softc *xs, struct xs_msg *xsm)
689{
690 struct xs_watch *xsw;
691 char *token = NULL;
692 int i;
693
694 for (i = 0; i < xsm->xsm_read; i++) {
695 if (xsm->xsm_data[i] == '\0') {
696 token = &xsm->xsm_data[i+1];
697 break;
698 }
699 }
700 if (token == NULL) {
701 printf("%s: event on \"%s\" without token\n",
702 xs->xs_sc->sc_dev.dv_xname, xsm->xsm_data);
703 return (-1);
704 }
705
706 mtx_enter(&xs->xs_watchlck);
707 TAILQ_FOREACH(xsw, &xs->xs_watches, xsw_entry) {
708 if (strcmp(xsw->xsw_token, token))
709 continue;
710 mtx_leave(&xs->xs_watchlck);
711 task_add(xs->xs_watchtq, xsw->xsw_task);
712 return (0);
713 }
714 mtx_leave(&xs->xs_watchlck);
715
716 printf("%s: no watchers for node \"%s\"\n",
717 xs->xs_sc->sc_dev.dv_xname, xsm->xsm_data);
718 return (-1);
719}
720
721int
722xs_cmd(struct xs_transaction *xst, int cmd, const char *path,
723 struct iovec **iov, int *iov_cnt)
724{
725 struct xs_softc *xs = xst->xst_cookie;
726 struct xs_msg *xsm;
727 struct iovec ov[10]; /* output vector */
728 int datalen = XS_ERR_PAYLOAD;
729 int ov_cnt = 0;
730 enum { READ, WRITE } mode = READ;
731 int i, error = 0;
732
733 if (cmd >= XS_MAX)
    (10.1) 'cmd' is < XS_MAX
    (11) Taking false branch
734 return (EINVAL);
735
736 switch (cmd) {
    (12) Control jumps to the 'default' case at line 748
737 case XS_TOPEN:
738 ov[0].iov_base = "";
739 ov[0].iov_len = 1;
740 ov_cnt++;
741 break;
742 case XS_TCLOSE:
743 case XS_RM:
744 case XS_WATCH:
745 case XS_WRITE:
746 mode = WRITE;
747 /* FALLTHROUGH */
748 default:
749 if (mode == READ)
    (12.1) 'mode' is equal to READ
    (13) Taking true branch
750 datalen = XS_MAX_PAYLOAD;
751 break;
    (14) Execution continues on line 754
752 }
753
754 if (path) {
    (14.1) 'path' is non-null
    (15) Taking true branch
755 ov[ov_cnt].iov_base = (void *)path;
756 ov[ov_cnt++].iov_len = strlen(path) + 1; /* +NUL */
757 }
758
759 if (mode == WRITE && iov && iov_cnt && *iov_cnt > 0) {
    (15.1) 'mode' is not equal to WRITE
760 for (i = 0; i < *iov_cnt && ov_cnt < nitems(ov);
761 i++, ov_cnt++) {
762 ov[ov_cnt].iov_base = (*iov)[i].iov_base;
763 ov[ov_cnt].iov_len = (*iov)[i].iov_len;
764 }
765 }
766
767 xsm = xs_get_msg(xs, !cold);
    (16) Assuming 'cold' is not equal to 0
768
769 if (xs_get_buf(xst, xsm, datalen)) {
    (17) Taking false branch
770 xs_put_msg(xs, xsm);
771 return (ENOMEM);
772 }
773
774 xsm->xsm_hdr.xmh_tid = xst->xst_id;
775 xsm->xsm_hdr.xmh_type = cmd;
776 xsm->xsm_hdr.xmh_rid = atomic_inc_int_nv(&xs->xs_rid);
777
778 for (i = 0; i < ov_cnt; i++)
    (18) Loop condition is true. Entering loop body
    (19) Loop condition is false. Execution continues on line 781
779 xsm->xsm_hdr.xmh_len += ov[i].iov_len;
780
781 if (xsm->xsm_hdr.xmh_len > XS_MAX_PAYLOAD) {
    (20) Assuming field 'xmh_len' is <= XS_MAX_PAYLOAD
    (21) Taking false branch
782 printf("%s: message type %d with payload above the limit\n",
783 xs->xs_sc->sc_dev.dv_xname, cmd);
784 xs_put_buf(xst, xsm);
785 xs_put_msg(xs, xsm);
786 return (EIO);
787 }
788
789 if (xs_start(xst, xsm, ov, ov_cnt)) {
    (22) Assuming the condition is false
    (23) Taking false branch
790 printf("%s: message type %d transmission failed\n",
791 xs->xs_sc->sc_dev.dv_xname, cmd);
792 xs_put_buf(xst, xsm);
793 xs_put_msg(xs, xsm);
794 return (EIO);
795 }
796
797 xsm = xs_reply(xst, xsm->xsm_hdr.xmh_rid);
798
799 if (xsm->xsm_hdr.xmh_type == XS_ERROR) {
    (24) Assuming field 'xmh_type' is equal to XS_ERROR
    (25) Taking true branch
800 error = xs_geterror(xsm);
801 DPRINTF("%s: xenstore request %d \"%s\" error %s\n",
802 xs->xs_sc->sc_dev.dv_xname, cmd, path, xsm->xsm_data);
803 } else if (mode == READ) {
804 KASSERT(iov && iov_cnt);
805 error = xs_parse(xst, xsm, iov, iov_cnt);
806 }
807#ifdef XS_DEBUG
808 else
809 if (strcmp(xsm->xsm_data, "OK"))
810 printf("%s: xenstore request %d failed: %s\n",
811 xs->xs_sc->sc_dev.dv_xname, cmd, xsm->xsm_data);
812#endif
813
814 xs_put_buf(xst, xsm);
815 xs_put_msg(xs, xsm);
816
817 return (error);
    (26) Returning without writing to '*iov'
818}
819
820int
821xs_watch(void *xsc, const char *path, const char *property, struct task *task,
822 void (*cb)(void *), void *arg)
823{
824 struct xen_softc *sc = xsc;
825 struct xs_softc *xs = sc->sc_xs;
826 struct xs_transaction xst;
827 struct xs_watch *xsw;
828 struct iovec iov, *iovp = &iov;
829 char key[256];
830 int error, iov_cnt, ret;
831
832 memset(&xst, 0, sizeof(xst));
833 xst.xst_id = 0;
834 xst.xst_cookie = sc->sc_xs;
835
836 xsw = malloc(sizeof(*xsw), M_DEVBUF, M_NOWAIT | M_ZERO);
837 if (xsw == NULL)
838 return (-1);
839
840 task_set(task, cb, arg);
841 xsw->xsw_task = task;
842
843 snprintf(xsw->xsw_token, sizeof(xsw->xsw_token), "%0lx",
844 (unsigned long)xsw);
845
846 if (path)
847 ret = snprintf(key, sizeof(key), "%s/%s", path, property);
848 else
849 ret = snprintf(key, sizeof(key), "%s", property);
850 if (ret == -1 || ret >= sizeof(key)) {
851 free(xsw, M_DEVBUF, sizeof(*xsw));
852 return (EINVAL);
853 }
854
855 iov.iov_base = xsw->xsw_token;
856 iov.iov_len = sizeof(xsw->xsw_token);
857 iov_cnt = 1;
858
859 /*
860 * xs_watches must be prepared pre-emptively because a xenstore
861 * event is raised immediately after a watch is established.
862 */
863 mtx_enter(&xs->xs_watchlck);
864 TAILQ_INSERT_TAIL(&xs->xs_watches, xsw, xsw_entry);
865 mtx_leave(&xs->xs_watchlck);
866
867 if ((error = xs_cmd(&xst, XS_WATCH, key, &iovp, &iov_cnt)) != 0) {
868 mtx_enter(&xs->xs_watchlck);
869 TAILQ_REMOVE(&xs->xs_watches, xsw, xsw_entry);
870 mtx_leave(&xs->xs_watchlck);
871 free(xsw, M_DEVBUF, sizeof(*xsw));
872 return (error);
873 }
874
875 return (0);
876}
877
878static unsigned long long
879atoull(const char *cp, int *error)
880{
881 unsigned long long res, cutoff;
882 int ch;
883 int cutlim;
884
885 res = 0;
886 cutoff = ULLONG_MAX / (unsigned long long)10;
887 cutlim = ULLONG_MAX % (unsigned long long)10;
888
889 do {
890 if (*cp < '0' || *cp > '9') {
891 *error = EINVAL;
892 return (res);
893 }
894 ch = *cp - '0';
895 if (res > cutoff || (res == cutoff && ch > cutlim)) {
896 *error = ERANGE;
897 return (res);
898 }
899 res *= 10;
900 res += ch;
901 } while (*(++cp) != '\0');
902
903 *error = 0;
904 return (res);
905}
906
907int
908xs_getnum(void *xsc, const char *path, const char *property,
909 unsigned long long *val)
910{
911 char *buf;
912 int error = 0;
913
914 buf = malloc(XS_MAX_PAYLOAD, M_DEVBUF, M_ZERO |
915 (cold ? M_NOWAIT : M_WAITOK));
916 if (buf == NULL)
917 return (ENOMEM);
918
919 error = xs_getprop(xsc, path, property, buf, XS_MAX_PAYLOAD);
920 if (error)
921 goto out;
922
923 *val = atoull(buf, &error);
924 if (error)
925 goto out;
926
927 out:
928 free(buf, M_DEVBUF, XS_MAX_PAYLOAD);
929 return (error);
930}
931
932int
933xs_setnum(void *xsc, const char *path, const char *property,
934 unsigned long long val)
935{
936 char buf[32];
937 int ret;
938
939 ret = snprintf(buf, sizeof(buf), "%llu", val);
940 if (ret == -1 || ret >= sizeof(buf))
941 return (ERANGE);
942
943 return (xs_setprop(xsc, path, property, buf, strlen(buf)));
944}
945
946int
947xs_getprop(void *xsc, const char *path, const char *property, char *value,
948 int size)
949{
950 struct xen_softc *sc = xsc;
951 struct xs_transaction xst;
952 struct iovec *iovp = NULL;
953 char key[256];
954 int error, ret, iov_cnt = 0;
955
956 if (!property)
957 return (EINVAL);
958
959 memset(&xst, 0, sizeof(xst));
960 xst.xst_id = 0;
961 xst.xst_cookie = sc->sc_xs;
962
963 if (path)
964 ret = snprintf(key, sizeof(key), "%s/%s", path, property);
965 else
966 ret = snprintf(key, sizeof(key), "%s", property);
967 if (ret == -1 || ret >= sizeof(key))
968 return (EINVAL);
969
970 if ((error = xs_cmd(&xst, XS_READ, key, &iovp, &iov_cnt)) != 0)
971 return (error);
972
973 if (iov_cnt > 0)
974 strlcpy(value, (char *)iovp->iov_base, size);
975
976 xs_resfree(&xst, iovp, iov_cnt);
977
978 return (0);
979}
980
981int
982xs_setprop(void *xsc, const char *path, const char *property, char *value,
983 int size)
984{
985 struct xen_softc *sc = xsc;
986 struct xs_transaction xst;
987 struct iovec iov, *iovp = &iov;
988 char key[256];
989 int error, ret, iov_cnt = 0;
990
991 if (!property)
992 return (EINVAL);
993
994 memset(&xst, 0, sizeof(xst));
995 xst.xst_id = 0;
996 xst.xst_cookie = sc->sc_xs;
997
998 if (path)
999 ret = snprintf(key, sizeof(key), "%s/%s", path, property);
1000 else
1001 ret = snprintf(key, sizeof(key), "%s", property);
1002 if (ret == -1 || ret >= sizeof(key))
1003 return (EINVAL);
1004
1005 iov.iov_base = value;
1006 iov.iov_len = size;
1007 iov_cnt = 1;
1008
1009 error = xs_cmd(&xst, XS_WRITE, key, &iovp, &iov_cnt);
1010
1011 return (error);
1012}
1013
1014int
1015xs_cmpprop(void *xsc, const char *path, const char *property, const char *value,
1016 int *result)
1017{
1018 struct xen_softc *sc = xsc;
1019 struct xs_transaction xst;
1020 struct iovec *iovp = NULL;
    (2) 'iovp' initialized to a null pointer value
1021 char key[256];
1022 int error, ret, iov_cnt = 0;
1023
1024 if (!property)
    (3) Assuming 'property' is non-null
    (4) Taking false branch
1025 return (EINVAL);
1026
1027 memset(&xst, 0, sizeof(xst));
1028 xst.xst_id = 0;
1029 xst.xst_cookie = sc->sc_xs;
1030
1031 if (path)
    (5) Assuming 'path' is null
    (6) Taking false branch
1032 ret = snprintf(key, sizeof(key), "%s/%s", path, property);
1033 else
1034 ret = snprintf(key, sizeof(key), "%s", property);
1035 if (ret == -1 || ret >= sizeof(key))
    (7) Assuming the condition is false
    (8) Assuming the condition is false
    (9) Taking false branch
1036 return (EINVAL);
1037
1038 if ((error = xs_cmd(&xst, XS_READ, key, &iovp, &iov_cnt)) != 0)
    (10) Calling 'xs_cmd'
    (27) Returning from 'xs_cmd'
    (28) Assuming the condition is false
    (29) Taking false branch
1039 return (error);
1040
1041 *result = strcmp(value, (char *)iovp->iov_base);
    (30) Access to field 'iov_base' results in a dereference of a null pointer (loaded from variable 'iovp')
1042
1043 xs_resfree(&xst, iovp, iov_cnt);
1044
1045 return (0);
1046}
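
Note the contrast with xs_getprop(), which guards the same pattern at line 973 with "if (iov_cnt > 0)" before touching iovp. A minimal sketch of an equivalent guard here (one possible shape, not the committed fix; treating an empty reply as ENOENT is an assumption):

    if ((error = xs_cmd(&xst, XS_READ, key, &iovp, &iov_cnt)) != 0)
            return (error);

    if (iov_cnt == 0)
            return (ENOENT);        /* hypothetical: no value to compare */

    *result = strcmp(value, (char *)iovp->iov_base);
    xs_resfree(&xst, iovp, iov_cnt);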
1047
1048int
1049xs_await_transition(void *xsc, const char *path, const char *property,
1050 const char *value, int timo)
1051{
1052 struct xen_softc *sc = xsc;
1053 int error, res;
1054
1055 do {
1056 error = xs_cmpprop(xsc, path, property, value, &res);
    (1) Calling 'xs_cmpprop'
1057 if (error)
1058 return (error);
1059 if (timo && --timo == 0)
1060 return (ETIMEDOUT);
1061 xs_poll(sc->sc_xs, cold);
1062 } while (res != 0);
1063
1064 return (0);
1065}
1066
1067int
1068xs_kvop(void *xsc, int op, char *key, char *value, size_t valuelen)
1069{
1070 struct xen_softc *sc = xsc;
1071 struct xs_transaction xst;
1072 struct iovec iov, *iovp = &iov;
1073 int error = 0, iov_cnt = 0, cmd, i;
1074
1075 switch (op) {
1076 case PVBUS_KVWRITE:
1077 cmd = XS_WRITE;
1078 iov.iov_base = value;
1079 iov.iov_len = strlen(value);
1080 iov_cnt = 1;
1081 break;
1082 case PVBUS_KVREAD:
1083 cmd = XS_READ;
1084 break;
1085 case PVBUS_KVLS:
1086 cmd = XS_LIST;
1087 break;
1088 default:
1089 return (EOPNOTSUPP);
1090 }
1091
1092 memset(&xst, 0, sizeof(xst));
1093 xst.xst_id = 0;
1094 xst.xst_cookie = sc->sc_xs;
1095
1096 if ((error = xs_cmd(&xst, cmd, key, &iovp, &iov_cnt)) != 0)
1097 return (error);
1098
1099 memset(value, 0, valuelen);
1100
1101 switch (cmd) {
1102 case XS_READ:
1103 if (iov_cnt == 1 && iovp[0].iov_len == 1) {
1104 xs_resfree(&xst, iovp, iov_cnt);
1105
1106 /*
1107 * We cannot distinguish if the returned value is
1108 * a directory or a file in the xenstore. The only
1109 * indication is that the read value of a directory
1110 * returns an empty string (single nul byte),
1111 * so try to get the directory list in this case.
1112 */
1113 return (xs_kvop(xsc, PVBUS_KVLS, key, value, valuelen));
1114 }
1115 /* FALLTHROUGH */
1116 case XS_LIST:
1117 for (i = 0; i < iov_cnt; i++) {
1118 if (i && strlcat(value, "\n", valuelen) >= valuelen)
1119 break;
1120 if (strlcat(value, iovp[i].iov_base,
1121 valuelen) >= valuelen)
1122 break;
1123 }
1124 xs_resfree(&xst, iovp, iov_cnt);
1125 break;
1126 default:
1127 break;
1128 }
1129
1130 return (0);
1131}