Bug Summary

File: dev/pv/xbf.c
Warning: line 908, column 7
Although the value stored to 'error' is used in the enclosing expression, the value is never actually read from 'error'
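The diagnostic points at xbf_get_type() (lines 899-938 in the listing below). Each of the four xs_getprop()/xs_getnum() calls there stores its result in 'error' so the enclosing "!= 0" comparison can branch to errout, but the errout path only prints the failing property name and never reads 'error' back, so every one of those stores is dead. A minimal sketch of one possible fix, assuming no caller needs the errno value, is to test the return value directly; shown here for the first lookup, the same change applies to the other three, after which the now-unused 'error' declaration at line 905 could be dropped too:

    /* Illustrative sketch only, not committed OpenBSD code. */
    prop = "type";
    if (xs_getprop(sc->sc_parent, sc->sc_backend, prop, val,
        sizeof(val)) != 0)
            goto errout;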

Annotated Source Code


clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name xbf.c -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/lib/clang/13.0.0 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -D CONFIG_DRM_AMD_DC_DCN3_0 -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant -fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free 
-fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /usr/obj/sys/arch/amd64/compile/GENERIC.MP/scan-build/2022-01-12-131800-47421-1 -x c /usr/src/sys/dev/pv/xbf.c
1 /* $OpenBSD: xbf.c,v 1.51 2020/10/15 13:22:13 krw Exp $ */
2
3 /*
4 * Copyright (c) 2016, 2017 Mike Belopuhov
5 * Copyright (c) 2009, 2011 Mark Kettenis
6 *
7 * Permission to use, copy, modify, and distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
10 *
11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 */
19
20 #include "bio.h"
21 
22 #include <sys/param.h>
23 #include <sys/systm.h>
24 #include <sys/atomic.h>
25 #include <sys/device.h>
26 #include <sys/kernel.h>
27 #include <sys/buf.h>
28 #include <sys/malloc.h>
29 #include <sys/task.h>
30 
31 #include <machine/bus.h>
32 
33 #include <dev/pv/xenreg.h>
34 #include <dev/pv/xenvar.h>
35 
36 #include <scsi/scsi_all.h>
37 #include <scsi/cd.h>
38 #include <scsi/scsi_disk.h>
39 #include <scsi/scsiconf.h>
40 
41 /* #define XBF_DEBUG */
42 
43 #ifdef XBF_DEBUG
44 #define DPRINTF(x...) printf(x)
45 #else
46 #define DPRINTF(x...)
47 #endif
48
49 #define XBF_OP_READ 0
50 #define XBF_OP_WRITE 1
51 #define XBF_OP_BARRIER 2 /* feature-barrier */
52 #define XBF_OP_FLUSH 3 /* feature-flush-cache */
53 #define XBF_OP_DISCARD 5 /* feature-discard */
54 #define XBF_OP_INDIRECT 6 /* feature-max-indirect-segments */
55 
56 #define XBF_MAX_SGE 11
57 #define XBF_MAX_ISGE 8
58 
59 #define XBF_SEC_SHIFT 9
60 
61 #define XBF_CDROM 1
62 #define XBF_REMOVABLE 2
63 #define XBF_READONLY 4
64 
65 #define XBF_OK 0
66 #define XBF_EIO -1 /* generic failure */
67 #define XBF_EOPNOTSUPP -2 /* only for XBF_OP_BARRIER */
68 
69 struct xbf_sge {
70 uint32_t sge_ref;
71 uint8_t sge_first;
72 uint8_t sge_last;
73 uint16_t sge_pad;
74 } __packed;
75 
76 /* Generic I/O request */
77 struct xbf_req {
78 uint8_t req_op;
79 uint8_t req_nsegs;
80 uint16_t req_unit;
81 #ifdef __amd64__
82 uint32_t req_pad;
83 #endif
84 uint64_t req_id;
85 uint64_t req_sector;
86 struct xbf_sge req_sgl[XBF_MAX_SGE];
87 } __packed;
88
89 /* Indirect I/O request */
90 struct xbf_ireq {
91 uint8_t req_op;
92 uint8_t req_iop;
93 uint16_t req_nsegs;
94 #ifdef __amd64__
95 uint32_t req_pad;
96 #endif
97 uint64_t req_id;
98 uint64_t req_sector;
99 uint16_t req_unit;
100 uint32_t req_gref[XBF_MAX_ISGE];
101 #ifdef __i386__
102 uint64_t req_pad;
103 #endif
104 } __packed;
105 
106 struct xbf_rsp {
107 uint64_t rsp_id;
108 uint8_t rsp_op;
109 uint8_t rsp_pad1;
110 int16_t rsp_status;
111 #ifdef __amd64__
112 uint32_t rsp_pad2;
113 #endif
114 } __packed;
115 
116 union xbf_ring_desc {
117 struct xbf_req xrd_req;
118 struct xbf_ireq xrd_ireq;
119 struct xbf_rsp xrd_rsp;
120 } __packed;
121 
122 #define XBF_MIN_RING_SIZE 1
123 #define XBF_MAX_RING_SIZE 8
124 #define XBF_MAX_REQS 256 /* must be a power of 2 */
125
126 struct xbf_ring {
127 volatile uint32_t xr_prod;
128 volatile uint32_t xr_prod_event;
129 volatile uint32_t xr_cons;
130 volatile uint32_t xr_cons_event;
131 uint32_t xr_reserved[12];
132 union xbf_ring_desc xr_desc[0];
133 } __packed;
134 
135 struct xbf_dma_mem {
136 bus_size_t dma_size;
137 bus_dma_tag_t dma_tag;
138 bus_dmamap_t dma_map;
139 bus_dma_segment_t *dma_seg;
140 int dma_nsegs; /* total amount */
141 int dma_rsegs; /* used amount */
142 caddr_t dma_vaddr;
143 };
144 
145 struct xbf_ccb {
146 struct scsi_xfer *ccb_xfer; /* associated transfer */
147 bus_dmamap_t ccb_dmap; /* transfer map */
148 struct xbf_dma_mem ccb_bbuf; /* bounce buffer */
149 uint32_t ccb_first; /* first descriptor */
150 uint32_t ccb_last; /* last descriptor */
151 uint16_t ccb_want; /* expected chunks */
152 uint16_t ccb_seen; /* completed chunks */
153 TAILQ_ENTRY(xbf_ccb) ccb_link;
154 };
155 TAILQ_HEAD(xbf_ccb_queue, xbf_ccb);
156
157struct xbf_softc {
158 struct device sc_dev;
159 struct device *sc_parent;
160 char sc_node[XEN_MAX_NODE_LEN];
161 char sc_backend[XEN_MAX_BACKEND_LEN];
162 bus_dma_tag_t sc_dmat;
163 int sc_domid;
164
165 xen_intr_handle_t sc_xih;
166
167 int sc_state;
168 #define XBF_CONNECTED 4
169 #define XBF_CLOSING 5
170 
171 int sc_caps;
172 #define XBF_CAP_BARRIER 0x0001
173 #define XBF_CAP_FLUSH 0x0002
174
175 uint32_t sc_type;
176 uint32_t sc_unit;
177 char sc_dtype[16];
178 char sc_prod[16];
179
180 uint64_t sc_disk_size;
181 uint32_t sc_block_size;
182
183 /* Ring */
184 struct xbf_ring *sc_xr;
185 uint32_t sc_xr_cons;
186 uint32_t sc_xr_prod;
187 uint32_t sc_xr_size; /* in pages */
188 struct xbf_dma_mem sc_xr_dma;
189 uint32_t sc_xr_ref[XBF_MAX_RING_SIZE];
190 int sc_xr_ndesc;
191
192 /* Maximum number of blocks that one descriptor may refer to */
193 int sc_xrd_nblk;
194
195 /* CCBs */
196 int sc_nccb;
197 struct xbf_ccb *sc_ccbs;
198 struct xbf_ccb_queue sc_ccb_fq; /* free queue */
199 struct xbf_ccb_queue sc_ccb_sq; /* pending requests */
200 struct mutex sc_ccb_fqlck;
201 struct mutex sc_ccb_sqlck;
202
203 struct scsi_iopool sc_iopool;
204 struct device *sc_scsibus;
205 };
206 
207 int xbf_match(struct device *, void *, void *);
208 void xbf_attach(struct device *, struct device *, void *);
209 int xbf_detach(struct device *, int);
210 
211 struct cfdriver xbf_cd = {
212 NULL, "xbf", DV_DULL
213 };
214 
215 const struct cfattach xbf_ca = {
216 sizeof(struct xbf_softc), xbf_match, xbf_attach, xbf_detach
217 };
218
219 void xbf_intr(void *);
220 
221 int xbf_load_cmd(struct scsi_xfer *);
222 int xbf_bounce_cmd(struct scsi_xfer *);
223 void xbf_reclaim_cmd(struct scsi_xfer *);
224 
225 void xbf_scsi_cmd(struct scsi_xfer *);
226 int xbf_submit_cmd(struct scsi_xfer *);
227 int xbf_poll_cmd(struct scsi_xfer *);
228 void xbf_complete_cmd(struct xbf_softc *, struct xbf_ccb_queue *, int);
229 
230 struct scsi_adapter xbf_switch = {
231 xbf_scsi_cmd, NULL, NULL, NULL, NULL
232 };
233
234 void xbf_scsi_inq(struct scsi_xfer *);
235 void xbf_scsi_inquiry(struct scsi_xfer *);
236 void xbf_scsi_capacity(struct scsi_xfer *);
237 void xbf_scsi_capacity16(struct scsi_xfer *);
238 void xbf_scsi_done(struct scsi_xfer *, int);
239 
240 int xbf_dma_alloc(struct xbf_softc *, struct xbf_dma_mem *,
241 bus_size_t, int, int);
242 void xbf_dma_free(struct xbf_softc *, struct xbf_dma_mem *);
243 
244 int xbf_get_type(struct xbf_softc *);
245 int xbf_init(struct xbf_softc *);
246 int xbf_ring_create(struct xbf_softc *);
247 void xbf_ring_destroy(struct xbf_softc *);
248 void xbf_stop(struct xbf_softc *);
249 
250 int xbf_alloc_ccbs(struct xbf_softc *);
251 void xbf_free_ccbs(struct xbf_softc *);
252 void *xbf_get_ccb(void *);
253 void xbf_put_ccb(void *, void *);
254 
255 int
256 xbf_match(struct device *parent, void *match, void *aux)
257 {
258 struct xen_attach_args *xa = aux;
259
260 if (strcmp("vbd", xa->xa_name))
261 return (0);
262
263 return (1);
264 }
265 
266 void
267 xbf_attach(struct device *parent, struct device *self, void *aux)
268 {
269 struct xen_attach_args *xa = aux;
270 struct xbf_softc *sc = (struct xbf_softc *)self;
271 struct scsibus_attach_args saa;
272
273 sc->sc_parent = parent;
274 sc->sc_dmat = xa->xa_dmat;
275 sc->sc_domid = xa->xa_domid;
276
277 memcpy(sc->sc_node, xa->xa_node, XEN_MAX_NODE_LEN);
278 memcpy(sc->sc_backend, xa->xa_backend, XEN_MAX_BACKEND_LEN);
279
280 if (xbf_get_type(sc))
281 return;
282
283 if (xen_intr_establish(0, &sc->sc_xih, sc->sc_domid, xbf_intr, sc,
284 sc->sc_dev.dv_xname)) {
285 printf(": failed to establish an interrupt\n");
286 return;
287 }
288 xen_intr_mask(sc->sc_xih);
289
290 printf(" backend %d channel %u: %s\n", sc->sc_domid, sc->sc_xih,
291 sc->sc_dtype);
292
293 if (xbf_init(sc))
294 goto error;
295
296 if (xen_intr_unmask(sc->sc_xih)) {
297 printf("%s: failed to enable interrupts\n",
298 sc->sc_dev.dv_xname);
299 goto error;
300 }
301
302 saa.saa_adapter = &xbf_switch;
303 saa.saa_adapter_softc = self;
304 saa.saa_adapter_buswidth = 1;
305 saa.saa_luns = 1;
306 saa.saa_adapter_target = SDEV_NO_ADAPTER_TARGET;
307 saa.saa_openings = sc->sc_nccb;
308 saa.saa_pool = &sc->sc_iopool;
309 saa.saa_quirks = saa.saa_flags = 0;
310 saa.saa_wwpn = saa.saa_wwnn = 0;
311
312 sc->sc_scsibus = config_found(self, &saa, scsiprint);
313
314 xen_unplug_emulated(parent, XEN_UNPLUG_IDE | XEN_UNPLUG_IDESEC);
315
316 return;
317
318 error:
319 xen_intr_disestablish(sc->sc_xih);
320 }
321 
322 int
323 xbf_detach(struct device *self, int flags)
324 {
325 struct xbf_softc *sc = (struct xbf_softc *)self;
326 int ostate = sc->sc_state;
327
328 sc->sc_state = XBF_CLOSING;
329
330 xen_intr_mask(sc->sc_xih);
331 xen_intr_barrier(sc->sc_xih);
332
333 if (ostate == XBF_CONNECTED) {
334 xen_intr_disestablish(sc->sc_xih);
335 xbf_stop(sc);
336 }
337
338 if (sc->sc_scsibus)
339 return (config_detach(sc->sc_scsibus, flags | DETACH_FORCE));
340
341 return (0);
342 }
343 
344 void
345 xbf_intr(void *xsc)
346 {
347 struct xbf_softc *sc = xsc;
348 struct xbf_ring *xr = sc->sc_xr;
349 struct xbf_dma_mem *dma = &sc->sc_xr_dma;
350 struct xbf_ccb_queue cq;
351 struct xbf_ccb *ccb, *nccb;
352 uint32_t cons;
353 int desc, s;
354
355 TAILQ_INIT(&cq);
356
357 for (;;) {
358 bus_dmamap_sync(dma->dma_tag, dma->dma_map, 0, dma->dma_size,
359 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
360
361 for (cons = sc->sc_xr_cons; cons != xr->xr_cons; cons++) {
362 desc = cons & (sc->sc_xr_ndesc - 1);
363 xbf_complete_cmd(sc, &cq, desc);
364 }
365
366 sc->sc_xr_cons = cons;
367
368 if (TAILQ_EMPTY(&cq))
369 break;
370
371 s = splbio();
372 KERNEL_LOCK();
373 TAILQ_FOREACH_SAFE(ccb, &cq, ccb_link, nccb) {
374 TAILQ_REMOVE(&cq, ccb, ccb_link);
375 xbf_reclaim_cmd(ccb->ccb_xfer);
376 scsi_done(ccb->ccb_xfer);
377 }
378 KERNEL_UNLOCK();
379 splx(s);
380 }
381 }
382 
383 void
384 xbf_scsi_cmd(struct scsi_xfer *xs)
385 {
386 struct xbf_softc *sc = xs->sc_link->bus->sb_adapter_softc;
387
388 switch (xs->cmd.opcode) {
389 case READ_COMMAND:
390 case READ_10:
391 case READ_12:
392 case READ_16:
393 case WRITE_COMMAND:
394 case WRITE_10:
395 case WRITE_12:
396 case WRITE_16:
397 if (sc->sc_state != XBF_CONNECTED) {
398 xbf_scsi_done(xs, XS_SELTIMEOUT);
399 return;
400 }
401 break;
402 case SYNCHRONIZE_CACHE:
403 if (!(sc->sc_caps & (XBF_CAP_BARRIER|XBF_CAP_FLUSH))) {
404 xbf_scsi_done(xs, XS_NOERROR);
405 return;
406 }
407 break;
408 case INQUIRY:
409 xbf_scsi_inq(xs);
410 return;
411 case READ_CAPACITY:
412 xbf_scsi_capacity(xs);
413 return;
414 case READ_CAPACITY_16:
415 xbf_scsi_capacity16(xs);
416 return;
417 case TEST_UNIT_READY:
418 case START_STOP:
419 case PREVENT_ALLOW:
420 xbf_scsi_done(xs, XS_NOERROR);
421 return;
422 default:
423 printf("%s cmd 0x%02x\n", __func__, xs->cmd.opcode);
424 case MODE_SENSE:
425 case MODE_SENSE_BIG:
426 case REPORT_LUNS:
427 case READ_TOC:
428 xbf_scsi_done(xs, XS_DRIVER_STUFFUP);
429 return;
430 }
431
432 if (xbf_submit_cmd(xs)) {
433 xbf_scsi_done(xs, XS_DRIVER_STUFFUP);
434 return;
435 }
436
437 if (ISSET(xs->flags, SCSI_POLL) && xbf_poll_cmd(xs)) {
438 printf("%s: op %#x timed out\n", sc->sc_dev.dv_xname,
439 xs->cmd.opcode);
440 if (sc->sc_state == XBF_CONNECTED) {
441 xbf_reclaim_cmd(xs);
442 xbf_scsi_done(xs, XS_TIMEOUT);
443 }
444 return;
445 }
446 }
447 
448 int
449 xbf_load_cmd(struct scsi_xfer *xs)
450 {
451 struct xbf_softc *sc = xs->sc_link->bus->sb_adapter_softc;
452 struct xbf_ccb *ccb = xs->io;
453 struct xbf_sge *sge;
454 union xbf_ring_desc *xrd;
455 bus_dmamap_t map;
456 int error, mapflags, nsg, seg;
457 int desc, ndesc = 0;
458
459 map = ccb->ccb_dmap;
460
461 mapflags = (sc->sc_domid << 16);
462 if (ISSET(xs->flags, SCSI_NOSLEEP))
463 mapflags |= BUS_DMA_NOWAIT;
464 else
465 mapflags |= BUS_DMA_WAITOK;
466 if (ISSET(xs->flags, SCSI_DATA_IN))
467 mapflags |= BUS_DMA_READ;
468 else
469 mapflags |= BUS_DMA_WRITE;
470
471 error = bus_dmamap_load(sc->sc_dmat, map, xs->data, xs->datalen,
472 NULL, mapflags);
473 if (error) {
474 printf("%s: failed to load %d bytes of data\n",
475 sc->sc_dev.dv_xname, xs->datalen);
476 return (error);
477 }
478
479 xrd = &sc->sc_xr->xr_desc[ccb->ccb_first];
480 /* seg is the segment map iterator, nsg is the s-g list iterator */
481 for (seg = 0, nsg = 0; seg < map->dm_nsegs; seg++, nsg++) {
482 if (nsg == XBF_MAX_SGE) {
483 /* Number of segments so far */
484 xrd->xrd_req.req_nsegs = nsg;
485 /* Pick next descriptor */
486 ndesc++;
487 desc = (sc->sc_xr_prod + ndesc) & (sc->sc_xr_ndesc - 1);
488 xrd = &sc->sc_xr->xr_desc[desc];
489 nsg = 0;
490 }
491 sge = &xrd->xrd_req.req_sgl[nsg];
492 sge->sge_ref = map->dm_segs[seg].ds_addr;
493 sge->sge_first = nsg > 0 ? 0 :
494 (((vaddr_t)xs->data + ndesc * sc->sc_xrd_nblk *
495 (1 << XBF_SEC_SHIFT)) & PAGE_MASK) >> XBF_SEC_SHIFT;
496 sge->sge_last = sge->sge_first +
497 (map->dm_segs[seg].ds_len >> XBF_SEC_SHIFT) - 1;
498
499 DPRINTF("%s: seg %d/%d ref %lu len %lu first %u last %u\n",
500 sc->sc_dev.dv_xname, nsg + 1, map->dm_nsegs,
501 map->dm_segs[seg].ds_addr, map->dm_segs[seg].ds_len,
502 sge->sge_first, sge->sge_last);
503
504 KASSERT(sge->sge_last <= 7);
505 }
506
507 xrd->xrd_req.req_nsegs = nsg;
508
509 return (0);
510 }
511 
512 int
513 xbf_bounce_cmd(struct scsi_xfer *xs)
514 {
515 struct xbf_softc *sc = xs->sc_link->bus->sb_adapter_softc;
516 struct xbf_ccb *ccb = xs->io;
517 struct xbf_sge *sge;
518 struct xbf_dma_mem *dma;
519 union xbf_ring_desc *xrd;
520 bus_dmamap_t map;
521 bus_size_t size;
522 int error, mapflags, nsg, seg;
523 int desc, ndesc = 0;
524
525 size = roundup(xs->datalen, PAGE_SIZE);
526 if (size > MAXPHYS)
527 return (EFBIG);
528
529 mapflags = (sc->sc_domid << 16);
530 if (ISSET(xs->flags, SCSI_NOSLEEP))
531 mapflags |= BUS_DMA_NOWAIT;
532 else
533 mapflags |= BUS_DMA_WAITOK;
534 if (ISSET(xs->flags, SCSI_DATA_IN))
535 mapflags |= BUS_DMA_READ;
536 else
537 mapflags |= BUS_DMA_WRITE;
538
539 dma = &ccb->ccb_bbuf;
540 error = xbf_dma_alloc(sc, dma, size, size / PAGE_SIZE, mapflags);
541 if (error) {
542 DPRINTF("%s: failed to allocate a %lu byte bounce buffer\n",
543 sc->sc_dev.dv_xname, size);
544 return (error);
545 }
546
547 map = dma->dma_map;
548
549 DPRINTF("%s: bouncing %d bytes via %lu size map with %d segments\n",
550 sc->sc_dev.dv_xname, xs->datalen, size, map->dm_nsegs);
551
552 if (ISSET(xs->flags, SCSI_DATA_OUT))
553 memcpy(dma->dma_vaddr, xs->data, xs->datalen);
554
555 xrd = &sc->sc_xr->xr_desc[ccb->ccb_first];
556 /* seg is the map segment iterator, nsg is the s-g element iterator */
557 for (seg = 0, nsg = 0; seg < map->dm_nsegs; seg++, nsg++) {
558 if (nsg == XBF_MAX_SGE) {
559 /* Number of segments so far */
560 xrd->xrd_req.req_nsegs = nsg;
561 /* Pick next descriptor */
562 ndesc++;
563 desc = (sc->sc_xr_prod + ndesc) & (sc->sc_xr_ndesc - 1);
564 xrd = &sc->sc_xr->xr_desc[desc];
565 nsg = 0;
566 }
567 sge = &xrd->xrd_req.req_sgl[nsg];
568 sge->sge_ref = map->dm_segs[seg].ds_addr;
569 sge->sge_first = nsg > 0 ? 0 :
570 (((vaddr_t)dma->dma_vaddr + ndesc * sc->sc_xrd_nblk *
571 (1 << XBF_SEC_SHIFT)) & PAGE_MASK) >> XBF_SEC_SHIFT;
572 sge->sge_last = sge->sge_first +
573 (map->dm_segs[seg].ds_len >> XBF_SEC_SHIFT) - 1;
574
575 DPRINTF("%s: seg %d/%d ref %lu len %lu first %u last %u\n",
576 sc->sc_dev.dv_xname, nsg + 1, map->dm_nsegs,
577 map->dm_segs[seg].ds_addr, map->dm_segs[seg].ds_len,
578 sge->sge_first, sge->sge_last);
579
580 KASSERT(sge->sge_last <= 7);
581 }
582
583 xrd->xrd_req.req_nsegs = nsg;
584
585 return (0);
586 }
587 
588 void
589 xbf_reclaim_cmd(struct scsi_xfer *xs)
590 {
591 struct xbf_softc *sc = xs->sc_link->bus->sb_adapter_softc;
592 struct xbf_ccb *ccb = xs->io;
593 struct xbf_dma_mem *dma = &ccb->ccb_bbuf;
594
595 if (dma->dma_size == 0)
596 return;
597
598 if (ISSET(xs->flags, SCSI_DATA_IN))
599 memcpy(xs->data, (caddr_t)dma->dma_vaddr, xs->datalen);
600
601 xbf_dma_free(sc, &ccb->ccb_bbuf);
602 }
603 
604 int
605 xbf_submit_cmd(struct scsi_xfer *xs)
606 {
607 struct xbf_softc *sc = xs->sc_link->bus->sb_adapter_softc;
608 struct xbf_ccb *ccb = xs->io;
609 union xbf_ring_desc *xrd;
610 struct scsi_rw *rw;
611 struct scsi_rw_10 *rw10;
612 struct scsi_rw_12 *rw12;
613 struct scsi_rw_16 *rw16;
614 uint64_t lba = 0;
615 uint32_t nblk = 0;
616 uint8_t operation = 0;
617 unsigned int ndesc = 0;
618 int desc, error;
619
620 switch (xs->cmd.opcode) {
621 case READ_COMMAND:
622 case READ_10:
623 case READ_12:
624 case READ_16:
625 operation = XBF_OP_READ;
626 break;
627 
628 case WRITE_COMMAND:
629 case WRITE_10:
630 case WRITE_12:
631 case WRITE_16:
632 operation = XBF_OP_WRITE;
633 break;
634 
635 case SYNCHRONIZE_CACHE:
636 if (sc->sc_caps & XBF_CAP_FLUSH)
637 operation = XBF_OP_FLUSH;
638 else if (sc->sc_caps & XBF_CAP_BARRIER)
639 operation = XBF_OP_BARRIER;
640 break;
641 }
642
643 /*
644 * READ/WRITE/SYNCHRONIZE commands. SYNCHRONIZE CACHE
645 * has the same layout as 10-byte READ/WRITE commands.
646 */
647 if (xs->cmdlen == 6) {
648 rw = (struct scsi_rw *)&xs->cmd;
649 lba = _3btol(rw->addr) & (SRW_TOPADDR << 16 | 0xffff);
650 nblk = rw->length ? rw->length : 0x100;
651 } else if (xs->cmdlen == 10) {
652 rw10 = (struct scsi_rw_10 *)&xs->cmd;
653 lba = _4btol(rw10->addr);
654 nblk = _2btol(rw10->length);
655 } else if (xs->cmdlen == 12) {
656 rw12 = (struct scsi_rw_12 *)&xs->cmd;
657 lba = _4btol(rw12->addr);
658 nblk = _4btol(rw12->length);
659 } else if (xs->cmdlen == 16) {
660 rw16 = (struct scsi_rw_16 *)&xs->cmd;
661 lba = _8btol(rw16->addr);
662 nblk = _4btol(rw16->length);
663 }
664
665 ccb->ccb_want = ccb->ccb_seen = 0;
666
667 do {
668 desc = (sc->sc_xr_prod + ndesc) & (sc->sc_xr_ndesc - 1);
669 if (ndesc == 0)
670 ccb->ccb_first = desc;
671
672 xrd = &sc->sc_xr->xr_desc[desc];
673 xrd->xrd_req.req_op = operation;
674 xrd->xrd_req.req_unit = (uint16_t)sc->sc_unit;
675 xrd->xrd_req.req_sector = lba + ndesc * sc->sc_xrd_nblk;
676
677 ccb->ccb_want |= 1 << ndesc;
678 ndesc++;
679 } while (ndesc * sc->sc_xrd_nblk < nblk);
680
681 ccb->ccb_last = desc;
682
683 if (operation == XBF_OP_READ || operation == XBF_OP_WRITE) {
684 DPRINTF("%s: desc %u,%u %s%s lba %llu nsec %u "
685 "len %d\n", sc->sc_dev.dv_xname, ccb->ccb_first,
686 ccb->ccb_last, operation == XBF_OP_READ ? "read" :
687 "write", ISSET(xs->flags, SCSI_POLL) ? "-poll" : "",
688 lba, nblk, xs->datalen);
689
690 if (((vaddr_t)xs->data & ((1 << XBF_SEC_SHIFT) - 1)) == 0)
691 error = xbf_load_cmd(xs);
692 else
693 error = xbf_bounce_cmd(xs);
694 if (error)
695 return (-1);
696 } else {
697 DPRINTF("%s: desc %u %s%s lba %llu\n", sc->sc_dev.dv_xname,
698 ccb->ccb_first, operation == XBF_OP_FLUSH ? "flush" :
699 "barrier", ISSET(xs->flags, SCSI_POLL) ? "-poll" : "",
700 lba);
701 xrd->xrd_req.req_nsegs = 0;
702 }
703
704 ccb->ccb_xfer = xs;
705
706 bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmap, 0,
707 ccb->ccb_dmap->dm_mapsize, BUS_DMASYNC_PREREAD |
708 BUS_DMASYNC_PREWRITE);
709
710 mtx_enter(&sc->sc_ccb_sqlck);
711 TAILQ_INSERT_TAIL(&sc->sc_ccb_sq, ccb, ccb_link);
712 mtx_leave(&sc->sc_ccb_sqlck);
713
714 sc->sc_xr_prod += ndesc;
715 sc->sc_xr->xr_prod = sc->sc_xr_prod;
716 sc->sc_xr->xr_cons_event = sc->sc_xr_prod;
717
718 bus_dmamap_sync(sc->sc_dmat, sc->sc_xr_dma.dma_map, 0,
719 sc->sc_xr_dma.dma_map->dm_mapsize, BUS_DMASYNC_PREREAD |
720 BUS_DMASYNC_PREWRITE);
721
722 xen_intr_signal(sc->sc_xih);
723
724 return (0);
725 }
726 
727 int
728 xbf_poll_cmd(struct scsi_xfer *xs)
729 {
730 int timo = 1000;
731
732 do {
733 if (ISSET(xs->flags, ITSDONE))
734 break;
735 if (ISSET(xs->flags, SCSI_NOSLEEP))
736 delay(10);
737 else
738 tsleep_nsec(xs, PRIBIO, "xbfpoll", USEC_TO_NSEC(10));
739 xbf_intr(xs->sc_link->bus->sb_adapter_softc);
740 } while(--timo > 0);
741
742 return (0);
743 }
744 
745 void
746 xbf_complete_cmd(struct xbf_softc *sc, struct xbf_ccb_queue *cq, int desc)
747 {
748 struct xbf_ccb *ccb;
749 union xbf_ring_desc *xrd;
750 bus_dmamap_t map;
751 uint32_t id, chunk;
752 int error;
753
754 xrd = &sc->sc_xr->xr_desc[desc];
755 error = xrd->xrd_rsp.rsp_status == XBF_OK ? XS_NOERROR :
756 XS_DRIVER_STUFFUP;
757
758 mtx_enter(&sc->sc_ccb_sqlck);
759
760 /*
761 * To find a CCB for id equal to x within an interval [a, b] we must
762 * locate a CCB such that (x - a) mod N <= (b - a) mod N, where a is
763 * the first descriptor, b is the last one and N is the ring size.
764 */
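Worked example of the modular test above: with ring size N = 64 and a CCB that wraps from a = 62 to b = 1, looking up id x = 0 gives (0 - 62) mod 64 = 2 and (1 - 62) mod 64 = 3; since 2 <= 3, the CCB matches. For x = 2, (2 - 62) mod 64 = 4 > 3, so the loop moves on to the next CCB.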
765 id = (uint32_t)xrd->xrd_rsp.rsp_id;
766 TAILQ_FOREACH(ccb, &sc->sc_ccb_sq, ccb_link) {
767 if (((id - ccb->ccb_first) & (sc->sc_xr_ndesc - 1)) <=
768 ((ccb->ccb_last - ccb->ccb_first) & (sc->sc_xr_ndesc - 1)))
769 break;
770 }
771 KASSERT(ccb != NULL);
772
773 /* Assert that this chunk belongs to this CCB */
774 chunk = 1 << ((id - ccb->ccb_first) & (sc->sc_xr_ndesc - 1));
775 KASSERT((ccb->ccb_want & chunk) != 0);
776 KASSERT((ccb->ccb_seen & chunk) == 0);
777
778 /* When all chunks are collected remove the CCB from the queue */
779 ccb->ccb_seen |= chunk;
780 if (ccb->ccb_seen == ccb->ccb_want)
781 TAILQ_REMOVE(&sc->sc_ccb_sq, ccb, ccb_link);
782
783 mtx_leave(&sc->sc_ccb_sqlck);
784
785 DPRINTF("%s: completing desc %d(%llu) op %u with error %d\n",
786 sc->sc_dev.dv_xname, desc, xrd->xrd_rsp.rsp_id,
787 xrd->xrd_rsp.rsp_op, xrd->xrd_rsp.rsp_status);
788
789 memset(xrd, 0, sizeof(*xrd));
790 xrd->xrd_req.req_id = desc;
791
792 if (ccb->ccb_seen != ccb->ccb_want)
793 return;
794
795 if (ccb->ccb_bbuf.dma_size > 0)
796 map = ccb->ccb_bbuf.dma_map;
797 else
798 map = ccb->ccb_dmap;
799
800 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
801 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
802 bus_dmamap_unload(sc->sc_dmat, map);
803
804 ccb->ccb_xfer->resid = 0;
805 ccb->ccb_xfer->error = error;
806 TAILQ_INSERT_TAIL(cq, ccb, ccb_link);
807 }
808 
809 void
810 xbf_scsi_inq(struct scsi_xfer *xs)
811 {
812 struct scsi_inquiry *inq = (struct scsi_inquiry *)&xs->cmd;
813
814 if (ISSET(inq->flags, SI_EVPD))
815 xbf_scsi_done(xs, XS_DRIVER_STUFFUP);
816 else
817 xbf_scsi_inquiry(xs);
818 }
819 
820 void
821 xbf_scsi_inquiry(struct scsi_xfer *xs)
822 {
823 struct xbf_softc *sc = xs->sc_link->bus->sb_adapter_softc;
824 struct scsi_inquiry_data inq;
825
826 bzero(&inq, sizeof(inq));
827
828 switch (sc->sc_type) {
829 case XBF_CDROM:
830 inq.device = T_CDROM;
831 break;
832 default:
833 inq.device = T_DIRECT;
834 break;
835 }
836
837 inq.version = SCSI_REV_SPC3;
838 inq.response_format = SID_SCSI2_RESPONSE;
839 inq.additional_length = SID_SCSI2_ALEN;
840 inq.flags |= SID_CmdQue;
841 bcopy("Xen ", inq.vendor, sizeof(inq.vendor));
842 bcopy(sc->sc_prod, inq.product, sizeof(inq.product));
843 bcopy("0000", inq.revision, sizeof(inq.revision));
844
845 scsi_copy_internal_data(xs, &inq, sizeof(inq));
846
847 xbf_scsi_done(xs, XS_NOERROR);
848 }
849 
850 void
851 xbf_scsi_capacity(struct scsi_xfer *xs)
852 {
853 struct xbf_softc *sc = xs->sc_link->bus->sb_adapter_softc;
854 struct scsi_read_cap_data rcd;
855 uint64_t capacity;
856
857 bzero(&rcd, sizeof(rcd));
858
859 capacity = sc->sc_disk_size - 1;
860 if (capacity > 0xffffffff)
861 capacity = 0xffffffff;
862
863 _lto4b(capacity, rcd.addr);
864 _lto4b(sc->sc_block_size, rcd.length);
865
866 bcopy(&rcd, xs->data, MIN(sizeof(rcd), xs->datalen));
867
868 xbf_scsi_done(xs, XS_NOERROR);
869 }
870 
871 void
872 xbf_scsi_capacity16(struct scsi_xfer *xs)
873 {
874 struct xbf_softc *sc = xs->sc_link->bus->sb_adapter_softc;
875 struct scsi_read_cap_data_16 rcd;
876
877 bzero(&rcd, sizeof(rcd));
878
879 _lto8b(sc->sc_disk_size - 1, rcd.addr);
880 _lto4b(sc->sc_block_size, rcd.length);
881
882 bcopy(&rcd, xs->data, MIN(sizeof(rcd), xs->datalen));
883
884 xbf_scsi_done(xs, XS_NOERROR);
885 }
886 
887 void
888 xbf_scsi_done(struct scsi_xfer *xs, int error)
889 {
890 int s;
891
892 xs->error = error;
893
894 s = splbio();
895 scsi_done(xs);
896 splx(s);
897 }
898 
899 int
900 xbf_get_type(struct xbf_softc *sc)
901 {
902 unsigned long long res;
903 const char *prop;
904 char val[32];
905 int error;
906
907 prop = "type";
908 if ((error = xs_getprop(sc->sc_parent, sc->sc_backend, prop, val,
Although the value stored to 'error' is used in the enclosing expression, the value is never actually read from 'error'
909 sizeof(val))) != 0)
910 goto errout;
911 snprintf(sc->sc_prod, sizeof(sc->sc_prod), "%s", val);
912
913 prop = "dev";
914 if ((error = xs_getprop(sc->sc_parent, sc->sc_backend, prop, val,
915 sizeof(val))) != 0)
916 goto errout;
917 snprintf(sc->sc_prod, sizeof(sc->sc_prod), "%s %s", sc->sc_prod, val);
918
919 prop = "virtual-device";
920 if ((error = xs_getnum(sc->sc_parent, sc->sc_node, prop, &res)) != 0)
921 goto errout;
922 sc->sc_unit = (uint32_t)res;
923 snprintf(sc->sc_prod, sizeof(sc->sc_prod), "%s %llu", sc->sc_prod, res);
924
925 prop = "device-type";
926 if ((error = xs_getprop(sc->sc_parent, sc->sc_node, prop,
927 sc->sc_dtype, sizeof(sc->sc_dtype))) != 0)
928 goto errout;
929 if (!strcmp(sc->sc_dtype, "cdrom"))
930 sc->sc_type = XBF_CDROM1;
931
932 return (0);
933
934 errout:
935 printf("%s: failed to read \"%s\" property\n", sc->sc_dev.dv_xname,
936 prop);
937 return (-1);
938 }
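Note: the errout path just above reports only the failing property name and never reads the stored 'error' value back, which is exactly what the line-908 warning flags. Contrast xbf_init() below, whose errout does consume 'error' (lines 1087-1088); that is why the checker fires here and not there. A sketch of the alternative fix, keeping the stores but reporting the code (illustrative only, not committed OpenBSD code):

     errout:
    	printf("%s: failed to read \"%s\" property (%d)\n",
    	    sc->sc_dev.dv_xname, prop, error);
    	return (-1);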
939
940 int
941 xbf_init(struct xbf_softc *sc)
942 {
943 unsigned long long res;
944 const char *action, *prop;
945 char pbuf[sizeof("ring-refXX")];
946 unsigned int i;
947 int error;
948
949 prop = "max-ring-page-order";
950 error = xs_getnum(sc->sc_parent, sc->sc_backend, prop, &res);
951 if (error == 0)
952 sc->sc_xr_size = 1 << res;
953 if (error == ENOENT) {
954 prop = "max-ring-pages";
955 error = xs_getnum(sc->sc_parent, sc->sc_backend, prop, &res);
956 if (error == 0)
957 sc->sc_xr_size = res;
958 }
959 /* Fallback to the known minimum */
960 if (error)
961 sc->sc_xr_size = XBF_MIN_RING_SIZE;
962 
963 if (sc->sc_xr_size < XBF_MIN_RING_SIZE)
964 sc->sc_xr_size = XBF_MIN_RING_SIZE;
965 if (sc->sc_xr_size > XBF_MAX_RING_SIZE)
966 sc->sc_xr_size = XBF_MAX_RING_SIZE;
967 if (!powerof2(sc->sc_xr_size))
968 sc->sc_xr_size = 1 << (fls(sc->sc_xr_size) - 1);
969 
970 sc->sc_xr_ndesc = ((sc->sc_xr_size * PAGE_SIZE) -
971 sizeof(struct xbf_ring)) / sizeof(union xbf_ring_desc);
972 if (!powerof2(sc->sc_xr_ndesc))
973 sc->sc_xr_ndesc = 1 << (fls(sc->sc_xr_ndesc) - 1);
974 if (sc->sc_xr_ndesc > XBF_MAX_REQS)
975 sc->sc_xr_ndesc = XBF_MAX_REQS;
976
977 DPRINTF("%s: %u ring pages, %d requests\n",
978 sc->sc_dev.dv_xname, sc->sc_xr_size, sc->sc_xr_ndesc);
979
980 if (xbf_ring_create(sc))
981 return (-1);
982
983 action = "set";
984
985 for (i = 0; i < sc->sc_xr_size; i++) {
986 if (i == 0 && sc->sc_xr_size == 1)
987 snprintf(pbuf, sizeof(pbuf), "ring-ref");
988 else
989 snprintf(pbuf, sizeof(pbuf), "ring-ref%d", i);
990 prop = pbuf;
991 if (xs_setnum(sc->sc_parent, sc->sc_node, prop,
992 sc->sc_xr_ref[i]))
993 goto errout;
994 }
995
996 if (sc->sc_xr_size > 1) {
997 prop = "num-ring-pages";
998 if (xs_setnum(sc->sc_parent, sc->sc_node, prop,
999 sc->sc_xr_size))
1000 goto errout;
1001 prop = "ring-page-order";
1002 if (xs_setnum(sc->sc_parent, sc->sc_node, prop,
1003 fls(sc->sc_xr_size) - 1))
1004 goto errout;
1005 }
1006
1007 prop = "event-channel";
1008 if (xs_setnum(sc->sc_parent, sc->sc_node, prop, sc->sc_xih))
1009 goto errout;
1010
1011 prop = "protocol";
1012 #ifdef __amd64__
1013 if (xs_setprop(sc->sc_parent, sc->sc_node, prop, "x86_64-abi",
1014 strlen("x86_64-abi")))
1015 goto errout;
1016 #else
1017 if (xs_setprop(sc->sc_parent, sc->sc_node, prop, "x86_32-abi",
1018 strlen("x86_32-abi")))
1019 goto errout;
1020 #endif
1021
1022 if (xs_setprop(sc->sc_parent, sc->sc_node, "state",
1023 XEN_STATE_INITIALIZED, strlen(XEN_STATE_INITIALIZED))) {
1024 printf("%s: failed to set state to INITIALIZED\n",
1025 sc->sc_dev.dv_xname);
1026 xbf_ring_destroy(sc);
1027 return (-1);
1028 }
1029
1030 if (xs_await_transition(sc->sc_parent, sc->sc_backend, "state",
1031 XEN_STATE_CONNECTED, 10000)) {
1032 printf("%s: timed out waiting for backend to connect\n",
1033 sc->sc_dev.dv_xname);
1034 xbf_ring_destroy(sc);
1035 return (-1);
1036 }
1037
1038 action = "read";
1039
1040 prop = "sectors";
1041 if ((error = xs_getnum(sc->sc_parent, sc->sc_backend, prop, &res)) != 0)
1042 goto errout;
1043 sc->sc_disk_size = res;
1044
1045 prop = "sector-size";
1046 if ((error = xs_getnum(sc->sc_parent, sc->sc_backend, prop, &res)) != 0)
1047 goto errout;
1048 sc->sc_block_size = res;
1049
1050 prop = "feature-barrier";
1051 if ((error = xs_getnum(sc->sc_parent, sc->sc_backend, prop, &res)) != 0
1052 && error != ENOENT)
1053 goto errout;
1054 if (error == 0 && res == 1)
1055 sc->sc_caps |= XBF_CAP_BARRIER;
1056
1057 prop = "feature-flush-cache";
1058 if ((error = xs_getnum(sc->sc_parent, sc->sc_backend, prop, &res)) != 0
1059 && error != ENOENT)
1060 goto errout;
1061 if (error == 0 && res == 1)
1062 sc->sc_caps |= XBF_CAP_FLUSH;
1063 
1064 #ifdef XBF_DEBUG
1065 if (sc->sc_caps) {
1066 printf("%s: features:", sc->sc_dev.dv_xname);
1067 if (sc->sc_caps & XBF_CAP_BARRIER)
1068 printf(" BARRIER");
1069 if (sc->sc_caps & XBF_CAP_FLUSH)
1070 printf(" FLUSH");
1071 printf("\n");
1072 }
1073 #endif
1074
1075 if (xs_setprop(sc->sc_parent, sc->sc_node, "state",
1076 XEN_STATE_CONNECTED, strlen(XEN_STATE_CONNECTED))) {
1077 printf("%s: failed to set state to CONNECTED\n",
1078 sc->sc_dev.dv_xname);
1079 return (-1);
1080 }
1081
1082 sc->sc_state = XBF_CONNECTED;
1083
1084 return (0);
1085
1086 errout:
1087 printf("%s: failed to %s \"%s\" property (%d)\n", sc->sc_dev.dv_xname,
1088 action, prop, error);
1089 xbf_ring_destroy(sc);
1090 return (-1);
1091 }
1092 
1093 int
1094 xbf_dma_alloc(struct xbf_softc *sc, struct xbf_dma_mem *dma,
1095 bus_size_t size, int nsegs, int mapflags)
1096 {
1097 int error;
1098
1099 dma->dma_tag = sc->sc_dmat;
1100
1101 dma->dma_seg = mallocarray(nsegs, sizeof(bus_dma_segment_t), M_DEVBUF,
1102 M_ZERO | M_NOWAIT);
1103 if (dma->dma_seg == NULL) {
1104 printf("%s: failed to allocate a segment array\n",
1105 sc->sc_dev.dv_xname);
1106 return (ENOMEM);
1107 }
1108
1109 error = bus_dmamap_create(dma->dma_tag, size, nsegs, PAGE_SIZE, 0,
1110 BUS_DMA_NOWAIT, &dma->dma_map);
1111 if (error) {
1112 printf("%s: failed to create a memory map (%d)\n",
1113 sc->sc_dev.dv_xname, error);
1114 goto errout;
1115 }
1116
1117 error = bus_dmamem_alloc(dma->dma_tag, size, PAGE_SIZE, 0,
1118 dma->dma_seg, nsegs, &dma->dma_rsegs, BUS_DMA_ZERO |
1119 BUS_DMA_NOWAIT);
1120 if (error) {
1121 printf("%s: failed to allocate DMA memory (%d)\n",
1122 sc->sc_dev.dv_xname, error);
1123 goto destroy;
1124 }
1125
1126 error = bus_dmamem_map(dma->dma_tag, dma->dma_seg, dma->dma_rsegs,
1127 size, &dma->dma_vaddr, BUS_DMA_NOWAIT);
1128 if (error) {
1129 printf("%s: failed to map DMA memory (%d)\n",
1130 sc->sc_dev.dv_xname, error);
1131 goto free;
1132 }
1133
1134 error = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
1135 size, NULL, mapflags | BUS_DMA_NOWAIT);
1136 if (error) {
1137 printf("%s: failed to load DMA memory (%d)\n",
1138 sc->sc_dev.dv_xname, error);
1139 goto unmap;
1140 }
1141
1142 dma->dma_size = size;
1143 dma->dma_nsegs = nsegs;
1144 return (0);
1145
1146 unmap:
1147 bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, size);
1148 free:
1149 bus_dmamem_free(dma->dma_tag, dma->dma_seg, dma->dma_rsegs);
1150 destroy:
1151 bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
1152 errout:
1153 free(dma->dma_seg, M_DEVBUF, nsegs * sizeof(bus_dma_segment_t));
1154 dma->dma_map = NULL;
1155 dma->dma_tag = NULL;
1156 return (error);
1157 }
1158 
1159 void
1160 xbf_dma_free(struct xbf_softc *sc, struct xbf_dma_mem *dma)
1161 {
1162 if (dma->dma_tag == NULL || dma->dma_map == NULL)
1163 return;
1164 bus_dmamap_sync(dma->dma_tag, dma->dma_map, 0, dma->dma_size,
1165 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1166 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
1167 bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, dma->dma_size);
1168 bus_dmamem_free(dma->dma_tag, dma->dma_seg, dma->dma_rsegs);
1169 bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
1170 free(dma->dma_seg, M_DEVBUF, dma->dma_nsegs * sizeof(bus_dma_segment_t));
1171 dma->dma_seg = NULL;
1172 dma->dma_map = NULL;
1173 dma->dma_size = 0;
1174 }
1175
1176 int
1177 xbf_ring_create(struct xbf_softc *sc)
1178 {
1179 int i;
1180
1181 if (xbf_dma_alloc(sc, &sc->sc_xr_dma, sc->sc_xr_size * PAGE_SIZE,
1182 sc->sc_xr_size, sc->sc_domid << 16))
1183 return (-1);
1184 for (i = 0; i < sc->sc_xr_dma.dma_map->dm_nsegs; i++)
1185 sc->sc_xr_ref[i] = sc->sc_xr_dma.dma_map->dm_segs[i].ds_addr;
1186
1187 sc->sc_xr = (struct xbf_ring *)sc->sc_xr_dma.dma_vaddr;
1188
1189 sc->sc_xr->xr_prod_event = sc->sc_xr->xr_cons_event = 1;
1190
1191 for (i = 0; i < sc->sc_xr_ndesc; i++)
1192 sc->sc_xr->xr_desc[i].xrd_req.req_id = i;
1193
1194 /* The number of contiguous blocks addressable by one descriptor */
1195 sc->sc_xrd_nblk = (PAGE_SIZE * XBF_MAX_SGE) / (1 << XBF_SEC_SHIFT);
1196
1197 if (xbf_alloc_ccbs(sc)) {
1198 xbf_ring_destroy(sc);
1199 return (-1);
1200 }
1201
1202 return (0);
1203 }
1204 
1205 void
1206 xbf_ring_destroy(struct xbf_softc *sc)
1207 {
1208 xbf_free_ccbs(sc);
1209 xbf_dma_free(sc, &sc->sc_xr_dma);
1210 sc->sc_xr = NULL;
1211 }
1212 
1213 void
1214 xbf_stop(struct xbf_softc *sc)
1215 {
1216 struct xbf_ccb *ccb, *nccb;
1217 bus_dmamap_t map;
1218
1219 bus_dmamap_sync(sc->sc_dmat, sc->sc_xr_dma.dma_map, 0,
1220 sc->sc_xr_dma.dma_map->dm_mapsize, BUS_DMASYNC_POSTREAD |
1221 BUS_DMASYNC_POSTWRITE);
1222 
1223 TAILQ_FOREACH_SAFE(ccb, &sc->sc_ccb_sq, ccb_link, nccb) {
1224 TAILQ_REMOVE(&sc->sc_ccb_sq, ccb, ccb_link);
1225 
1226 if (ccb->ccb_bbuf.dma_size > 0)
1227 map = ccb->ccb_bbuf.dma_map;
1228 else
1229 map = ccb->ccb_dmap;
1230 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1231 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1232 bus_dmamap_unload(sc->sc_dmat, map);
1233
1234 xbf_reclaim_cmd(ccb->ccb_xfer);
1235 xbf_scsi_done(ccb->ccb_xfer, XS_SELTIMEOUT);
1236 }
1237
1238 xbf_ring_destroy(sc);
1239 }
1240 
1241 int
1242 xbf_alloc_ccbs(struct xbf_softc *sc)
1243 {
1244 int i, error;
1245
1246 TAILQ_INIT(&sc->sc_ccb_fq);
1247 TAILQ_INIT(&sc->sc_ccb_sq);
1248 mtx_init(&sc->sc_ccb_fqlck, IPL_BIO);
1249 mtx_init(&sc->sc_ccb_sqlck, IPL_BIO);
1250
1251 sc->sc_nccb = sc->sc_xr_ndesc / 2;
1252
1253 sc->sc_ccbs = mallocarray(sc->sc_nccb, sizeof(struct xbf_ccb),
1254 M_DEVBUF, M_ZERO | M_NOWAIT);
1255 if (sc->sc_ccbs == NULL) {
1256 printf("%s: failed to allocate CCBs\n", sc->sc_dev.dv_xname);
1257 return (-1);
1258 }
1259
1260 for (i = 0; i < sc->sc_nccb; i++) {
1261 /*
1262 * Each CCB is set up to use up to 2 descriptors and
1263 * each descriptor can transfer XBF_MAX_SGE number of
1264 * pages.
1265 */
1266 error = bus_dmamap_create(sc->sc_dmat, MAXPHYS, 2 *
1267 XBF_MAX_SGE, PAGE_SIZE, PAGE_SIZE, BUS_DMA_NOWAIT,
1268 &sc->sc_ccbs[i].ccb_dmap);
1269 if (error) {
1270 printf("%s: failed to create a memory map for "
1271 "the xfer %d (%d)\n", sc->sc_dev.dv_xname, i,
1272 error);
1273 goto errout;
1274 }
1275
1276 xbf_put_ccb(sc, &sc->sc_ccbs[i]);
1277 }
1278
1279 scsi_iopool_init(&sc->sc_iopool, sc, xbf_get_ccb, xbf_put_ccb);
1280
1281 return (0);
1282
1283 errout:
1284 xbf_free_ccbs(sc);
1285 return (-1);
1286 }
1287 
1288 void
1289 xbf_free_ccbs(struct xbf_softc *sc)
1290 {
1291 struct xbf_ccb *ccb;
1292 int i;
1293
1294 for (i = 0; i < sc->sc_nccb; i++) {
1295 ccb = &sc->sc_ccbs[i];
1296 if (ccb->ccb_dmap == NULL)
1297 continue;
1298 bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmap, 0, 0,
1299 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1300 bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmap);
1301 bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmap);
1302 }
1303
1304 free(sc->sc_ccbs, M_DEVBUF, sc->sc_nccb * sizeof(struct xbf_ccb));
1305 sc->sc_ccbs = NULL;
1306 sc->sc_nccb = 0;
1307 }
1308 
1309 void *
1310 xbf_get_ccb(void *xsc)
1311 {
1312 struct xbf_softc *sc = xsc;
1313 struct xbf_ccb *ccb;
1314
1315 if (sc->sc_state != XBF_CONNECTED &&
1316 sc->sc_state != XBF_CLOSING)
1317 return (NULL);
1318
1319 mtx_enter(&sc->sc_ccb_fqlck);
1320 ccb = TAILQ_FIRST(&sc->sc_ccb_fq);
1321 if (ccb != NULL)
1322 TAILQ_REMOVE(&sc->sc_ccb_fq, ccb, ccb_link);
1323 mtx_leave(&sc->sc_ccb_fqlck);
1324
1325 return (ccb);
1326 }
1327 
1328 void
1329 xbf_put_ccb(void *xsc, void *io)
1330 {
1331 struct xbf_softc *sc = xsc;
1332 struct xbf_ccb *ccb = io;
1333
1334 ccb->ccb_xfer = NULL;
1335
1336 mtx_enter(&sc->sc_ccb_fqlck);
1337 TAILQ_INSERT_HEAD(&sc->sc_ccb_fq, ccb, ccb_link);
1338 mtx_leave(&sc->sc_ccb_fqlck);
1339 }