Bug Summary

File: dev/pv/xbf.c
Warning: line 935, column 7
Although the value stored to 'error' is used in the enclosing expression, the value is never actually read from 'error'
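
The message comes from the analyzer's dead-store checker (enabled via -analyzer-checker=deadcode in the invocation below). It fires when an assignment's value is consumed by the enclosing expression but the variable itself is never read on any later path. A minimal self-contained sketch of the flagged shape, with hypothetical names (nothing here is from xbf.c):

	/* Hypothetical getter: returns 0 on success, -1 on failure. */
	static int
	get_prop(const char *name)
	{
		return (name[0] != '\0' ? 0 : -1);
	}

	int
	check_props(void)
	{
		int error;

		/* The store to 'error' feeds the enclosing "!= 0" test... */
		if ((error = get_prop("device-type")) != 0)
			goto errout;
		return (0);

	 errout:
		/* ...but no path reads 'error' afterwards, so the store is dead. */
		return (-1);
	}

In xbf_get_type() below, the flagged store has the same shape: the errout path prints the property name and returns -1 without ever reading 'error'.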

Annotated Source Code


clang -cc1 -cc1 -triple amd64-unknown-openbsd7.4 -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name xbf.c -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -ffp-contract=on -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -target-feature +retpoline-external-thunk -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/llvm16/lib/clang/16 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/legacy-dpm -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu13 -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/inc -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc/pmfw_if -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D SUSPEND -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant 
-fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fcf-protection=branch -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /home/ben/Projects/scan/2024-01-11-110808-61670-1 -x c /usr/src/sys/dev/pv/xbf.c
1/* $OpenBSD: xbf.c,v 1.53 2023/11/08 12:01:21 krw Exp $ */
2
3/*
4 * Copyright (c) 2016, 2017 Mike Belopuhov
5 * Copyright (c) 2009, 2011 Mark Kettenis
6 *
7 * Permission to use, copy, modify, and distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
10 *
11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 */
19
20#include "bio.h"
21
22#include <sys/param.h>
23#include <sys/systm.h>
24#include <sys/atomic.h>
25#include <sys/device.h>
26#include <sys/kernel.h>
27#include <sys/buf.h>
28#include <sys/malloc.h>
29#include <sys/task.h>
30
31#include <machine/bus.h>
32
33#include <dev/pv/xenreg.h>
34#include <dev/pv/xenvar.h>
35
36#include <scsi/scsi_all.h>
37#include <scsi/cd.h>
38#include <scsi/scsi_disk.h>
39#include <scsi/scsiconf.h>
40
41/* #define XBF_DEBUG */
42
43#ifdef XBF_DEBUG
44#define DPRINTF(x...) printf(x)
45#else
46#define DPRINTF(x...)
47#endif
48
49#define XBF_OP_READ 0
50#define XBF_OP_WRITE 1
51#define XBF_OP_BARRIER 2 /* feature-barrier */
52#define XBF_OP_FLUSH 3 /* feature-flush-cache */
53#define XBF_OP_DISCARD 5 /* feature-discard */
54#define XBF_OP_INDIRECT 6 /* feature-max-indirect-segments */
55
56#define XBF_MAX_SGE 11
57#define XBF_MAX_ISGE 8
58
59#define XBF_SEC_SHIFT 9
60#define XBF_SEC_SIZE (1 << XBF_SEC_SHIFT)
61
62#define XBF_CDROM 1
63#define XBF_REMOVABLE 2
64#define XBF_READONLY 4
65
66#define XBF_OK 0
67#define XBF_EIO -1 /* generic failure */
68#define XBF_EOPNOTSUPP -2 /* only for XBF_OP_BARRIER */
69
70struct xbf_sge {
71 uint32_t sge_ref;
72 uint8_t sge_first;
73 uint8_t sge_last;
74 uint16_t sge_pad;
75} __packed;
76
77/* Generic I/O request */
78struct xbf_req {
79 uint8_t req_op;
80 uint8_t req_nsegs;
81 uint16_t req_unit;
82#ifdef __amd64__
83 uint32_t req_pad;
84#endif
85 uint64_t req_id;
86 uint64_t req_sector;
87 struct xbf_sge req_sgl[XBF_MAX_SGE];
88} __packed;
89
90/* Indirect I/O request */
91struct xbf_ireq {
92 uint8_t req_op;
93 uint8_t req_iop;
94 uint16_t req_nsegs;
95#ifdef __amd64__
96 uint32_t req_pad;
97#endif
98 uint64_t req_id;
99 uint64_t req_sector;
100 uint16_t req_unit;
101 uint32_t req_gref[XBF_MAX_ISGE];
102#ifdef __i386__
103 uint64_t req_pad;
104#endif
105} __packed;
106
107struct xbf_rsp {
108 uint64_t rsp_id;
109 uint8_t rsp_op;
110 uint8_t rsp_pad1;
111 int16_t rsp_status;
112#ifdef __amd64__
113 uint32_t rsp_pad2;
114#endif
115} __packed;
116
117union xbf_ring_desc {
118 struct xbf_req xrd_req;
119 struct xbf_ireq xrd_ireq;
120 struct xbf_rsp xrd_rsp;
121} __packed;
122
123#define XBF_MIN_RING_SIZE 1
124#define XBF_MAX_RING_SIZE 8
125#define XBF_MAX_REQS 256 /* must be a power of 2 */
126
127struct xbf_ring {
128 volatile uint32_t xr_prod;
129 volatile uint32_t xr_prod_event;
130 volatile uint32_t xr_cons;
131 volatile uint32_t xr_cons_event;
132 uint32_t xr_reserved[12];
133 union xbf_ring_desc xr_desc[0];
134} __packed;
135
136struct xbf_dma_mem {
137 bus_size_t dma_size;
138 bus_dma_tag_t dma_tag;
139 bus_dmamap_t dma_map;
140 bus_dma_segment_t *dma_seg;
141 int dma_nsegs; /* total amount */
142 int dma_rsegs; /* used amount */
143 caddr_t dma_vaddr;
144};
145
146struct xbf_ccb {
147 struct scsi_xfer *ccb_xfer; /* associated transfer */
148 bus_dmamap_t ccb_dmap; /* transfer map */
149 struct xbf_dma_mem ccb_bbuf; /* bounce buffer */
150 uint32_t ccb_first; /* first descriptor */
151 uint32_t ccb_last; /* last descriptor */
152 uint16_t ccb_want; /* expected chunks */
153 uint16_t ccb_seen; /* completed chunks */
154 TAILQ_ENTRY(xbf_ccb) ccb_link;
155};
156TAILQ_HEAD(xbf_ccb_queue, xbf_ccb);
157
158struct xbf_softc {
159 struct device sc_dev;
160 struct device *sc_parent;
161 char sc_node[XEN_MAX_NODE_LEN];
162 char sc_backend[XEN_MAX_BACKEND_LEN];
163 bus_dma_tag_t sc_dmat;
164 int sc_domid;
165
166 xen_intr_handle_t sc_xih;
167
168 int sc_state;
169#define XBF_CONNECTED 4
170#define XBF_CLOSING 5
171
172 int sc_caps;
173#define XBF_CAP_BARRIER 0x0001
174#define XBF_CAP_FLUSH 0x0002
175
176 uint32_t sc_type;
177 uint32_t sc_unit;
178 char sc_dtype[16];
179 char sc_prod[16];
180
181 uint64_t sc_disk_size;
182 uint32_t sc_block_size;
183
184 /* Ring */
185 struct xbf_ring *sc_xr;
186 uint32_t sc_xr_cons;
187 uint32_t sc_xr_prod;
188 uint32_t sc_xr_size; /* in pages */
189 struct xbf_dma_mem sc_xr_dma;
190 uint32_t sc_xr_ref[XBF_MAX_RING_SIZE];
191 int sc_xr_ndesc;
192
193 /* Maximum number of blocks that one descriptor may refer to */
194 int sc_xrd_nblk;
195
196 /* CCBs */
197 int sc_nccb;
198 struct xbf_ccb *sc_ccbs;
199 struct xbf_ccb_queue sc_ccb_fq; /* free queue */
200 struct xbf_ccb_queue sc_ccb_sq; /* pending requests */
201 struct mutex sc_ccb_fqlck;
202 struct mutex sc_ccb_sqlck;
203
204 struct scsi_iopool sc_iopool;
205 struct device *sc_scsibus;
206};
207
208int xbf_match(struct device *, void *, void *);
209void xbf_attach(struct device *, struct device *, void *);
210int xbf_detach(struct device *, int);
211
212struct cfdriver xbf_cd = {
213 NULL, "xbf", DV_DULL
214};
215
216const struct cfattach xbf_ca = {
217 sizeof(struct xbf_softc), xbf_match, xbf_attach, xbf_detach
218};
219
220void xbf_intr(void *);
221
222int xbf_load_cmd(struct scsi_xfer *);
223int xbf_bounce_cmd(struct scsi_xfer *);
224void xbf_reclaim_cmd(struct scsi_xfer *);
225
226void xbf_scsi_cmd(struct scsi_xfer *);
227int xbf_submit_cmd(struct scsi_xfer *);
228int xbf_poll_cmd(struct scsi_xfer *);
229void xbf_complete_cmd(struct xbf_softc *, struct xbf_ccb_queue *, int);
230
231const struct scsi_adapter xbf_switch = {
232 xbf_scsi_cmd, NULL, NULL, NULL, NULL
233};
234
235void xbf_scsi_inq(struct scsi_xfer *);
236void xbf_scsi_inquiry(struct scsi_xfer *);
237void xbf_scsi_capacity(struct scsi_xfer *);
238void xbf_scsi_capacity16(struct scsi_xfer *);
239void xbf_scsi_done(struct scsi_xfer *, int);
240
241int xbf_dma_alloc(struct xbf_softc *, struct xbf_dma_mem *,
242 bus_size_t, int, int);
243void xbf_dma_free(struct xbf_softc *, struct xbf_dma_mem *);
244
245int xbf_get_type(struct xbf_softc *);
246int xbf_init(struct xbf_softc *);
247int xbf_ring_create(struct xbf_softc *);
248void xbf_ring_destroy(struct xbf_softc *);
249void xbf_stop(struct xbf_softc *);
250
251int xbf_alloc_ccbs(struct xbf_softc *);
252void xbf_free_ccbs(struct xbf_softc *);
253void *xbf_get_ccb(void *);
254void xbf_put_ccb(void *, void *);
255
256int
257xbf_match(struct device *parent, void *match, void *aux)
258{
259 struct xen_attach_args *xa = aux;
260
261 if (strcmp("vbd", xa->xa_name))
262 return (0);
263
264 return (1);
265}
266
267void
268xbf_attach(struct device *parent, struct device *self, void *aux)
269{
270 struct xen_attach_args *xa = aux;
271 struct xbf_softc *sc = (struct xbf_softc *)self;
272 struct scsibus_attach_args saa;
273
274 sc->sc_parent = parent;
275 sc->sc_dmat = xa->xa_dmat;
276 sc->sc_domid = xa->xa_domid;
277
278 memcpy(sc->sc_node, xa->xa_node, XEN_MAX_NODE_LEN);
279 memcpy(sc->sc_backend, xa->xa_backend, XEN_MAX_BACKEND_LEN);
280
281 if (xbf_get_type(sc))
282 return;
283
284 if (xen_intr_establish(0, &sc->sc_xih, sc->sc_domid, xbf_intr, sc,
285 sc->sc_dev.dv_xname)) {
286 printf(": failed to establish an interrupt\n");
287 return;
288 }
289 xen_intr_mask(sc->sc_xih);
290
291 printf(" backend %d channel %u: %s\n", sc->sc_domid, sc->sc_xih,
292 sc->sc_dtype);
293
294 if (xbf_init(sc))
295 goto error;
296
297 if (xen_intr_unmask(sc->sc_xih)) {
298 printf("%s: failed to enable interrupts\n",
299 sc->sc_dev.dv_xname);
300 goto error;
301 }
302
303 saa.saa_adapter = &xbf_switch;
304 saa.saa_adapter_softc = self;
305 saa.saa_adapter_buswidth = 1;
306 saa.saa_luns = 1;
307 saa.saa_adapter_target = SDEV_NO_ADAPTER_TARGET;
308 saa.saa_openings = sc->sc_nccb;
309 saa.saa_pool = &sc->sc_iopool;
310 saa.saa_quirks = saa.saa_flags = 0;
311 saa.saa_wwpn = saa.saa_wwnn = 0;
312
313 sc->sc_scsibus = config_found(self, &saa, scsiprint);
314
315 xen_unplug_emulated(parent, XEN_UNPLUG_IDE | XEN_UNPLUG_IDESEC);
316
317 return;
318
319 error:
320 xen_intr_disestablish(sc->sc_xih);
321}
322
323int
324xbf_detach(struct device *self, int flags)
325{
326 struct xbf_softc *sc = (struct xbf_softc *)self;
327 int ostate = sc->sc_state;
328
329 sc->sc_state = XBF_CLOSING;
330
331 xen_intr_mask(sc->sc_xih);
332 xen_intr_barrier(sc->sc_xih);
333
334 if (ostate == XBF_CONNECTED) {
335 xen_intr_disestablish(sc->sc_xih);
336 xbf_stop(sc);
337 }
338
339 if (sc->sc_scsibus)
340 return (config_detach(sc->sc_scsibus, flags | DETACH_FORCE));
341
342 return (0);
343}
344
345void
346xbf_intr(void *xsc)
347{
348 struct xbf_softc *sc = xsc;
349 struct xbf_ring *xr = sc->sc_xr;
350 struct xbf_dma_mem *dma = &sc->sc_xr_dma;
351 struct xbf_ccb_queue cq;
352 struct xbf_ccb *ccb, *nccb;
353 uint32_t cons;
354 int desc, s;
355
356 TAILQ_INIT(&cq);
357
358 for (;;) {
359 bus_dmamap_sync(dma->dma_tag, dma->dma_map, 0, dma->dma_size,
360 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
361
362 for (cons = sc->sc_xr_cons; cons != xr->xr_cons; cons++) {
363 desc = cons & (sc->sc_xr_ndesc - 1);
364 xbf_complete_cmd(sc, &cq, desc);
365 }
366
367 sc->sc_xr_cons = cons;
368
369 if (TAILQ_EMPTY(&cq))
370 break;
371
372 s = splbio();
373 KERNEL_LOCK();
374 TAILQ_FOREACH_SAFE(ccb, &cq, ccb_link, nccb) {
375 TAILQ_REMOVE(&cq, ccb, ccb_link);
376 xbf_reclaim_cmd(ccb->ccb_xfer);
377 scsi_done(ccb->ccb_xfer);
378 }
379 KERNEL_UNLOCK();
380 splx(s);
381 }
382}
383
384void
385xbf_scsi_cmd(struct scsi_xfer *xs)
386{
387 struct xbf_softc *sc = xs->sc_link->bus->sb_adapter_softc;
388
389 switch (xs->cmd.opcode) {
390 case READ_COMMAND:
391 case READ_10:
392 case READ_12:
393 case READ_16:
394 case WRITE_COMMAND:
395 case WRITE_10:
396 case WRITE_12:
397 case WRITE_16:
398 if (sc->sc_state != XBF_CONNECTED) {
399 xbf_scsi_done(xs, XS_SELTIMEOUT);
400 return;
401 }
402 break;
403 case SYNCHRONIZE_CACHE:
404 if (!(sc->sc_caps & (XBF_CAP_BARRIER|XBF_CAP_FLUSH))) {
405 xbf_scsi_done(xs, XS_NOERROR);
406 return;
407 }
408 break;
409 case INQUIRY:
410 xbf_scsi_inq(xs);
411 return;
412 case READ_CAPACITY:
413 xbf_scsi_capacity(xs);
414 return;
415 case READ_CAPACITY_16:
416 xbf_scsi_capacity16(xs);
417 return;
418 case TEST_UNIT_READY:
419 case START_STOP:
420 case PREVENT_ALLOW:
421 xbf_scsi_done(xs, XS_NOERROR);
422 return;
423 default:
424 printf("%s cmd 0x%02x\n", __func__, xs->cmd.opcode);
425 case MODE_SENSE:
426 case MODE_SENSE_BIG:
427 case REPORT_LUNS:
428 case READ_TOC:
429 xbf_scsi_done(xs, XS_DRIVER_STUFFUP);
430 return;
431 }
432
433 if (xbf_submit_cmd(xs)) {
434 xbf_scsi_done(xs, XS_DRIVER_STUFFUP);
435 return;
436 }
437
438 if (ISSET(xs->flags, SCSI_POLL) && xbf_poll_cmd(xs)) {
439 printf("%s: op %#x timed out\n", sc->sc_dev.dv_xname,
440 xs->cmd.opcode);
441 if (sc->sc_state == XBF_CONNECTED) {
442 xbf_reclaim_cmd(xs);
443 xbf_scsi_done(xs, XS_TIMEOUT);
444 }
445 return;
446 }
447}
448
449int
450xbf_load_cmd(struct scsi_xfer *xs)
451{
452 struct xbf_softc *sc = xs->sc_link->bus->sb_adapter_softc;
453 struct xbf_ccb *ccb = xs->io;
454 struct xbf_sge *sge;
455 union xbf_ring_desc *xrd;
456 bus_dmamap_t map;
457 int error, mapflags, nsg, seg;
458 int desc, ndesc = 0;
459
460 map = ccb->ccb_dmap;
461
462 mapflags = (sc->sc_domid << 16);
463 if (ISSET(xs->flags, SCSI_NOSLEEP))
464 mapflags |= BUS_DMA_NOWAIT;
465 else
466 mapflags |= BUS_DMA_WAITOK;
467 if (ISSET(xs->flags, SCSI_DATA_IN))
468 mapflags |= BUS_DMA_READ;
469 else
470 mapflags |= BUS_DMA_WRITE;
471
472 error = bus_dmamap_load(sc->sc_dmat, map, xs->data, xs->datalen,
473 NULL, mapflags);
474 if (error) {
475 printf("%s: failed to load %d bytes of data\n",
476 sc->sc_dev.dv_xname, xs->datalen);
477 return (error);
478 }
479
480 xrd = &sc->sc_xr->xr_desc[ccb->ccb_first];
481 /* seg is the segment map iterator, nsg is the s-g list iterator */
482 for (seg = 0, nsg = 0; seg < map->dm_nsegs; seg++, nsg++) {
483 if (nsg == XBF_MAX_SGE) {
484 /* Number of segments so far */
485 xrd->xrd_req.req_nsegs = nsg;
486 /* Pick next descriptor */
487 ndesc++;
488 desc = (sc->sc_xr_prod + ndesc) & (sc->sc_xr_ndesc - 1);
489 xrd = &sc->sc_xr->xr_desc[desc];
490 nsg = 0;
491 }
492 sge = &xrd->xrd_req.req_sgl[nsg];
493 sge->sge_ref = map->dm_segs[seg].ds_addr;
494 sge->sge_first = nsg > 0 ? 0 :
495 (((vaddr_t)xs->data + ndesc * sc->sc_xrd_nblk *
496 (1 << XBF_SEC_SHIFT)) & PAGE_MASK) >> XBF_SEC_SHIFT;
497 sge->sge_last = sge->sge_first +
498 (map->dm_segs[seg].ds_len >> XBF_SEC_SHIFT) - 1;
499
500 DPRINTF("%s: seg %d/%d ref %lu len %lu first %u last %u\n",
501 sc->sc_dev.dv_xname, nsg + 1, map->dm_nsegs,
502 map->dm_segs[seg].ds_addr, map->dm_segs[seg].ds_len,
503 sge->sge_first, sge->sge_last);
504
505 KASSERT(sge->sge_last <= 7);
506 }
507
508 xrd->xrd_req.req_nsegs = nsg;
509
510 return (0);
511}
512
513int
514xbf_bounce_cmd(struct scsi_xfer *xs)
515{
516 struct xbf_softc *sc = xs->sc_link->bus->sb_adapter_softc;
517 struct xbf_ccb *ccb = xs->io;
518 struct xbf_sge *sge;
519 struct xbf_dma_mem *dma;
520 union xbf_ring_desc *xrd;
521 bus_dmamap_t map;
522 bus_size_t size;
523 int error, mapflags, nsg, seg;
524 int desc, ndesc = 0;
525
526 size = roundup(xs->datalen, PAGE_SIZE);
527 if (size > MAXPHYS)
528 return (EFBIG);
529
530 mapflags = (sc->sc_domid << 16);
531 if (ISSET(xs->flags, SCSI_NOSLEEP))
532 mapflags |= BUS_DMA_NOWAIT;
533 else
534 mapflags |= BUS_DMA_WAITOK;
535 if (ISSET(xs->flags, SCSI_DATA_IN))
536 mapflags |= BUS_DMA_READ;
537 else
538 mapflags |= BUS_DMA_WRITE;
539
540 dma = &ccb->ccb_bbuf;
541 error = xbf_dma_alloc(sc, dma, size, size / PAGE_SIZE, mapflags);
542 if (error) {
543 DPRINTF("%s: failed to allocate a %lu byte bounce buffer\n",
544 sc->sc_dev.dv_xname, size);
545 return (error);
546 }
547
548 map = dma->dma_map;
549
550 DPRINTF("%s: bouncing %d bytes via %lu size map with %d segments\n",
551 sc->sc_dev.dv_xname, xs->datalen, size, map->dm_nsegs);
552
553 if (ISSET(xs->flags, SCSI_DATA_OUT))
554 memcpy(dma->dma_vaddr, xs->data, xs->datalen);
555
556 xrd = &sc->sc_xr->xr_desc[ccb->ccb_first];
557 /* seg is the map segment iterator, nsg is the s-g element iterator */
558 for (seg = 0, nsg = 0; seg < map->dm_nsegs; seg++, nsg++) {
559 if (nsg == XBF_MAX_SGE) {
560 /* Number of segments so far */
561 xrd->xrd_req.req_nsegs = nsg;
562 /* Pick next descriptor */
563 ndesc++;
564 desc = (sc->sc_xr_prod + ndesc) & (sc->sc_xr_ndesc - 1);
565 xrd = &sc->sc_xr->xr_desc[desc];
566 nsg = 0;
567 }
568 sge = &xrd->xrd_req.req_sgl[nsg];
569 sge->sge_ref = map->dm_segs[seg].ds_addr;
570 sge->sge_first = nsg > 0 ? 0 :
571 (((vaddr_t)dma->dma_vaddr + ndesc * sc->sc_xrd_nblk *
572 (1 << XBF_SEC_SHIFT)) & PAGE_MASK) >> XBF_SEC_SHIFT;
573 sge->sge_last = sge->sge_first +
574 (map->dm_segs[seg].ds_len >> XBF_SEC_SHIFT) - 1;
575
576 DPRINTF("%s: seg %d/%d ref %lu len %lu first %u last %u\n",
577 sc->sc_dev.dv_xname, nsg + 1, map->dm_nsegs,
578 map->dm_segs[seg].ds_addr, map->dm_segs[seg].ds_len,
579 sge->sge_first, sge->sge_last);
580
581 KASSERT(sge->sge_last <= 7);
582 }
583
584 xrd->xrd_req.req_nsegs = nsg;
585
586 return (0);
587}
588
589void
590xbf_reclaim_cmd(struct scsi_xfer *xs)
591{
592 struct xbf_softc *sc = xs->sc_link->bus->sb_adapter_softc;
593 struct xbf_ccb *ccb = xs->io;
594 struct xbf_dma_mem *dma = &ccb->ccb_bbuf;
595
596 if (dma->dma_size == 0)
597 return;
598
599 if (ISSET(xs->flags, SCSI_DATA_IN))
600 memcpy(xs->data, (caddr_t)dma->dma_vaddr, xs->datalen);
601
602 xbf_dma_free(sc, &ccb->ccb_bbuf);
603}
604
605int
606xbf_submit_cmd(struct scsi_xfer *xs)
607{
608 struct xbf_softc *sc = xs->sc_link->bus->sb_adapter_softc;
609 struct xbf_ccb *ccb = xs->io;
610 union xbf_ring_desc *xrd;
611 struct scsi_rw *rw;
612 struct scsi_rw_10 *rw10;
613 struct scsi_rw_12 *rw12;
614 struct scsi_rw_16 *rw16;
615 uint64_t lba = 0;
616 uint32_t nblk = 0;
617 uint8_t operation = 0;
618 unsigned int ndesc = 0;
619 int desc, error;
620
621 switch (xs->cmd.opcode) {
622 case READ_COMMAND:
623 case READ_10:
624 case READ_12:
625 case READ_16:
626 operation = XBF_OP_READ;
627 break;
628
629 case WRITE_COMMAND:
630 case WRITE_10:
631 case WRITE_12:
632 case WRITE_16:
633 operation = XBF_OP_WRITE;
634 break;
635
636 case SYNCHRONIZE_CACHE:
637 if (sc->sc_caps & XBF_CAP_FLUSH)
638 operation = XBF_OP_FLUSH;
639 else if (sc->sc_caps & XBF_CAP_BARRIER)
640 operation = XBF_OP_BARRIER;
641 break;
642 }
643
644 /*
645 * READ/WRITE/SYNCHRONIZE commands. SYNCHRONIZE CACHE
646 * has the same layout as 10-byte READ/WRITE commands.
647 */
648 if (xs->cmdlen == 6) {
649 rw = (struct scsi_rw *)&xs->cmd;
650 lba = _3btol(rw->addr) & (SRW_TOPADDR << 16 | 0xffff);
651 nblk = rw->length ? rw->length : 0x100;
652 } else if (xs->cmdlen == 10) {
653 rw10 = (struct scsi_rw_10 *)&xs->cmd;
654 lba = _4btol(rw10->addr);
655 nblk = _2btol(rw10->length);
656 } else if (xs->cmdlen == 12) {
657 rw12 = (struct scsi_rw_12 *)&xs->cmd;
658 lba = _4btol(rw12->addr);
659 nblk = _4btol(rw12->length);
660 } else if (xs->cmdlen == 16) {
661 rw16 = (struct scsi_rw_16 *)&xs->cmd;
662 lba = _8btol(rw16->addr);
663 nblk = _4btol(rw16->length);
664 }
665
666 /* SCSI lba/nblk are sc_block_size. ccb's need XBF_SEC_SIZE. */
667 lba *= sc->sc_block_size / XBF_SEC_SIZE;
668 nblk *= sc->sc_block_size / XBF_SEC_SIZE;
669
670 ccb->ccb_want = ccb->ccb_seen = 0;
671
672 do {
673 desc = (sc->sc_xr_prod + ndesc) & (sc->sc_xr_ndesc - 1);
674 if (ndesc == 0)
675 ccb->ccb_first = desc;
676
677 xrd = &sc->sc_xr->xr_desc[desc];
678 xrd->xrd_req.req_op = operation;
679 xrd->xrd_req.req_unit = (uint16_t)sc->sc_unit;
680 xrd->xrd_req.req_sector = lba + ndesc * sc->sc_xrd_nblk;
681
682 ccb->ccb_want |= 1 << ndesc;
683 ndesc++;
684 } while (ndesc * sc->sc_xrd_nblk < nblk);
685
686 ccb->ccb_last = desc;
687
688 if (operation == XBF_OP_READ || operation == XBF_OP_WRITE) {
689 DPRINTF("%s: desc %u,%u %s%s lba %llu nsec %u "
690 "len %d\n", sc->sc_dev.dv_xname, ccb->ccb_first,
691 ccb->ccb_last, operation == XBF_OP_READ ? "read" :
692 "write", ISSET(xs->flags, SCSI_POLL) ? "-poll" : "",
693 lba, nblk, xs->datalen);
694
695 if (((vaddr_t)xs->data & ((1 << XBF_SEC_SHIFT) - 1)) == 0)
696 error = xbf_load_cmd(xs);
697 else
698 error = xbf_bounce_cmd(xs);
699 if (error)
700 return (-1);
701 } else {
702 DPRINTF("%s: desc %u %s%s lba %llu\n", sc->sc_dev.dv_xname,
703 ccb->ccb_first, operation == XBF_OP_FLUSH ? "flush" :
704 "barrier", ISSET(xs->flags, SCSI_POLL) ? "-poll" : "",
705 lba);
706 xrd->xrd_req.req_nsegs = 0;
707 }
708
709 ccb->ccb_xfer = xs;
710
711 bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmap, 0,
712 ccb->ccb_dmap->dm_mapsize, BUS_DMASYNC_PREREAD |
713 BUS_DMASYNC_PREWRITE);
714
715 mtx_enter(&sc->sc_ccb_sqlck);
716 TAILQ_INSERT_TAIL(&sc->sc_ccb_sq, ccb, ccb_link);
717 mtx_leave(&sc->sc_ccb_sqlck);
718
719 sc->sc_xr_prod += ndesc;
720 sc->sc_xr->xr_prod = sc->sc_xr_prod;
721 sc->sc_xr->xr_cons_event = sc->sc_xr_prod;
722
723 bus_dmamap_sync(sc->sc_dmat, sc->sc_xr_dma.dma_map, 0,
724 sc->sc_xr_dma.dma_map->dm_mapsize, BUS_DMASYNC_PREREAD |
725 BUS_DMASYNC_PREWRITE);
726
727 xen_intr_signal(sc->sc_xih);
728
729 return (0);
730}
731
732int
733xbf_poll_cmd(struct scsi_xfer *xs)
734{
735 int timo = 1000;
736
737 do {
738 if (ISSET(xs->flags, ITSDONE))
739 break;
740 if (ISSET(xs->flags, SCSI_NOSLEEP))
741 delay(10);
742 else
743 tsleep_nsec(xs, PRIBIO, "xbfpoll", USEC_TO_NSEC(10));
744 xbf_intr(xs->sc_link->bus->sb_adapter_softc);
745 } while(--timo > 0);
746
747 return (0);
748}
749
750void
751xbf_complete_cmd(struct xbf_softc *sc, struct xbf_ccb_queue *cq, int desc)
752{
753 struct xbf_ccb *ccb;
754 union xbf_ring_desc *xrd;
755 bus_dmamap_t map;
756 uint32_t id, chunk;
757 int error;
758
759 xrd = &sc->sc_xr->xr_desc[desc];
760 error = xrd->xrd_rsp.rsp_status == XBF_OK ? XS_NOERROR :
761 XS_DRIVER_STUFFUP;
762
763 mtx_enter(&sc->sc_ccb_sqlck);
764
765 /*
766 * To find a CCB for id equal to x within an interval [a, b] we must
767 * locate a CCB such that (x - a) mod N <= (b - a) mod N, where a is
768 * the first descriptor, b is the last one and N is the ring size.
769 */
770 id = (uint32_t)xrd->xrd_rsp.rsp_id;
771 TAILQ_FOREACH(ccb, &sc->sc_ccb_sq, ccb_link) {
772 if (((id - ccb->ccb_first) & (sc->sc_xr_ndesc - 1)) <=
773 ((ccb->ccb_last - ccb->ccb_first) & (sc->sc_xr_ndesc - 1)))
774 break;
775 }
776 KASSERT(ccb != NULL);
777
778 /* Assert that this chunk belongs to this CCB */
779 chunk = 1 << ((id - ccb->ccb_first) & (sc->sc_xr_ndesc - 1));
780 KASSERT((ccb->ccb_want & chunk) != 0);
781 KASSERT((ccb->ccb_seen & chunk) == 0);
782
783 /* When all chunks are collected remove the CCB from the queue */
784 ccb->ccb_seen |= chunk;
785 if (ccb->ccb_seen == ccb->ccb_want)
786 TAILQ_REMOVE(&sc->sc_ccb_sq, ccb, ccb_link);
787
788 mtx_leave(&sc->sc_ccb_sqlck);
789
790 DPRINTF("%s: completing desc %d(%llu) op %u with error %d\n",
791 sc->sc_dev.dv_xname, desc, xrd->xrd_rsp.rsp_id,
792 xrd->xrd_rsp.rsp_op, xrd->xrd_rsp.rsp_status);
793
794 memset(xrd, 0, sizeof(*xrd));
795 xrd->xrd_req.req_id = desc;
796
797 if (ccb->ccb_seen != ccb->ccb_want)
798 return;
799
800 if (ccb->ccb_bbuf.dma_size > 0)
801 map = ccb->ccb_bbuf.dma_map;
802 else
803 map = ccb->ccb_dmap;
804
805 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
806 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
807 bus_dmamap_unload(sc->sc_dmat, map);
808
809 ccb->ccb_xfer->resid = 0;
810 ccb->ccb_xfer->error = error;
811 TAILQ_INSERT_TAIL(cq, ccb, ccb_link);
812}
813
814void
815xbf_scsi_inq(struct scsi_xfer *xs)
816{
817 struct scsi_inquiry *inq = (struct scsi_inquiry *)&xs->cmd;
818
819 if (ISSET(inq->flags, SI_EVPD))
820 xbf_scsi_done(xs, XS_DRIVER_STUFFUP);
821 else
822 xbf_scsi_inquiry(xs);
823}
824
825void
826xbf_scsi_inquiry(struct scsi_xfer *xs)
827{
828 struct xbf_softc *sc = xs->sc_link->bus->sb_adapter_softc;
829 struct scsi_inquiry_data inq;
830
831 bzero(&inq, sizeof(inq));
832
833 switch (sc->sc_type) {
834 case XBF_CDROM:
835 inq.device = T_CDROM;
836 break;
837 default:
838 inq.device = T_DIRECT;
839 break;
840 }
841
842 inq.version = SCSI_REV_SPC3;
843 inq.response_format = SID_SCSI2_RESPONSE;
844 inq.additional_length = SID_SCSI2_ALEN;
845 inq.flags |= SID_CmdQue;
846 bcopy("Xen ", inq.vendor, sizeof(inq.vendor));
847 bcopy(sc->sc_prod, inq.product, sizeof(inq.product));
848 bcopy("0000", inq.revision, sizeof(inq.revision));
849
850 scsi_copy_internal_data(xs, &inq, sizeof(inq));
851
852 xbf_scsi_done(xs, XS_NOERROR);
853}
854
855void
856xbf_scsi_capacity(struct scsi_xfer *xs)
857{
858 struct xbf_softc *sc = xs->sc_link->bus->sb_adapter_softc;
859 struct scsi_read_cap_data rcd;
860 uint64_t capacity;
861
862 bzero(&rcd, sizeof(rcd));
863
864 /* [addr|length] are sc_block_size. sc->sc_disk_size is XBF_SEC_SIZE. */
865 capacity = (sc->sc_disk_size * XBF_SEC_SIZE) / sc->sc_block_size - 1;
866 if (capacity > 0xffffffff)
867 capacity = 0xffffffff;
868
869 _lto4b(capacity, rcd.addr);
870 _lto4b(sc->sc_block_size, rcd.length);
871
872 bcopy(&rcd, xs->data, MIN(sizeof(rcd), xs->datalen));
873
874 xbf_scsi_done(xs, XS_NOERROR);
875}
876
877void
878xbf_scsi_capacity16(struct scsi_xfer *xs)
879{
880 struct xbf_softc *sc = xs->sc_link->bus->sb_adapter_softc;
881 struct scsi_read_cap_data_16 rcd;
882 uint64_t capacity;
883
884 bzero(&rcd, sizeof(rcd));
885
886 /* [addr|length] are sc_block_size. sc->sc_disk_size is XBF_SEC_SIZE. */
887 capacity = (sc->sc_disk_size * XBF_SEC_SIZE) / sc->sc_block_size - 1;
888 _lto8b(capacity, rcd.addr);
889 _lto4b(sc->sc_block_size, rcd.length);
890
891 bcopy(&rcd, xs->data, MIN(sizeof(rcd), xs->datalen));
892
893 xbf_scsi_done(xs, XS_NOERROR);
894}
895
896void
897xbf_scsi_done(struct scsi_xfer *xs, int error)
898{
899 int s;
900
901 xs->error = error;
902
903 s = splbio();
904 scsi_done(xs);
905 splx(s);
906}
907
908int
909xbf_get_type(struct xbf_softc *sc)
910{
911 unsigned long long res;
912 const char *prop;
913 char val[32];
914 int error;
915
916 prop = "type";
917 if ((error = xs_getprop(sc->sc_parent, sc->sc_backend, prop, val,
918 sizeof(val))) != 0)
919 goto errout;
920 snprintf(sc->sc_prod, sizeof(sc->sc_prod), "%s", val);
921
922 prop = "dev";
923 if ((error = xs_getprop(sc->sc_parent, sc->sc_backend, prop, val,
924 sizeof(val))) != 0)
925 goto errout;
926 snprintf(sc->sc_prod, sizeof(sc->sc_prod), "%s %s", sc->sc_prod, val);
927
928 prop = "virtual-device";
929 if ((error = xs_getnum(sc->sc_parent, sc->sc_node, prop, &res)) != 0)
930 goto errout;
931 sc->sc_unit = (uint32_t)res;
932 snprintf(sc->sc_prod, sizeof(sc->sc_prod), "%s %llu", sc->sc_prod, res);
933
934 prop = "device-type";
935 if ((error = xs_getprop(sc->sc_parent, sc->sc_node, prop,
Although the value stored to 'error' is used in the enclosing expression, the value is never actually read from 'error'
936 sc->sc_dtype, sizeof(sc->sc_dtype))) != 0)
937 goto errout;
938 if (!strcmp(sc->sc_dtype, "cdrom"))
939 sc->sc_type = XBF_CDROM1;
940
941 return (0);
942
943 errout:
944 printf("%s: failed to read \"%s\" property\n", sc->sc_dev.dv_xname,
945 prop);
946 return (-1);
947}
948
949int
950xbf_init(struct xbf_softc *sc)
951{
952 unsigned long long res;
953 const char *action, *prop;
954 char pbuf[sizeof("ring-refXX")];
955 unsigned int i;
956 int error;
957
958 prop = "max-ring-page-order";
959 error = xs_getnum(sc->sc_parent, sc->sc_backend, prop, &res);
960 if (error == 0)
961 sc->sc_xr_size = 1 << res;
962 if (error == ENOENT) {
963 prop = "max-ring-pages";
964 error = xs_getnum(sc->sc_parent, sc->sc_backend, prop, &res);
965 if (error == 0)
966 sc->sc_xr_size = res;
967 }
968 /* Fallback to the known minimum */
969 if (error)
970 sc->sc_xr_size = XBF_MIN_RING_SIZE;
971
972 if (sc->sc_xr_size < XBF_MIN_RING_SIZE)
973 sc->sc_xr_size = XBF_MIN_RING_SIZE;
974 if (sc->sc_xr_size > XBF_MAX_RING_SIZE)
975 sc->sc_xr_size = XBF_MAX_RING_SIZE;
976 if (!powerof2(sc->sc_xr_size))
977 sc->sc_xr_size = 1 << (fls(sc->sc_xr_size) - 1);
978
979 sc->sc_xr_ndesc = ((sc->sc_xr_size * PAGE_SIZE) -
980 sizeof(struct xbf_ring)) / sizeof(union xbf_ring_desc);
981 if (!powerof2(sc->sc_xr_ndesc))
982 sc->sc_xr_ndesc = 1 << (fls(sc->sc_xr_ndesc) - 1);
983 if (sc->sc_xr_ndesc > XBF_MAX_REQS)
984 sc->sc_xr_ndesc = XBF_MAX_REQS;
985
986 DPRINTF("%s: %u ring pages, %d requests\n",
987 sc->sc_dev.dv_xname, sc->sc_xr_size, sc->sc_xr_ndesc);
988
989 if (xbf_ring_create(sc))
990 return (-1);
991
992 action = "set";
993
994 for (i = 0; i < sc->sc_xr_size; i++) {
995 if (i == 0 && sc->sc_xr_size == 1)
996 snprintf(pbuf, sizeof(pbuf), "ring-ref");
997 else
998 snprintf(pbuf, sizeof(pbuf), "ring-ref%d", i);
999 prop = pbuf;
1000 if (xs_setnum(sc->sc_parent, sc->sc_node, prop,
1001 sc->sc_xr_ref[i]))
1002 goto errout;
1003 }
1004
1005 if (sc->sc_xr_size > 1) {
1006 prop = "num-ring-pages";
1007 if (xs_setnum(sc->sc_parent, sc->sc_node, prop,
1008 sc->sc_xr_size))
1009 goto errout;
1010 prop = "ring-page-order";
1011 if (xs_setnum(sc->sc_parent, sc->sc_node, prop,
1012 fls(sc->sc_xr_size) - 1))
1013 goto errout;
1014 }
1015
1016 prop = "event-channel";
1017 if (xs_setnum(sc->sc_parent, sc->sc_node, prop, sc->sc_xih))
1018 goto errout;
1019
1020 prop = "protocol";
1021#ifdef __amd64__
1022 if (xs_setprop(sc->sc_parent, sc->sc_node, prop, "x86_64-abi",
1023 strlen("x86_64-abi")))
1024 goto errout;
1025#else
1026 if (xs_setprop(sc->sc_parent, sc->sc_node, prop, "x86_32-abi",
1027 strlen("x86_32-abi")))
1028 goto errout;
1029#endif
1030
1031 if (xs_setprop(sc->sc_parent, sc->sc_node, "state",
1032 XEN_STATE_INITIALIZED, strlen(XEN_STATE_INITIALIZED))) {
1033 printf("%s: failed to set state to INITIALIZED\n",
1034 sc->sc_dev.dv_xname);
1035 xbf_ring_destroy(sc);
1036 return (-1);
1037 }
1038
1039 if (xs_await_transition(sc->sc_parent, sc->sc_backend, "state",
1040 XEN_STATE_CONNECTED, 10000)) {
1041 printf("%s: timed out waiting for backend to connect\n",
1042 sc->sc_dev.dv_xname);
1043 xbf_ring_destroy(sc);
1044 return (-1);
1045 }
1046
1047 action = "read";
1048
1049 prop = "sectors";
1050 if ((error = xs_getnum(sc->sc_parent, sc->sc_backend, prop, &res)) != 0)
1051 goto errout;
1052 sc->sc_disk_size = res;
1053
1054 prop = "sector-size";
1055 if ((error = xs_getnum(sc->sc_parent, sc->sc_backend, prop, &res)) != 0)
1056 goto errout;
1057 sc->sc_block_size = res;
1058
1059 prop = "feature-barrier";
1060 if ((error = xs_getnum(sc->sc_parent, sc->sc_backend, prop, &res)) != 0
1061 && error != ENOENT)
1062 goto errout;
1063 if (error == 0 && res == 1)
1064 sc->sc_caps |= XBF_CAP_BARRIER;
1065
1066 prop = "feature-flush-cache";
1067 if ((error = xs_getnum(sc->sc_parent, sc->sc_backend, prop, &res)) != 0
1068 && error != ENOENT)
1069 goto errout;
1070 if (error == 0 && res == 1)
1071 sc->sc_caps |= XBF_CAP_FLUSH;
1072
1073#ifdef XBF_DEBUG
1074 if (sc->sc_caps) {
1075 printf("%s: features:", sc->sc_dev.dv_xname);
1076 if (sc->sc_caps & XBF_CAP_BARRIER)
1077 printf(" BARRIER");
1078 if (sc->sc_caps & XBF_CAP_FLUSH)
1079 printf(" FLUSH");
1080 printf("\n");
1081 }
1082#endif
1083
1084 if (xs_setprop(sc->sc_parent, sc->sc_node, "state",
1085 XEN_STATE_CONNECTED, strlen(XEN_STATE_CONNECTED))) {
1086 printf("%s: failed to set state to CONNECTED\n",
1087 sc->sc_dev.dv_xname);
1088 return (-1);
1089 }
1090
1091 sc->sc_state = XBF_CONNECTED;
1092
1093 return (0);
1094
1095 errout:
1096 printf("%s: failed to %s \"%s\" property (%d)\n", sc->sc_dev.dv_xname,
1097 action, prop, error);
1098 xbf_ring_destroy(sc);
1099 return (-1);
1100}
1101
1102int
1103xbf_dma_alloc(struct xbf_softc *sc, struct xbf_dma_mem *dma,
1104 bus_size_t size, int nsegs, int mapflags)
1105{
1106 int error;
1107
1108 dma->dma_tag = sc->sc_dmat;
1109
1110 dma->dma_seg = mallocarray(nsegs, sizeof(bus_dma_segment_t), M_DEVBUF,
1111 M_ZERO | M_NOWAIT);
1112 if (dma->dma_seg == NULL) {
1113 printf("%s: failed to allocate a segment array\n",
1114 sc->sc_dev.dv_xname);
1115 return (ENOMEM);
1116 }
1117
1118 error = bus_dmamap_create(dma->dma_tag, size, nsegs, PAGE_SIZE, 0,
1119 BUS_DMA_NOWAIT, &dma->dma_map);
1120 if (error) {
1121 printf("%s: failed to create a memory map (%d)\n",
1122 sc->sc_dev.dv_xname, error);
1123 goto errout;
1124 }
1125
1126 error = bus_dmamem_alloc(dma->dma_tag, size, PAGE_SIZE, 0,
1127 dma->dma_seg, nsegs, &dma->dma_rsegs, BUS_DMA_ZERO |
1128 BUS_DMA_NOWAIT);
1129 if (error) {
1130 printf("%s: failed to allocate DMA memory (%d)\n",
1131 sc->sc_dev.dv_xname, error);
1132 goto destroy;
1133 }
1134
1135 error = bus_dmamem_map(dma->dma_tag, dma->dma_seg, dma->dma_rsegs,
1136 size, &dma->dma_vaddr, BUS_DMA_NOWAIT);
1137 if (error) {
1138 printf("%s: failed to map DMA memory (%d)\n",
1139 sc->sc_dev.dv_xname, error);
1140 goto free;
1141 }
1142
1143 error = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
1144 size, NULL, mapflags | BUS_DMA_NOWAIT);
1145 if (error) {
1146 printf("%s: failed to load DMA memory (%d)\n",
1147 sc->sc_dev.dv_xname, error);
1148 goto unmap;
1149 }
1150
1151 dma->dma_size = size;
1152 dma->dma_nsegs = nsegs;
1153 return (0);
1154
1155 unmap:
1156 bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, size);
1157 free:
1158 bus_dmamem_free(dma->dma_tag, dma->dma_seg, dma->dma_rsegs);
1159 destroy:
1160 bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
1161 errout:
1162 free(dma->dma_seg, M_DEVBUF, nsegs * sizeof(bus_dma_segment_t));
1163 dma->dma_map = NULL;
1164 dma->dma_tag = NULL;
1165 return (error);
1166}
1167
1168void
1169xbf_dma_free(struct xbf_softc *sc, struct xbf_dma_mem *dma)
1170{
1171 if (dma->dma_tag == NULL || dma->dma_map == NULL)
1172 return;
1173 bus_dmamap_sync(dma->dma_tag, dma->dma_map, 0, dma->dma_size,
1174 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1175 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
1176 bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, dma->dma_size);
1177 bus_dmamem_free(dma->dma_tag, dma->dma_seg, dma->dma_rsegs);
1178 bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
1179 free(dma->dma_seg, M_DEVBUF, dma->dma_nsegs * sizeof(bus_dma_segment_t));
1180 dma->dma_seg = NULL;
1181 dma->dma_map = NULL;
1182 dma->dma_size = 0;
1183}
1184
1185int
1186xbf_ring_create(struct xbf_softc *sc)
1187{
1188 int i;
1189
1190 if (xbf_dma_alloc(sc, &sc->sc_xr_dma, sc->sc_xr_size * PAGE_SIZE,
1191 sc->sc_xr_size, sc->sc_domid << 16))
1192 return (-1);
1193 for (i = 0; i < sc->sc_xr_dma.dma_map->dm_nsegs; i++)
1194 sc->sc_xr_ref[i] = sc->sc_xr_dma.dma_map->dm_segs[i].ds_addr;
1195
1196 sc->sc_xr = (struct xbf_ring *)sc->sc_xr_dma.dma_vaddr;
1197
1198 sc->sc_xr->xr_prod_event = sc->sc_xr->xr_cons_event = 1;
1199
1200 for (i = 0; i < sc->sc_xr_ndesc; i++)
1201 sc->sc_xr->xr_desc[i].xrd_req.req_id = i;
1202
1203 /* The number of contiguous blocks addressable by one descriptor */
1204 sc->sc_xrd_nblk = (PAGE_SIZE * XBF_MAX_SGE) / (1 << XBF_SEC_SHIFT);
1205
1206 if (xbf_alloc_ccbs(sc)) {
1207 xbf_ring_destroy(sc);
1208 return (-1);
1209 }
1210
1211 return (0);
1212}
1213
1214void
1215xbf_ring_destroy(struct xbf_softc *sc)
1216{
1217 xbf_free_ccbs(sc);
1218 xbf_dma_free(sc, &sc->sc_xr_dma);
1219 sc->sc_xr = NULL;
1220}
1221
1222void
1223xbf_stop(struct xbf_softc *sc)
1224{
1225 struct xbf_ccb *ccb, *nccb;
1226 bus_dmamap_t map;
1227
1228 bus_dmamap_sync(sc->sc_dmat, sc->sc_xr_dma.dma_map, 0,
1229 sc->sc_xr_dma.dma_map->dm_mapsize, BUS_DMASYNC_POSTREAD |
1230 BUS_DMASYNC_POSTWRITE);
1231
1232 TAILQ_FOREACH_SAFE(ccb, &sc->sc_ccb_sq, ccb_link, nccb) {
1233 TAILQ_REMOVE(&sc->sc_ccb_sq, ccb, ccb_link);
1234
1235 if (ccb->ccb_bbuf.dma_size > 0)
1236 map = ccb->ccb_bbuf.dma_map;
1237 else
1238 map = ccb->ccb_dmap;
1239 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1240 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1241 bus_dmamap_unload(sc->sc_dmat, map);
1242
1243 xbf_reclaim_cmd(ccb->ccb_xfer);
1244 xbf_scsi_done(ccb->ccb_xfer, XS_SELTIMEOUT);
1245 }
1246
1247 xbf_ring_destroy(sc);
1248}
1249
1250int
1251xbf_alloc_ccbs(struct xbf_softc *sc)
1252{
1253 int i, error;
1254
1255 TAILQ_INIT(&sc->sc_ccb_fq);
1256 TAILQ_INIT(&sc->sc_ccb_sq);
1257 mtx_init(&sc->sc_ccb_fqlck, IPL_BIO);
1258 mtx_init(&sc->sc_ccb_sqlck, IPL_BIO);
1259
1260 sc->sc_nccb = sc->sc_xr_ndesc / 2;
1261
1262 sc->sc_ccbs = mallocarray(sc->sc_nccb, sizeof(struct xbf_ccb),
1263 M_DEVBUF, M_ZERO | M_NOWAIT);
1264 if (sc->sc_ccbs == NULL) {
1265 printf("%s: failed to allocate CCBs\n", sc->sc_dev.dv_xname);
1266 return (-1);
1267 }
1268
1269 for (i = 0; i < sc->sc_nccb; i++) {
1270 /*
1271 * Each CCB is set up to use up to 2 descriptors and
1272 * each descriptor can transfer XBF_MAX_SGE number of
1273 * pages.
1274 */
1275 error = bus_dmamap_create(sc->sc_dmat, MAXPHYS, 2 *
1276 XBF_MAX_SGE, PAGE_SIZE, PAGE_SIZE, BUS_DMA_NOWAIT,
1277 &sc->sc_ccbs[i].ccb_dmap);
1278 if (error) {
1279 printf("%s: failed to create a memory map for "
1280 "the xfer %d (%d)\n", sc->sc_dev.dv_xname, i,
1281 error);
1282 goto errout;
1283 }
1284
1285 xbf_put_ccb(sc, &sc->sc_ccbs[i]);
1286 }
1287
1288 scsi_iopool_init(&sc->sc_iopool, sc, xbf_get_ccb, xbf_put_ccb);
1289
1290 return (0);
1291
1292 errout:
1293 xbf_free_ccbs(sc);
1294 return (-1);
1295}
1296
1297void
1298xbf_free_ccbs(struct xbf_softc *sc)
1299{
1300 struct xbf_ccb *ccb;
1301 int i;
1302
1303 for (i = 0; i < sc->sc_nccb; i++) {
1304 ccb = &sc->sc_ccbs[i];
1305 if (ccb->ccb_dmap == NULL)
1306 continue;
1307 bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmap, 0, 0,
1308 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1309 bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmap);
1310 bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmap);
1311 }
1312
1313 free(sc->sc_ccbs, M_DEVBUF, sc->sc_nccb * sizeof(struct xbf_ccb));
1314 sc->sc_ccbs = NULL;
1315 sc->sc_nccb = 0;
1316}
1317
1318void *
1319xbf_get_ccb(void *xsc)
1320{
1321 struct xbf_softc *sc = xsc;
1322 struct xbf_ccb *ccb;
1323
1324 if (sc->sc_state != XBF_CONNECTED &&
1325 sc->sc_state != XBF_CLOSING)
1326 return (NULL);
1327
1328 mtx_enter(&sc->sc_ccb_fqlck);
1329 ccb = TAILQ_FIRST(&sc->sc_ccb_fq);
1330 if (ccb != NULL)
1331 TAILQ_REMOVE(&sc->sc_ccb_fq, ccb, ccb_link);
1332 mtx_leave(&sc->sc_ccb_fqlck);
1333
1334 return (ccb);
1335}
1336
1337void
1338xbf_put_ccb(void *xsc, void *io)
1339{
1340 struct xbf_softc *sc = xsc;
1341 struct xbf_ccb *ccb = io;
1342
1343 ccb->ccb_xfer = NULL;
1344
1345 mtx_enter(&sc->sc_ccb_fqlck);
1346 TAILQ_INSERT_HEAD(&sc->sc_ccb_fq, ccb, ccb_link);
1347 mtx_leave(&sc->sc_ccb_fqlck);
1348}
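
One way to address the dead store flagged at line 935, sketched against the listing above (a suggestion, not analyzer output): drop the assignment, since the errout path only prints the property name and returns -1 without reading 'error'. Alternatively, the errout printf could report the saved error code, which would make all of the stores in xbf_get_type() live.

	prop = "device-type";
	if (xs_getprop(sc->sc_parent, sc->sc_node, prop,
	    sc->sc_dtype, sizeof(sc->sc_dtype)) != 0)	/* no dead store */
		goto errout;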