Bug Summary

File: dev/ic/qlw.c
Warning: line 231, column 4
Called function pointer is an uninitialized pointer value

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name qlw.c -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/lib/clang/13.0.0 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I 
/usr/src/sys/dev/pci/drm/amd/pm/powerplay -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -D CONFIG_DRM_AMD_DC_DCN3_0 -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant -fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /usr/obj/sys/arch/amd64/compile/GENERIC.MP/scan-build/2022-01-12-131800-47421-1 -x c /usr/src/sys/dev/ic/qlw.c
1/* $OpenBSD: qlw.c,v 1.47 2020/09/22 19:32:52 krw Exp $ */
2
3/*
4 * Copyright (c) 2011 David Gwynne <dlg@openbsd.org>
5 * Copyright (c) 2013, 2014 Jonathan Matthew <jmatthew@openbsd.org>
6 * Copyright (c) 2014 Mark Kettenis <kettenis@openbsd.org>
7 *
8 * Permission to use, copy, modify, and distribute this software for any
9 * purpose with or without fee is hereby granted, provided that the above
10 * copyright notice and this permission notice appear in all copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 */
20
21#include <sys/param.h>
22#include <sys/systm.h>
23#include <sys/atomic.h>
24#include <sys/device.h>
25#include <sys/ioctl.h>
26#include <sys/malloc.h>
27#include <sys/kernel.h>
28#include <sys/mutex.h>
29#include <sys/rwlock.h>
30#include <sys/sensors.h>
31#include <sys/queue.h>
32
33#include <machine/bus.h>
34
35#include <scsi/scsi_all.h>
36#include <scsi/scsiconf.h>
37
38#include <dev/ic/qlwreg.h>
39#include <dev/ic/qlwvar.h>
40
41#ifndef SMALL_KERNEL
42#ifndef QLW_DEBUG
43#define QLW_DEBUG
44#endif
45#endif
46
#ifdef QLW_DEBUG
/* Debug printf, compiled out entirely when QLW_DEBUG is not defined. */
#define DPRINTF(m, f...)	do { if ((qlwdebug & (m)) == (m)) printf(f); } \
				    while (0)
#define QLW_D_MBOX		0x01
#define QLW_D_INTR		0x02
#define QLW_D_PORT		0x04
#define QLW_D_IO		0x08
#define QLW_D_IOCB		0x10
int qlwdebug = QLW_D_PORT | QLW_D_INTR | QLW_D_MBOX;
#else
#define DPRINTF(m, f...)
#endif
59
60struct cfdriver qlw_cd = {
61 NULL((void *)0),
62 "qlw",
63 DV_DULL
64};
65
66void qlw_scsi_cmd(struct scsi_xfer *);
67
68u_int16_t qlw_read(struct qlw_softc *, bus_size_t);
69void qlw_write(struct qlw_softc *, bus_size_t, u_int16_t);
70void qlw_host_cmd(struct qlw_softc *sc, u_int16_t);
71
72int qlw_mbox(struct qlw_softc *, int, int);
73void qlw_mbox_putaddr(u_int16_t *, struct qlw_dmamem *);
74u_int16_t qlw_read_mbox(struct qlw_softc *, int);
75void qlw_write_mbox(struct qlw_softc *, int, u_int16_t);
76
77int qlw_config_bus(struct qlw_softc *, int);
78int qlw_config_target(struct qlw_softc *, int, int);
79void qlw_update_bus(struct qlw_softc *, int);
80void qlw_update_target(struct qlw_softc *, int, int);
81void qlw_update_task(void *);
82
83void qlw_handle_intr(struct qlw_softc *, u_int16_t, u_int16_t);
84void qlw_set_ints(struct qlw_softc *, int);
85int qlw_read_isr(struct qlw_softc *, u_int16_t *, u_int16_t *);
86void qlw_clear_isr(struct qlw_softc *, u_int16_t);
87
88void qlw_update(struct qlw_softc *, int);
89void qlw_put_marker(struct qlw_softc *, int, void *);
90void qlw_put_cmd(struct qlw_softc *, void *, struct scsi_xfer *,
91 struct qlw_ccb *);
92void qlw_put_cont(struct qlw_softc *, void *, struct scsi_xfer *,
93 struct qlw_ccb *, int);
94struct qlw_ccb *qlw_handle_resp(struct qlw_softc *, u_int16_t);
95void qlw_get_header(struct qlw_softc *, struct qlw_iocb_hdr *,
96 int *, int *);
97void qlw_put_header(struct qlw_softc *, struct qlw_iocb_hdr *,
98 int, int);
99void qlw_put_data_seg(struct qlw_softc *, struct qlw_iocb_seg *,
100 bus_dmamap_t, int);
101
102int qlw_softreset(struct qlw_softc *);
103void qlw_dma_burst_enable(struct qlw_softc *);
104
105int qlw_async(struct qlw_softc *, u_int16_t);
106
107int qlw_load_firmware_words(struct qlw_softc *, const u_int16_t *,
108 u_int16_t);
109int qlw_load_firmware(struct qlw_softc *);
110int qlw_read_nvram(struct qlw_softc *);
111void qlw_parse_nvram_1040(struct qlw_softc *, int);
112void qlw_parse_nvram_1080(struct qlw_softc *, int);
113void qlw_init_defaults(struct qlw_softc *, int);
114
115struct qlw_dmamem *qlw_dmamem_alloc(struct qlw_softc *, size_t);
116void qlw_dmamem_free(struct qlw_softc *, struct qlw_dmamem *);
117
118int qlw_alloc_ccbs(struct qlw_softc *);
119void qlw_free_ccbs(struct qlw_softc *);
120void *qlw_get_ccb(void *);
121void qlw_put_ccb(void *, void *);
122
123#ifdef QLW_DEBUG
124void qlw_dump_iocb(struct qlw_softc *, void *, int);
125void qlw_dump_iocb_segs(struct qlw_softc *, void *, int);
126#else
127#define qlw_dump_iocb(sc, h, fl) do { /* nothing */ } while (0)
128#define qlw_dump_iocb_segs(sc, h, fl) do { /* nothing */ } while (0)
129#endif
130
131static inline int
132qlw_xs_bus(struct qlw_softc *sc, struct scsi_xfer *xs)
133{
134 /*
135 * sc_scsibus[0] == NULL -> bus 0 probing during config_found().
136 * sc_scsibus[0] == xs->sc_link->bus -> bus 0 normal operation.
137 * sc_scsibus[1] == NULL -> bus 1 probing during config_found().
138 * sc_scsibus[1] == xs->sc_link->bus -> bus 1 normal operation.
139 */
140 if ((sc->sc_scsibus[0] == NULL((void *)0)) ||
141 (xs->sc_link->bus == sc->sc_scsibus[0]))
142 return 0;
143 else
144 return 1;
145}
146
147static inline u_int16_t
148qlw_swap16(struct qlw_softc *sc, u_int16_t value)
149{
150 if (sc->sc_isp_gen == QLW_GEN_ISP1000)
151 return htobe16(value)(__uint16_t)(__builtin_constant_p(value) ? (__uint16_t)(((__uint16_t
)(value) & 0xffU) << 8 | ((__uint16_t)(value) &
0xff00U) >> 8) : __swap16md(value))
;
152 else
153 return htole16(value)((__uint16_t)(value));
154}
155
156static inline u_int32_t
157qlw_swap32(struct qlw_softc *sc, u_int32_t value)
158{
159 if (sc->sc_isp_gen == QLW_GEN_ISP1000)
160 return htobe32(value)(__uint32_t)(__builtin_constant_p(value) ? (__uint32_t)(((__uint32_t
)(value) & 0xff) << 24 | ((__uint32_t)(value) &
0xff00) << 8 | ((__uint32_t)(value) & 0xff0000) >>
8 | ((__uint32_t)(value) & 0xff000000) >> 24) : __swap32md
(value))
;
161 else
162 return htole32(value)((__uint32_t)(value));
163}
164
165static inline u_int16_t
166qlw_queue_read(struct qlw_softc *sc, bus_size_t offset)
167{
168 return qlw_read(sc, sc->sc_mbox_base + offset);
169}
170
171static inline void
172qlw_queue_write(struct qlw_softc *sc, bus_size_t offset, u_int16_t value)
173{
174 qlw_write(sc, sc->sc_mbox_base + offset, value);
175}
176
177struct scsi_adapter qlw_switch = {
178 qlw_scsi_cmd, NULL((void *)0), NULL((void *)0), NULL((void *)0), NULL((void *)0)
179};
180
181int
182qlw_attach(struct qlw_softc *sc)
183{
184 struct scsibus_attach_args saa;
185 void (*parse_nvram)(struct qlw_softc *, int);
1
'parse_nvram' declared without an initial value
186 int reset_delay;
187 int bus;
188
189 task_set(&sc->sc_update_task, qlw_update_task, sc);
190
191 switch (sc->sc_isp_gen) {
2
Control jumps to 'case QLW_GEN_ISP1000:' at line 192
192 case QLW_GEN_ISP1000:
193 sc->sc_nvram_size = 0;
194 break;
3
Execution continues on line 213
195 case QLW_GEN_ISP1040:
196 sc->sc_nvram_size = 128;
197 sc->sc_nvram_minversion = 2;
198 parse_nvram = qlw_parse_nvram_1040;
199 break;
200 case QLW_GEN_ISP1080:
201 case QLW_GEN_ISP12160:
202 sc->sc_nvram_size = 256;
203 sc->sc_nvram_minversion = 1;
204 parse_nvram = qlw_parse_nvram_1080;
205 break;
206
207 default:
208 printf("unknown isp type\n");
209 return (ENXIO6);
210 }
211
212 /* after reset, mbox registers 1-3 should contain the string "ISP " */
213 if (qlw_read_mbox(sc, 1) != 0x4953 ||
4
Assuming the condition is true
214 qlw_read_mbox(sc, 2) != 0x5020 ||
215 qlw_read_mbox(sc, 3) != 0x2020) {
216 /* try releasing the risc processor */
217 qlw_host_cmd(sc, QLW_HOST_CMD_RELEASE0x3);
218 }
219
220 qlw_host_cmd(sc, QLW_HOST_CMD_PAUSE0x2);
221 if (qlw_softreset(sc) != 0) {
5
Assuming the condition is false
6
Taking false branch
222 printf("softreset failed\n");
223 return (ENXIO6);
224 }
225
226 for (bus = 0; bus < sc->sc_numbusses; bus++)
7
Assuming 'bus' is >= field 'sc_numbusses'
8
Loop condition is false. Execution continues on line 229
227 qlw_init_defaults(sc, bus);
228
229 if (qlw_read_nvram(sc) == 0) {
9
Assuming the condition is true
10
Taking true branch
230 for (bus = 0; bus < sc->sc_numbusses; bus++)
11
Assuming 'bus' is < field 'sc_numbusses'
12
Loop condition is true. Entering loop body
231 parse_nvram(sc, bus);
13
Called function pointer is an uninitialized pointer value
232 }
233
234#ifndef ISP_NOFIRMWARE
235 if (sc->sc_firmware && qlw_load_firmware(sc)) {
236 printf("firmware load failed\n");
237 return (ENXIO6);
238 }
239#endif
240
241 /* execute firmware */
242 sc->sc_mbox[0] = QLW_MBOX_EXEC_FIRMWARE0x0002;
243 sc->sc_mbox[1] = QLW_CODE_ORG0x1000;
244 if (qlw_mbox(sc, 0x0003, 0x0001)) {
245 printf("ISP couldn't exec firmware: %x\n", sc->sc_mbox[0]);
246 return (ENXIO6);
247 }
248
249 delay(250000)(*delay_func)(250000); /* from isp(4) */
250
251 sc->sc_mbox[0] = QLW_MBOX_ABOUT_FIRMWARE0x0008;
252 if (qlw_mbox(sc, QLW_MBOX_ABOUT_FIRMWARE_IN0x0001,
253 QLW_MBOX_ABOUT_FIRMWARE_OUT0x004f)) {
254 printf("ISP not talking after firmware exec: %x\n",
255 sc->sc_mbox[0]);
256 return (ENXIO6);
257 }
258 /* The ISP1000 firmware we use doesn't return a version number. */
259 if (sc->sc_isp_gen == QLW_GEN_ISP1000 && sc->sc_firmware) {
260 sc->sc_mbox[1] = 1;
261 sc->sc_mbox[2] = 37;
262 sc->sc_mbox[3] = 0;
263 sc->sc_mbox[6] = 0;
264 }
265 printf("%s: firmware rev %d.%d.%d, attrs 0x%x\n", DEVNAME(sc)((sc)->sc_dev.dv_xname),
266 sc->sc_mbox[1], sc->sc_mbox[2], sc->sc_mbox[3], sc->sc_mbox[6]);
267
268 /* work out how many ccbs to allocate */
269 sc->sc_mbox[0] = QLW_MBOX_GET_FIRMWARE_STATUS0x001F;
270 if (qlw_mbox(sc, 0x0001, 0x0007)) {
271 printf("couldn't get firmware status: %x\n", sc->sc_mbox[0]);
272 return (ENXIO6);
273 }
274 sc->sc_maxrequests = sc->sc_mbox[2];
275 if (sc->sc_maxrequests > 512)
276 sc->sc_maxrequests = 512;
277 for (bus = 0; bus < sc->sc_numbusses; bus++) {
278 if (sc->sc_max_queue_depth[bus] > sc->sc_maxrequests)
279 sc->sc_max_queue_depth[bus] = sc->sc_maxrequests;
280 }
281
282 /*
283 * On some 1020/1040 variants the response queue is limited to
284 * 256 entries. We don't really need all that many anyway.
285 */
286 sc->sc_maxresponses = sc->sc_maxrequests / 2;
287 if (sc->sc_maxresponses < 64)
288 sc->sc_maxresponses = 64;
289
290 /* We may need up to 3 request entries per SCSI command. */
291 sc->sc_maxccbs = sc->sc_maxrequests / 3;
292
293 /* Allegedly the FIFO is busted on the 1040A. */
294 if (sc->sc_isp_type == QLW_ISP1040A)
295 sc->sc_isp_config &= ~QLW_PCI_FIFO_MASK0x0070;
296 qlw_write(sc, QLW_CFG10x06, sc->sc_isp_config);
297
298 if (sc->sc_isp_config & QLW_BURST_ENABLE0x0004)
299 qlw_dma_burst_enable(sc);
300
301 sc->sc_mbox[0] = QLW_MBOX_SET_FIRMWARE_FEATURES0x004a;
302 sc->sc_mbox[1] = 0;
303 if (sc->sc_fw_features & QLW_FW_FEATURE_LVD_NOTIFY0x0002)
304 sc->sc_mbox[1] |= QLW_FW_FEATURE_LVD_NOTIFY0x0002;
305 if (sc->sc_mbox[1] != 0 && qlw_mbox(sc, 0x0003, 0x0001)) {
306 printf("couldn't set firmware features: %x\n", sc->sc_mbox[0]);
307 return (ENXIO6);
308 }
309
310 sc->sc_mbox[0] = QLW_MBOX_SET_CLOCK_RATE0x0034;
311 sc->sc_mbox[1] = sc->sc_clock;
312 if (qlw_mbox(sc, 0x0003, 0x0001)) {
313 printf("couldn't set clock rate: %x\n", sc->sc_mbox[0]);
314 return (ENXIO6);
315 }
316
317 sc->sc_mbox[0] = QLW_MBOX_SET_RETRY_COUNT0x0032;
318 sc->sc_mbox[1] = sc->sc_retry_count[0];
319 sc->sc_mbox[2] = sc->sc_retry_delay[0];
320 sc->sc_mbox[6] = sc->sc_retry_count[1];
321 sc->sc_mbox[7] = sc->sc_retry_delay[1];
322 if (qlw_mbox(sc, 0x00c7, 0x0001)) {
323 printf("couldn't set retry count: %x\n", sc->sc_mbox[0]);
324 return (ENXIO6);
325 }
326
327 sc->sc_mbox[0] = QLW_MBOX_SET_ASYNC_DATA_SETUP0x0036;
328 sc->sc_mbox[1] = sc->sc_async_data_setup[0];
329 sc->sc_mbox[2] = sc->sc_async_data_setup[1];
330 if (qlw_mbox(sc, 0x0007, 0x0001)) {
331 printf("couldn't set async data setup: %x\n", sc->sc_mbox[0]);
332 return (ENXIO6);
333 }
334
335 sc->sc_mbox[0] = QLW_MBOX_SET_ACTIVE_NEGATION0x0035;
336 sc->sc_mbox[1] = sc->sc_req_ack_active_neg[0] << 5;
337 sc->sc_mbox[1] |= sc->sc_data_line_active_neg[0] << 4;
338 sc->sc_mbox[2] = sc->sc_req_ack_active_neg[1] << 5;
339 sc->sc_mbox[2] |= sc->sc_data_line_active_neg[1] << 4;
340 if (qlw_mbox(sc, 0x0007, 0x0001)) {
341 printf("couldn't set active negation: %x\n", sc->sc_mbox[0]);
342 return (ENXIO6);
343 }
344
345 sc->sc_mbox[0] = QLW_MBOX_SET_TAG_AGE_LIMIT0x0033;
346 sc->sc_mbox[1] = sc->sc_tag_age_limit[0];
347 sc->sc_mbox[2] = sc->sc_tag_age_limit[1];
348 if (qlw_mbox(sc, 0x0007, 0x0001)) {
349 printf("couldn't set tag age limit: %x\n", sc->sc_mbox[0]);
350 return (ENXIO6);
351 }
352
353 sc->sc_mbox[0] = QLW_MBOX_SET_SELECTION_TIMEOUT0x0031;
354 sc->sc_mbox[1] = sc->sc_selection_timeout[0];
355 sc->sc_mbox[2] = sc->sc_selection_timeout[1];
356 if (qlw_mbox(sc, 0x0007, 0x0001)) {
357 printf("couldn't set selection timeout: %x\n", sc->sc_mbox[0]);
358 return (ENXIO6);
359 }
360
361 for (bus = 0; bus < sc->sc_numbusses; bus++) {
362 if (qlw_config_bus(sc, bus))
363 return (ENXIO6);
364 }
365
366 if (qlw_alloc_ccbs(sc)) {
367 /* error already printed */
368 return (ENOMEM12);
369 }
370
371 sc->sc_mbox[0] = QLW_MBOX_INIT_REQ_QUEUE0x0010;
372 sc->sc_mbox[1] = sc->sc_maxrequests;
373 qlw_mbox_putaddr(sc->sc_mbox, sc->sc_requests);
374 sc->sc_mbox[4] = 0;
375 if (qlw_mbox(sc, 0x00df, 0x0001)) {
376 printf("couldn't init request queue: %x\n", sc->sc_mbox[0]);
377 goto free_ccbs;
378 }
379
380 sc->sc_mbox[0] = QLW_MBOX_INIT_RSP_QUEUE0x0011;
381 sc->sc_mbox[1] = sc->sc_maxresponses;
382 qlw_mbox_putaddr(sc->sc_mbox, sc->sc_responses);
383 sc->sc_mbox[5] = 0;
384 if (qlw_mbox(sc, 0x00ef, 0x0001)) {
385 printf("couldn't init response queue: %x\n", sc->sc_mbox[0]);
386 goto free_ccbs;
387 }
388
389 reset_delay = 0;
390 for (bus = 0; bus < sc->sc_numbusses; bus++) {
391 sc->sc_mbox[0] = QLW_MBOX_BUS_RESET0x0018;
392 sc->sc_mbox[1] = sc->sc_reset_delay[bus];
393 sc->sc_mbox[2] = bus;
394 if (qlw_mbox(sc, 0x0007, 0x0001)) {
395 printf("couldn't reset bus: %x\n", sc->sc_mbox[0]);
396 goto free_ccbs;
397 }
398 sc->sc_marker_required[bus] = 1;
399 sc->sc_update_required[bus] = 0xffff;
400
401 if (sc->sc_reset_delay[bus] > reset_delay)
402 reset_delay = sc->sc_reset_delay[bus];
403 }
404
405 /* wait for the busses to settle */
406 delay(reset_delay * 1000000)(*delay_func)(reset_delay * 1000000);
407
408 saa.saa_adapter = &qlw_switch;
409 saa.saa_adapter_softc = sc;
410 saa.saa_adapter_buswidth = QLW_MAX_TARGETS16;
411 saa.saa_luns = QLW_MAX_LUNS8;
412 saa.saa_pool = &sc->sc_iopool;
413 saa.saa_quirks = saa.saa_flags = 0;
414 saa.saa_wwpn = saa.saa_wwnn = 0;
415 for (bus = 0; bus < sc->sc_numbusses; bus++) {
416 saa.saa_adapter_target = sc->sc_initiator[bus];
417 saa.saa_openings = sc->sc_max_queue_depth[bus];
418
419 sc->sc_scsibus[bus] = (struct scsibus_softc *)
420 config_found(&sc->sc_dev, &saa, scsiprint)config_found_sm((&sc->sc_dev), (&saa), (scsiprint)
, ((void *)0))
;
421
422 qlw_update_bus(sc, bus);
423 }
424
425 sc->sc_running = 1;
426 return(0);
427
428free_ccbs:
429 qlw_free_ccbs(sc);
430 return (ENXIO6);
431}
432
/*
 * Detach hook.  Nothing to tear down yet; always reports success.
 */
int
qlw_detach(struct qlw_softc *sc, int flags)
{
	return (0);
}
438
439int
440qlw_config_bus(struct qlw_softc *sc, int bus)
441{
442 int target, err;
443
444 sc->sc_mbox[0] = QLW_MBOX_SET_INITIATOR_ID0x0030;
445 sc->sc_mbox[1] = (bus << 7) | sc->sc_initiator[bus];
446
447 if (qlw_mbox(sc, 0x0003, 0x0001)) {
448 printf("couldn't set initiator id: %x\n", sc->sc_mbox[0]);
449 return (ENXIO6);
450 }
451
452 for (target = 0; target < QLW_MAX_TARGETS16; target++) {
453 err = qlw_config_target(sc, bus, target);
454 if (err)
455 return (err);
456 }
457
458 return (0);
459}
460
461int
462qlw_config_target(struct qlw_softc *sc, int bus, int target)
463{
464 int lun;
465
466 sc->sc_mbox[0] = QLW_MBOX_SET_TARGET_PARAMETERS0x0038;
467 sc->sc_mbox[1] = (((bus << 7) | target) << 8);
468 sc->sc_mbox[2] = sc->sc_target[bus][target].qt_params;
469 sc->sc_mbox[2] &= QLW_TARGET_SAFE0xc500;
470 sc->sc_mbox[2] |= QLW_TARGET_NARROW0x0080 | QLW_TARGET_ASYNC0x0040;
471 sc->sc_mbox[3] = 0;
472
473 if (qlw_mbox(sc, 0x000f, 0x0001)) {
474 printf("couldn't set target parameters: %x\n", sc->sc_mbox[0]);
475 return (ENXIO6);
476 }
477
478 for (lun = 0; lun < QLW_MAX_LUNS8; lun++) {
479 sc->sc_mbox[0] = QLW_MBOX_SET_DEVICE_QUEUE0x0039;
480 sc->sc_mbox[1] = (((bus << 7) | target) << 8) | lun;
481 sc->sc_mbox[2] = sc->sc_max_queue_depth[bus];
482 sc->sc_mbox[3] = sc->sc_target[bus][target].qt_exec_throttle;
483 if (qlw_mbox(sc, 0x000f, 0x0001)) {
484 printf("couldn't set lun parameters: %x\n",
485 sc->sc_mbox[0]);
486 return (ENXIO6);
487 }
488 }
489
490 return (0);
491}
492
493void
494qlw_update_bus(struct qlw_softc *sc, int bus)
495{
496 int target;
497
498 for (target = 0; target < QLW_MAX_TARGETS16; target++)
499 qlw_update_target(sc, bus, target);
500}
501
502void
503qlw_update_target(struct qlw_softc *sc, int bus, int target)
504{
505 struct scsi_link *link;
506 int lun;
507
508 if ((sc->sc_update_required[bus] & (1 << target)) == 0)
509 return;
510 atomic_clearbits_intx86_atomic_clearbits_u32(&sc->sc_update_required[bus], (1 << target));
511
512 link = scsi_get_link(sc->sc_scsibus[bus], target, 0);
513 if (link == NULL((void *)0))
514 return;
515
516 sc->sc_mbox[0] = QLW_MBOX_SET_TARGET_PARAMETERS0x0038;
517 sc->sc_mbox[1] = (((bus << 7) | target) << 8);
518 sc->sc_mbox[2] = sc->sc_target[bus][target].qt_params;
519 sc->sc_mbox[2] |= QLW_TARGET_RENEG0x0100;
520 sc->sc_mbox[2] &= ~QLW_TARGET_QFRZ0x0200;
521 if (link->quirks & SDEV_NOSYNC0x0002)
522 sc->sc_mbox[2] &= ~QLW_TARGET_SYNC0x1000;
523 if (link->quirks & SDEV_NOWIDE0x0004)
524 sc->sc_mbox[2] &= ~QLW_TARGET_WIDE0x2000;
525 if (link->quirks & SDEV_NOTAGS0x0008)
526 sc->sc_mbox[2] &= ~QLW_TARGET_TAGS0x0800;
527
528 sc->sc_mbox[3] = sc->sc_target[bus][target].qt_sync_period;
529 sc->sc_mbox[3] |= (sc->sc_target[bus][target].qt_sync_offset << 8);
530
531 if (qlw_mbox(sc, 0x000f, 0x0001)) {
532 printf("couldn't set target parameters: %x\n", sc->sc_mbox[0]);
533 return;
534 }
535
536 /* XXX do PPR detection */
537
538 for (lun = 0; lun < QLW_MAX_LUNS8; lun++) {
539 sc->sc_mbox[0] = QLW_MBOX_SET_DEVICE_QUEUE0x0039;
540 sc->sc_mbox[1] = (((bus << 7) | target) << 8) | lun;
541 sc->sc_mbox[2] = sc->sc_max_queue_depth[bus];
542 sc->sc_mbox[3] = sc->sc_target[bus][target].qt_exec_throttle;
543 if (qlw_mbox(sc, 0x000f, 0x0001)) {
544 printf("couldn't set lun parameters: %x\n",
545 sc->sc_mbox[0]);
546 return;
547 }
548 }
549}
550
551void
552qlw_update_task(void *xsc)
553{
554 struct qlw_softc *sc = xsc;
555 int bus;
556
557 for (bus = 0; bus < sc->sc_numbusses; bus++)
558 qlw_update_bus(sc, bus);
559}
560
561struct qlw_ccb *
562qlw_handle_resp(struct qlw_softc *sc, u_int16_t id)
563{
564 struct qlw_ccb *ccb;
565 struct qlw_iocb_hdr *hdr;
566 struct qlw_iocb_status *status;
567 struct scsi_xfer *xs;
568 u_int32_t handle;
569 int entry_type;
570 int flags;
571 int bus;
572
573 ccb = NULL((void *)0);
574 hdr = QLW_DMA_KVA(sc->sc_responses)((void *)(sc->sc_responses)->qdm_kva) + (id * QLW_QUEUE_ENTRY_SIZE64);
575
576 bus_dmamap_sync(sc->sc_dmat,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc->
sc_responses)->qdm_map)), (id * 64), (64), (0x02))
577 QLW_DMA_MAP(sc->sc_responses), id * QLW_QUEUE_ENTRY_SIZE,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc->
sc_responses)->qdm_map)), (id * 64), (64), (0x02))
578 QLW_QUEUE_ENTRY_SIZE, BUS_DMASYNC_POSTREAD)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc->
sc_responses)->qdm_map)), (id * 64), (64), (0x02))
;
579
580 qlw_get_header(sc, hdr, &entry_type, &flags);
581 switch (entry_type) {
582 case QLW_IOCB_STATUS0x03:
583 status = (struct qlw_iocb_status *)hdr;
584 handle = qlw_swap32(sc, status->handle);
585 if (handle > sc->sc_maxccbs) {
586 panic("bad completed command handle: %d (> %d)",
587 handle, sc->sc_maxccbs);
588 }
589
590 ccb = &sc->sc_ccbs[handle];
591 xs = ccb->ccb_xs;
592 if (xs == NULL((void *)0)) {
593 DPRINTF(QLW_D_INTR, "%s: got status for inactive"do { if ((qlwdebug & (0x02)) == (0x02)) printf("%s: got status for inactive"
" ccb %d\n", ((sc)->sc_dev.dv_xname), handle); } while (0
)
594 " ccb %d\n", DEVNAME(sc), handle)do { if ((qlwdebug & (0x02)) == (0x02)) printf("%s: got status for inactive"
" ccb %d\n", ((sc)->sc_dev.dv_xname), handle); } while (0
)
;
595 qlw_dump_iocb(sc, hdr, QLW_D_INTR0x02);
596 ccb = NULL((void *)0);
597 break;
598 }
599 if (xs->io != ccb) {
600 panic("completed command handle doesn't match xs "
601 "(handle %d, ccb %p, xs->io %p)", handle, ccb,
602 xs->io);
603 }
604
605 if (xs->datalen > 0) {
606 bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (ccb->
ccb_dmamap), (0), (ccb->ccb_dmamap->dm_mapsize), ((xs->
flags & 0x00800) ? 0x02 : 0x08))
607 ccb->ccb_dmamap->dm_mapsize,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (ccb->
ccb_dmamap), (0), (ccb->ccb_dmamap->dm_mapsize), ((xs->
flags & 0x00800) ? 0x02 : 0x08))
608 (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_POSTREAD :(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (ccb->
ccb_dmamap), (0), (ccb->ccb_dmamap->dm_mapsize), ((xs->
flags & 0x00800) ? 0x02 : 0x08))
609 BUS_DMASYNC_POSTWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (ccb->
ccb_dmamap), (0), (ccb->ccb_dmamap->dm_mapsize), ((xs->
flags & 0x00800) ? 0x02 : 0x08))
;
610 bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap)(*(sc->sc_dmat)->_dmamap_unload)((sc->sc_dmat), (ccb
->ccb_dmamap))
;
611 }
612
613 bus = qlw_xs_bus(sc, xs);
614 xs->status = qlw_swap16(sc, status->scsi_status);
615 switch (qlw_swap16(sc, status->completion)) {
616 case QLW_IOCB_STATUS_COMPLETE0x0000:
617 if (qlw_swap16(sc, status->scsi_status) &
618 QLW_SCSI_STATUS_SENSE_VALID0x0200) {
619 memcpy(&xs->sense, status->sense_data,__builtin_memcpy((&xs->sense), (status->sense_data)
, (sizeof(xs->sense)))
620 sizeof(xs->sense))__builtin_memcpy((&xs->sense), (status->sense_data)
, (sizeof(xs->sense)))
;
621 xs->error = XS_SENSE1;
622 } else {
623 xs->error = XS_NOERROR0;
624 }
625 xs->resid = 0;
626 break;
627
628 case QLW_IOCB_STATUS_INCOMPLETE0x0001:
629 if (flags & QLW_STATE_GOT_TARGET0x0200) {
630 xs->error = XS_DRIVER_STUFFUP2;
631 } else {
632 xs->error = XS_SELTIMEOUT3;
633 }
634 break;
635
636 case QLW_IOCB_STATUS_DMA_ERROR0x0002:
637 DPRINTF(QLW_D_INTR, "%s: dma error\n", DEVNAME(sc))do { if ((qlwdebug & (0x02)) == (0x02)) printf("%s: dma error\n"
, ((sc)->sc_dev.dv_xname)); } while (0)
;
638 /* set resid apparently? */
639 break;
640
641 case QLW_IOCB_STATUS_RESET0x0004:
642 DPRINTF(QLW_D_INTR, "%s: reset destroyed command\n",do { if ((qlwdebug & (0x02)) == (0x02)) printf("%s: reset destroyed command\n"
, ((sc)->sc_dev.dv_xname)); } while (0)
643 DEVNAME(sc))do { if ((qlwdebug & (0x02)) == (0x02)) printf("%s: reset destroyed command\n"
, ((sc)->sc_dev.dv_xname)); } while (0)
;
644 sc->sc_marker_required[bus] = 1;
645 xs->error = XS_RESET8;
646 break;
647
648 case QLW_IOCB_STATUS_ABORTED0x0005:
649 DPRINTF(QLW_D_INTR, "%s: aborted\n", DEVNAME(sc))do { if ((qlwdebug & (0x02)) == (0x02)) printf("%s: aborted\n"
, ((sc)->sc_dev.dv_xname)); } while (0)
;
650 sc->sc_marker_required[bus] = 1;
651 xs->error = XS_DRIVER_STUFFUP2;
652 break;
653
654 case QLW_IOCB_STATUS_TIMEOUT0x0006:
655 DPRINTF(QLW_D_INTR, "%s: command timed out\n",do { if ((qlwdebug & (0x02)) == (0x02)) printf("%s: command timed out\n"
, ((sc)->sc_dev.dv_xname)); } while (0)
656 DEVNAME(sc))do { if ((qlwdebug & (0x02)) == (0x02)) printf("%s: command timed out\n"
, ((sc)->sc_dev.dv_xname)); } while (0)
;
657 xs->error = XS_TIMEOUT4;
658 break;
659
660 case QLW_IOCB_STATUS_DATA_OVERRUN0x0007:
661 case QLW_IOCB_STATUS_DATA_UNDERRUN0x0015:
662 xs->resid = qlw_swap32(sc, status->resid);
663 xs->error = XS_NOERROR0;
664 break;
665
666 case QLW_IOCB_STATUS_QUEUE_FULL0x001c:
667 DPRINTF(QLW_D_INTR, "%s: queue full\n", DEVNAME(sc))do { if ((qlwdebug & (0x02)) == (0x02)) printf("%s: queue full\n"
, ((sc)->sc_dev.dv_xname)); } while (0)
;
668 xs->error = XS_BUSY5;
669 break;
670
671 case QLW_IOCB_STATUS_WIDE_FAILED0x001f:
672 DPRINTF(QLW_D_INTR, "%s: wide failed\n", DEVNAME(sc))do { if ((qlwdebug & (0x02)) == (0x02)) printf("%s: wide failed\n"
, ((sc)->sc_dev.dv_xname)); } while (0)
;
673 xs->sc_link->quirks |= SDEV_NOWIDE0x0004;
674 atomic_setbits_intx86_atomic_setbits_u32(&sc->sc_update_required[bus],
675 1 << xs->sc_link->target);
676 task_add(systq, &sc->sc_update_task);
677 xs->resid = qlw_swap32(sc, status->resid);
678 xs->error = XS_NOERROR0;
679 break;
680
681 case QLW_IOCB_STATUS_SYNCXFER_FAILED0x0020:
682 DPRINTF(QLW_D_INTR, "%s: sync failed\n", DEVNAME(sc))do { if ((qlwdebug & (0x02)) == (0x02)) printf("%s: sync failed\n"
, ((sc)->sc_dev.dv_xname)); } while (0)
;
683 xs->sc_link->quirks |= SDEV_NOSYNC0x0002;
684 atomic_setbits_intx86_atomic_setbits_u32(&sc->sc_update_required[bus],
685 1 << xs->sc_link->target);
686 task_add(systq, &sc->sc_update_task);
687 xs->resid = qlw_swap32(sc, status->resid);
688 xs->error = XS_NOERROR0;
689 break;
690
691 default:
692 DPRINTF(QLW_D_INTR, "%s: unexpected completion"do { if ((qlwdebug & (0x02)) == (0x02)) printf("%s: unexpected completion"
" status %x\n", ((sc)->sc_dev.dv_xname), qlw_swap16(sc, status
->completion)); } while (0)
693 " status %x\n", DEVNAME(sc),do { if ((qlwdebug & (0x02)) == (0x02)) printf("%s: unexpected completion"
" status %x\n", ((sc)->sc_dev.dv_xname), qlw_swap16(sc, status
->completion)); } while (0)
694 qlw_swap16(sc, status->completion))do { if ((qlwdebug & (0x02)) == (0x02)) printf("%s: unexpected completion"
" status %x\n", ((sc)->sc_dev.dv_xname), qlw_swap16(sc, status
->completion)); } while (0)
;
695 qlw_dump_iocb(sc, hdr, QLW_D_INTR0x02);
696 xs->error = XS_DRIVER_STUFFUP2;
697 break;
698 }
699 break;
700
701 default:
702 DPRINTF(QLW_D_INTR, "%s: unexpected response entry type %x\n",do { if ((qlwdebug & (0x02)) == (0x02)) printf("%s: unexpected response entry type %x\n"
, ((sc)->sc_dev.dv_xname), entry_type); } while (0)
703 DEVNAME(sc), entry_type)do { if ((qlwdebug & (0x02)) == (0x02)) printf("%s: unexpected response entry type %x\n"
, ((sc)->sc_dev.dv_xname), entry_type); } while (0)
;
704 qlw_dump_iocb(sc, hdr, QLW_D_INTR0x02);
705 break;
706 }
707
708 return (ccb);
709}
710
/*
 * Dispatch a single interrupt condition previously decoded by
 * qlw_read_isr().  `isr' selects async event / response-queue I/O /
 * mailbox completion; `info' is the value read from mailbox 0 for the
 * first two mailbox-signalled cases.
 */
void
qlw_handle_intr(struct qlw_softc *sc, u_int16_t isr, u_int16_t info)
{
	int i;
	u_int16_t rspin;
	struct qlw_ccb *ccb;

	switch (isr) {
	case QLW_INT_TYPE_ASYNC:
		/* info holds the async event code */
		qlw_async(sc, info);
		qlw_clear_isr(sc, isr);
		break;

	case QLW_INT_TYPE_IO:
		qlw_clear_isr(sc, isr);
		rspin = qlw_queue_read(sc, QLW_RESP_IN);
		if (rspin == sc->sc_last_resp_id) {
			/* seems to happen a lot on 2200s when mbox commands
			 * complete but it doesn't want to give us the register
			 * semaphore, or something.
			 *
			 * if we're waiting on a mailbox command, don't ack
			 * the interrupt yet.
			 */
			if (sc->sc_mbox_pending) {
				DPRINTF(QLW_D_MBOX, "%s: ignoring premature"
				    " mbox int\n", DEVNAME(sc));
				return;
			}

			break;
		}

		/* nothing to complete against if the queue isn't set up yet */
		if (sc->sc_responses == NULL)
			break;

		DPRINTF(QLW_D_IO, "%s: response queue %x=>%x\n",
		    DEVNAME(sc), sc->sc_last_resp_id, rspin);

		/* consume every response entry up to the chip's IN pointer */
		do {
			ccb = qlw_handle_resp(sc, sc->sc_last_resp_id);
			if (ccb)
				scsi_done(ccb->ccb_xs);

			sc->sc_last_resp_id++;
			sc->sc_last_resp_id %= sc->sc_maxresponses;
		} while (sc->sc_last_resp_id != rspin);

		/* tell the chip how far we got */
		qlw_queue_write(sc, QLW_RESP_OUT, rspin);
		break;

	case QLW_INT_TYPE_MBOX:
		if (sc->sc_mbox_pending) {
			/* on success, latch all result mailboxes for the
			 * waiter before waking it */
			if (info == QLW_MBOX_COMPLETE) {
				for (i = 1; i < nitems(sc->sc_mbox); i++) {
					sc->sc_mbox[i] = qlw_read_mbox(sc, i);
				}
			} else {
				sc->sc_mbox[0] = info;
			}
			wakeup(sc->sc_mbox);
		} else {
			DPRINTF(QLW_D_MBOX, "%s: unexpected mbox interrupt:"
			    " %x\n", DEVNAME(sc), info);
		}
		qlw_clear_isr(sc, isr);
		break;

	default:
		/* maybe log something? */
		break;
	}
}
784
785int
786qlw_intr(void *xsc)
787{
788 struct qlw_softc *sc = xsc;
789 u_int16_t isr;
790 u_int16_t info;
791
792 if (qlw_read_isr(sc, &isr, &info) == 0)
793 return (0);
794
795 qlw_handle_intr(sc, isr, info);
796 return (1);
797}
798
/*
 * Queue a SCSI command to the adapter.  Builds a command IOCB (plus
 * continuation IOCBs for extra DMA segments) in the request ring, and
 * for SCSI_POLL transfers spins until the command completes.
 */
void
qlw_scsi_cmd(struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct qlw_softc *sc = link->bus->sb_adapter_softc;
	struct qlw_ccb *ccb;
	struct qlw_iocb_req0 *iocb;
	struct qlw_ccb_list list;
	u_int16_t req, rspin;
	int offset, error, done;
	bus_dmamap_t dmap;
	int bus;
	int seg;

	/* CDB must fit in the fixed-size IOCB field; fail with sense data */
	if (xs->cmdlen > sizeof(iocb->cdb)) {
		DPRINTF(QLW_D_IO, "%s: cdb too big (%d)\n", DEVNAME(sc),
		    xs->cmdlen);
		memset(&xs->sense, 0, sizeof(xs->sense));
		xs->sense.error_code = SSD_ERRCODE_VALID | SSD_ERRCODE_CURRENT;
		xs->sense.flags = SKEY_ILLEGAL_REQUEST;
		xs->sense.add_sense_code = 0x20;	/* invalid opcode */
		xs->error = XS_SENSE;
		scsi_done(xs);
		return;
	}

	ccb = xs->io;
	dmap = ccb->ccb_dmamap;
	if (xs->datalen > 0) {
		/* map the data buffer for DMA */
		error = bus_dmamap_load(sc->sc_dmat, dmap, xs->data,
		    xs->datalen, NULL, (xs->flags & SCSI_NOSLEEP) ?
		    BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
		if (error) {
			xs->error = XS_DRIVER_STUFFUP;
			scsi_done(xs);
			return;
		}

		bus_dmamap_sync(sc->sc_dmat, dmap, 0,
		    dmap->dm_mapsize,
		    (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_PREREAD :
		    BUS_DMASYNC_PREWRITE);
	}

	mtx_enter(&sc->sc_queue_mtx);

	/* put in a sync marker if required */
	bus = qlw_xs_bus(sc, xs);
	if (sc->sc_marker_required[bus]) {
		req = sc->sc_next_req_id++;
		if (sc->sc_next_req_id == sc->sc_maxrequests)
			sc->sc_next_req_id = 0;

		DPRINTF(QLW_D_IO, "%s: writing marker at request %d\n",
		    DEVNAME(sc), req);
		offset = (req * QLW_QUEUE_ENTRY_SIZE);
		iocb = QLW_DMA_KVA(sc->sc_requests) + offset;
		bus_dmamap_sync(sc->sc_dmat, QLW_DMA_MAP(sc->sc_requests),
		    offset, QLW_QUEUE_ENTRY_SIZE, BUS_DMASYNC_POSTWRITE);
		qlw_put_marker(sc, bus, iocb);
		bus_dmamap_sync(sc->sc_dmat, QLW_DMA_MAP(sc->sc_requests),
		    offset, QLW_QUEUE_ENTRY_SIZE, BUS_DMASYNC_PREWRITE);
		qlw_queue_write(sc, QLW_REQ_IN, sc->sc_next_req_id);
		sc->sc_marker_required[bus] = 0;
	}

	/* allocate a request ring slot for the command IOCB */
	req = sc->sc_next_req_id++;
	if (sc->sc_next_req_id == sc->sc_maxrequests)
		sc->sc_next_req_id = 0;

	offset = (req * QLW_QUEUE_ENTRY_SIZE);
	iocb = QLW_DMA_KVA(sc->sc_requests) + offset;
	bus_dmamap_sync(sc->sc_dmat, QLW_DMA_MAP(sc->sc_requests), offset,
	    QLW_QUEUE_ENTRY_SIZE, BUS_DMASYNC_POSTWRITE);

	ccb->ccb_xs = xs;

	DPRINTF(QLW_D_IO, "%s: writing cmd at request %d\n", DEVNAME(sc), req);
	qlw_put_cmd(sc, iocb, xs, ccb);
	/* first QLW_IOCB_SEGS_PER_CMD segments went into the command IOCB */
	seg = QLW_IOCB_SEGS_PER_CMD;

	bus_dmamap_sync(sc->sc_dmat, QLW_DMA_MAP(sc->sc_requests), offset,
	    QLW_QUEUE_ENTRY_SIZE, BUS_DMASYNC_PREWRITE);

	/* remaining DMA segments go into continuation IOCBs */
	while (seg < ccb->ccb_dmamap->dm_nsegs) {
		req = sc->sc_next_req_id++;
		if (sc->sc_next_req_id == sc->sc_maxrequests)
			sc->sc_next_req_id = 0;

		offset = (req * QLW_QUEUE_ENTRY_SIZE);
		iocb = QLW_DMA_KVA(sc->sc_requests) + offset;
		bus_dmamap_sync(sc->sc_dmat, QLW_DMA_MAP(sc->sc_requests), offset,
		    QLW_QUEUE_ENTRY_SIZE, BUS_DMASYNC_POSTWRITE);

		DPRINTF(QLW_D_IO, "%s: writing cont at request %d\n", DEVNAME(sc), req);
		qlw_put_cont(sc, iocb, xs, ccb, seg);
		seg += QLW_IOCB_SEGS_PER_CONT;

		bus_dmamap_sync(sc->sc_dmat, QLW_DMA_MAP(sc->sc_requests), offset,
		    QLW_QUEUE_ENTRY_SIZE, BUS_DMASYNC_PREWRITE);
	}

	/* hand everything queued so far to the chip */
	qlw_queue_write(sc, QLW_REQ_IN, sc->sc_next_req_id);

	if (!ISSET(xs->flags, SCSI_POLL)) {
		mtx_leave(&sc->sc_queue_mtx);
		return;
	}

	/*
	 * Polled path: spin on the ISR, draining the response queue
	 * ourselves until our own xfer completes.  Completions are
	 * collected on a local list and reported after the queue mutex
	 * is dropped.
	 */
	done = 0;
	SIMPLEQ_INIT(&list);
	do {
		u_int16_t isr, info;

		delay(100);

		if (qlw_read_isr(sc, &isr, &info) == 0) {
			continue;
		}

		if (isr != QLW_INT_TYPE_IO) {
			qlw_handle_intr(sc, isr, info);
			continue;
		}

		qlw_clear_isr(sc, isr);

		rspin = qlw_queue_read(sc, QLW_RESP_IN);
		while (rspin != sc->sc_last_resp_id) {
			ccb = qlw_handle_resp(sc, sc->sc_last_resp_id);

			sc->sc_last_resp_id++;
			if (sc->sc_last_resp_id == sc->sc_maxresponses)
				sc->sc_last_resp_id = 0;

			if (ccb != NULL)
				SIMPLEQ_INSERT_TAIL(&list, ccb, ccb_link);
			if (ccb == xs->io)
				done = 1;
		}
		qlw_queue_write(sc, QLW_RESP_OUT, rspin);
	} while (done == 0);

	mtx_leave(&sc->sc_queue_mtx);

	/* complete everything we collected, outside the mutex */
	while ((ccb = SIMPLEQ_FIRST(&list)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&list, ccb_link);
		scsi_done(ccb->ccb_xs);
	}
}
949
950u_int16_t
951qlw_read(struct qlw_softc *sc, bus_size_t offset)
952{
953 u_int16_t v;
954 v = bus_space_read_2(sc->sc_iot, sc->sc_ioh, offset)((sc->sc_iot)->read_2((sc->sc_ioh), (offset)));
955 bus_space_barrier(sc->sc_iot, sc->sc_ioh, offset, 2,
956 BUS_SPACE_BARRIER_READ0x01 | BUS_SPACE_BARRIER_WRITE0x02);
957 return (v);
958}
959
/*
 * Write a 16-bit chip register, with a full barrier so the write is
 * posted before any subsequent access.
 */
void
qlw_write(struct qlw_softc *sc, bus_size_t offset, u_int16_t value)
{
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, offset, value);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, offset, 2,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
}
967
/*
 * Read mailbox register `mbox' (16-bit registers at sc_mbox_base).
 */
u_int16_t
qlw_read_mbox(struct qlw_softc *sc, int mbox)
{
	/* could range-check mboxes according to chip type? */
	return (qlw_read(sc, sc->sc_mbox_base + (mbox * 2)));
}
974
975void
976qlw_write_mbox(struct qlw_softc *sc, int mbox, u_int16_t value)
977{
978 qlw_write(sc, sc->sc_mbox_base + (mbox * 2), value);
979}
980
981void
982qlw_host_cmd(struct qlw_softc *sc, u_int16_t cmd)
983{
984 qlw_write(sc, sc->sc_host_cmd_ctrl, cmd << QLW_HOST_CMD_SHIFT12);
985}
986
987#define MBOX_COMMAND_TIMEOUT4000 4000
988
/*
 * Execute a mailbox command.  `maskin' selects which sc_mbox[] values
 * are written to the chip before the command, `maskout' selects which
 * result registers are read back on success.  Before attach completes
 * (sc_running == 0) the command is polled for; afterwards we sleep and
 * let qlw_handle_intr() wake us.  Returns 0 on QLW_MBOX_COMPLETE,
 * 1 on timeout, or the chip's status code otherwise.
 */
int
qlw_mbox(struct qlw_softc *sc, int maskin, int maskout)
{
	int i;
	int result = 0;
	int rv;

	sc->sc_mbox_pending = 1;
	for (i = 0; i < nitems(sc->sc_mbox); i++) {
		if (maskin & (1 << i)) {
			qlw_write_mbox(sc, i, sc->sc_mbox[i]);
		}
	}
	/* tell the chip to look at the mailboxes */
	qlw_host_cmd(sc, QLW_HOST_CMD_SET_HOST_INT);

	if (sc->sc_running == 0) {
		/* polled: interrupts not yet in service */
		for (i = 0; i < MBOX_COMMAND_TIMEOUT && result == 0; i++) {
			u_int16_t isr, info;

			delay(100);

			if (qlw_read_isr(sc, &isr, &info) == 0)
				continue;

			switch (isr) {
			case QLW_INT_TYPE_MBOX:
				result = info;
				break;

			default:
				/* not ours; let the normal path handle it */
				qlw_handle_intr(sc, isr, info);
				break;
			}
		}
	} else {
		/* qlw_handle_intr() fills sc_mbox[] and wakes us */
		tsleep_nsec(sc->sc_mbox, PRIBIO, "qlw_mbox", INFSLP);
		result = sc->sc_mbox[0];
	}

	switch (result) {
	case QLW_MBOX_COMPLETE:
		/* fetch requested result registers; zero the rest */
		for (i = 1; i < nitems(sc->sc_mbox); i++) {
			sc->sc_mbox[i] = (maskout & (1 << i)) ?
			    qlw_read_mbox(sc, i) : 0;
		}
		rv = 0;
		break;

	case 0:
		/* timed out; do something? */
		DPRINTF(QLW_D_MBOX, "%s: mbox timed out\n", DEVNAME(sc));
		rv = 1;
		break;

	default:
		sc->sc_mbox[0] = result;
		rv = result;
		break;
	}

	qlw_clear_isr(sc, QLW_INT_TYPE_MBOX);
	sc->sc_mbox_pending = 0;
	return (rv);
}
1053
1054void
1055qlw_mbox_putaddr(u_int16_t *mbox, struct qlw_dmamem *mem)
1056{
1057 mbox[2] = (QLW_DMA_DVA(mem)((u_int64_t)(mem)->qdm_map->dm_segs[0].ds_addr) >> 16) & 0xffff;
1058 mbox[3] = (QLW_DMA_DVA(mem)((u_int64_t)(mem)->qdm_map->dm_segs[0].ds_addr) >> 0) & 0xffff;
1059 mbox[6] = (QLW_DMA_DVA(mem)((u_int64_t)(mem)->qdm_map->dm_segs[0].ds_addr) >> 48) & 0xffff;
1060 mbox[7] = (QLW_DMA_DVA(mem)((u_int64_t)(mem)->qdm_map->dm_segs[0].ds_addr) >> 32) & 0xffff;
1061}
1062
1063void
1064qlw_set_ints(struct qlw_softc *sc, int enabled)
1065{
1066 u_int16_t v = enabled ? (QLW_INT_REQ0x0002 | QLW_RISC_INT_REQ0x0004) : 0;
1067 qlw_write(sc, QLW_INT_CTRL0x08, v);
1068}
1069
1070int
1071qlw_read_isr(struct qlw_softc *sc, u_int16_t *isr, u_int16_t *info)
1072{
1073 u_int16_t int_status;
1074
1075 if (qlw_read(sc, QLW_SEMA0x0c) & QLW_SEMA_LOCK0x0001) {
1076 *info = qlw_read_mbox(sc, 0);
1077 if (*info & QLW_MBOX_HAS_STATUS0x4000)
1078 *isr = QLW_INT_TYPE_MBOX1;
1079 else
1080 *isr = QLW_INT_TYPE_ASYNC2;
1081 } else {
1082 int_status = qlw_read(sc, QLW_INT_STATUS0x0a);
1083 if ((int_status & (QLW_INT_REQ0x0002 | QLW_RISC_INT_REQ0x0004)) == 0)
1084 return (0);
1085
1086 *isr = QLW_INT_TYPE_IO3;
1087 }
1088
1089 return (1);
1090}
1091
1092void
1093qlw_clear_isr(struct qlw_softc *sc, u_int16_t isr)
1094{
1095 qlw_host_cmd(sc, QLW_HOST_CMD_CLR_RISC_INT0x7);
1096 switch (isr) {
1097 case QLW_INT_TYPE_MBOX1:
1098 case QLW_INT_TYPE_ASYNC2:
1099 qlw_write(sc, QLW_SEMA0x0c, 0);
1100 break;
1101 default:
1102 break;
1103 }
1104}
1105
/*
 * Soft-reset the adapter and its RISC processor, reset the request and
 * response queue pointers, and verify the chip responds to a NOP
 * mailbox command.  Returns 0 on success, ENXIO on failure.
 */
int
qlw_softreset(struct qlw_softc *sc)
{
	int i;

	qlw_set_ints(sc, 0);

	/* reset */
	qlw_write(sc, QLW_INT_CTRL, QLW_RESET);
	delay(100);
	/* clear data and control dma engines? */

	/* wait for soft reset to clear */
	for (i = 0; i < 1000; i++) {
		if ((qlw_read(sc, QLW_INT_CTRL) & QLW_RESET) == 0)
			break;

		delay(100);
	}

	if (i == 1000) {
		DPRINTF(QLW_D_INTR, "%s: reset didn't clear\n", DEVNAME(sc));
		qlw_set_ints(sc, 0);
		return (ENXIO);
	}

	qlw_write(sc, QLW_CFG1, 0);

	/* reset risc processor */
	qlw_host_cmd(sc, QLW_HOST_CMD_RESET);
	delay(100);
	qlw_write(sc, QLW_SEMA, 0);
	qlw_host_cmd(sc, QLW_HOST_CMD_RELEASE);

	/* reset queue pointers */
	qlw_queue_write(sc, QLW_REQ_IN, 0);
	qlw_queue_write(sc, QLW_REQ_OUT, 0);
	qlw_queue_write(sc, QLW_RESP_IN, 0);
	qlw_queue_write(sc, QLW_RESP_OUT, 0);

	qlw_set_ints(sc, 1);
	qlw_host_cmd(sc, QLW_HOST_CMD_BIOS);

	/* do a basic mailbox operation to check we're alive */
	sc->sc_mbox[0] = QLW_MBOX_NOP;
	if (qlw_mbox(sc, 0x0001, 0x0001)) {
		DPRINTF(QLW_D_INTR, "%s: ISP not responding after reset\n",
		    DEVNAME(sc));
		return (ENXIO);
	}

	return (0);
}
1159
/*
 * Enable DMA bursting.  ISP1000/1040 expose the command/data DMA config
 * registers directly; later chips require pausing the RISC and flipping
 * to the alternate register bank to reach their equivalents.
 */
void
qlw_dma_burst_enable(struct qlw_softc *sc)
{
	if (sc->sc_isp_gen == QLW_GEN_ISP1000 ||
	    sc->sc_isp_gen == QLW_GEN_ISP1040) {
		qlw_write(sc, QLW_CDMA_CFG,
		    qlw_read(sc, QLW_CDMA_CFG) | QLW_DMA_BURST_ENABLE);
		qlw_write(sc, QLW_DDMA_CFG,
		    qlw_read(sc, QLW_DDMA_CFG) | QLW_DMA_BURST_ENABLE);
	} else {
		/* pause RISC, select the DMA register bank */
		qlw_host_cmd(sc, QLW_HOST_CMD_PAUSE);
		qlw_write(sc, QLW_CFG1,
		    qlw_read(sc, QLW_CFG1) | QLW_DMA_BANK);
		qlw_write(sc, QLW_CDMA_CFG_1080,
		    qlw_read(sc, QLW_CDMA_CFG_1080) | QLW_DMA_BURST_ENABLE);
		qlw_write(sc, QLW_DDMA_CFG_1080,
		    qlw_read(sc, QLW_DDMA_CFG_1080) | QLW_DMA_BURST_ENABLE);
		/* restore bank selection, resume RISC */
		qlw_write(sc, QLW_CFG1,
		    qlw_read(sc, QLW_CFG1) & ~QLW_DMA_BANK);
		qlw_host_cmd(sc, QLW_HOST_CMD_RELEASE);
	}
}
1182
/*
 * Deferred-task handler; currently a stub that performs no work.
 */
void
qlw_update(struct qlw_softc *sc, int task)
{
	/* do things */
}
1188
/*
 * Handle an async event code delivered via mailbox 0.  Always returns 1.
 */
int
qlw_async(struct qlw_softc *sc, u_int16_t info)
{
	int bus;

	switch (info) {
	case QLW_ASYNC_BUS_RESET:
		DPRINTF(QLW_D_PORT, "%s: bus reset\n", DEVNAME(sc));
		/* mailbox 6 identifies the bus that was reset; a sync
		 * marker must be queued before further commands to it */
		bus = qlw_read_mbox(sc, 6);
		sc->sc_marker_required[bus] = 1;
		break;

#if 0
	/* NOTE(review): disabled code calls qla_update() — looks copied
	 * from qla(4); would presumably be qlw_update() here. */
	case QLW_ASYNC_SYSTEM_ERROR:
		qla_update(sc, QLW_UPDATE_SOFTRESET);
		break;

	case QLW_ASYNC_REQ_XFER_ERROR:
		qla_update(sc, QLW_UPDATE_SOFTRESET);
		break;

	case QLW_ASYNC_RSP_XFER_ERROR:
		qla_update(sc, QLW_UPDATE_SOFTRESET);
		break;
#endif

	case QLW_ASYNC_SCSI_CMD_COMPLETE:
		/* shouldn't happen, we disable fast posting */
		break;

	case QLW_ASYNC_CTIO_COMPLETE:
		/* definitely shouldn't happen, we don't do target mode */
		break;

	default:
		DPRINTF(QLW_D_INTR, "%s: unknown async %x\n", DEVNAME(sc),
		    info);
		break;
	}
	return (1);
}
1230
1231#ifdef QLW_DEBUG
1232void
1233qlw_dump_iocb(struct qlw_softc *sc, void *buf, int flags)
1234{
1235 u_int8_t *iocb = buf;
1236 int l;
1237 int b;
1238
1239 if ((qlwdebug & flags) == 0)
1240 return;
1241
1242 printf("%s: iocb:\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
1243 for (l = 0; l < 4; l++) {
1244 for (b = 0; b < 16; b++) {
1245 printf(" %2.2x", iocb[(l*16)+b]);
1246 }
1247 printf("\n");
1248 }
1249}
1250
1251void
1252qlw_dump_iocb_segs(struct qlw_softc *sc, void *segs, int n)
1253{
1254 u_int8_t *buf = segs;
1255 int s, b;
1256 if ((qlwdebug & QLW_D_IOCB0x10) == 0)
1257 return;
1258
1259 printf("%s: iocb segs:\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
1260 for (s = 0; s < n; s++) {
1261 for (b = 0; b < sizeof(struct qlw_iocb_seg); b++) {
1262 printf(" %2.2x", buf[(s*(sizeof(struct qlw_iocb_seg)))
1263 + b]);
1264 }
1265 printf("\n");
1266 }
1267}
1268#endif
1269
1270/*
1271 * The PCI bus is little-endian whereas SBus is big-endian. This
1272 * leads to some differences in byte twisting of DMA transfers of
1273 * request and response queue entries. Most fields can be treated as
1274 * 16-bit or 32-bit with the endianness of the bus, but the header
1275 * fields end up being swapped by the ISP1000's SBus interface.
1276 */
1277
/*
 * Extract type and flags from an IOCB header.  On the SBus-attached
 * ISP1000 the interface byte-swaps the header, so the fields land in
 * their swapped neighbours (see the block comment above).
 */
void
qlw_get_header(struct qlw_softc *sc, struct qlw_iocb_hdr *hdr,
    int *type, int *flags)
{
	if (sc->sc_isp_gen == QLW_GEN_ISP1000) {
		/* byte-swapped by the SBus interface */
		*type = hdr->entry_count;
		*flags = hdr->seqno;
	} else {
		*type = hdr->entry_type;
		*flags = hdr->flags;
	}
}
1290
/*
 * Fill in an IOCB header.  On the ISP1000 the fields are written into
 * their byte-swapped neighbours to compensate for the SBus interface
 * swapping them on the way out (see the block comment above).
 */
void
qlw_put_header(struct qlw_softc *sc, struct qlw_iocb_hdr *hdr,
    int type, int count)
{
	if (sc->sc_isp_gen == QLW_GEN_ISP1000) {
		/* swapped on purpose: undone by the SBus interface */
		hdr->entry_type = count;
		hdr->entry_count = type;
		hdr->seqno = 0;
		hdr->flags = 0;
	} else {
		hdr->entry_type = type;
		hdr->entry_count = count;
		hdr->seqno = 0;
		hdr->flags = 0;
	}
}
1307
/*
 * Fill one IOCB data segment descriptor from DMA map segment `num',
 * converting to the bus endianness expected by the chip.
 */
void
qlw_put_data_seg(struct qlw_softc *sc, struct qlw_iocb_seg *seg,
    bus_dmamap_t dmap, int num)
{
	seg->seg_addr = qlw_swap32(sc, dmap->dm_segs[num].ds_addr);
	seg->seg_len = qlw_swap32(sc, dmap->dm_segs[num].ds_len);
}
1315
/*
 * Build a sync-all marker IOCB for the given bus in the request ring
 * entry at `buf'.  Required after a bus reset before new commands.
 */
void
qlw_put_marker(struct qlw_softc *sc, int bus, void *buf)
{
	struct qlw_iocb_marker *marker = buf;

	qlw_put_header(sc, &marker->hdr, QLW_IOCB_MARKER, 1);

	/* could be more specific here; isp(4) isn't */
	marker->device = qlw_swap16(sc, (bus << 7) << 8);
	marker->modifier = qlw_swap16(sc, QLW_IOCB_MARKER_SYNC_ALL);
	qlw_dump_iocb(sc, buf, QLW_D_IOCB);
}
1328
/*
 * Build a type-0 command IOCB at `buf' for the transfer `xs'.  Places
 * up to QLW_IOCB_SEGS_PER_CMD DMA segments inline; the header's entry
 * count tells the chip how many continuation IOCBs follow for the rest.
 */
void
qlw_put_cmd(struct qlw_softc *sc, void *buf, struct scsi_xfer *xs,
    struct qlw_ccb *ccb)
{
	struct qlw_iocb_req0 *req = buf;
	int entry_count = 1;
	u_int16_t dir;
	int seg, nsegs;
	int seg_count;
	int timeout = 0;
	int bus, target, lun;

	if (xs->datalen == 0) {
		dir = QLW_IOCB_CMD_NO_DATA;
		seg_count = 1;
	} else {
		dir = xs->flags & SCSI_DATA_IN ? QLW_IOCB_CMD_READ_DATA :
		    QLW_IOCB_CMD_WRITE_DATA;
		seg_count = ccb->ccb_dmamap->dm_nsegs;
		/* count continuation IOCBs needed for the overflow segs */
		nsegs = ccb->ccb_dmamap->dm_nsegs - QLW_IOCB_SEGS_PER_CMD;
		while (nsegs > 0) {
			entry_count++;
			nsegs -= QLW_IOCB_SEGS_PER_CONT;
		}
		for (seg = 0; seg < ccb->ccb_dmamap->dm_nsegs; seg++) {
			if (seg >= QLW_IOCB_SEGS_PER_CMD)
				break;
			qlw_put_data_seg(sc, &req->segs[seg],
			    ccb->ccb_dmamap, seg);
		}
	}

	/* tagged queuing (simple tags) once the adapter is up */
	if (sc->sc_running && (xs->sc_link->quirks & SDEV_NOTAGS) == 0)
		dir |= QLW_IOCB_CMD_SIMPLE_QUEUE;

	qlw_put_header(sc, &req->hdr, QLW_IOCB_CMD_TYPE_0, entry_count);

	/*
	 * timeout is in seconds. make sure it's at least 1 if a timeout
	 * was specified in xs
	 */
	if (xs->timeout != 0)
		timeout = MAX(1, xs->timeout/1000);

	req->flags = qlw_swap16(sc, dir);
	req->seg_count = qlw_swap16(sc, seg_count);
	req->timeout = qlw_swap16(sc, timeout);

	bus = qlw_xs_bus(sc, xs);
	target = xs->sc_link->target;
	lun = xs->sc_link->lun;
	req->device = qlw_swap16(sc, (((bus << 7) | target) << 8) | lun);

	memcpy(req->cdb, &xs->cmd, xs->cmdlen);
	req->ccblen = qlw_swap16(sc, xs->cmdlen);

	/* handle comes back in the response entry to find this ccb */
	req->handle = qlw_swap32(sc, ccb->ccb_id);

	qlw_dump_iocb(sc, buf, QLW_D_IOCB);
}
1389
1390void
1391qlw_put_cont(struct qlw_softc *sc, void *buf, struct scsi_xfer *xs,
1392 struct qlw_ccb *ccb, int seg0)
1393{
1394 struct qlw_iocb_cont0 *cont = buf;
1395 int seg;
1396
1397 qlw_put_header(sc, &cont->hdr, QLW_IOCB_CONT_TYPE_00x02, 1);
1398
1399 for (seg = seg0; seg < ccb->ccb_dmamap->dm_nsegs; seg++) {
1400 if ((seg - seg0) >= QLW_IOCB_SEGS_PER_CONT7)
1401 break;
1402 qlw_put_data_seg(sc, &cont->segs[seg - seg0],
1403 ccb->ccb_dmamap, seg);
1404 }
1405}
1406
1407#ifndef ISP_NOFIRMWARE
/*
 * Load a firmware image into RISC RAM one word at a time via the
 * WRITE_RAM_WORD mailbox command, then ask the chip to verify the
 * checksum of the loaded region.  Returns 0 on success, 1 on failure.
 * NOTE(review): src[3] is taken as the image length in words —
 * presumably part of the firmware blob format; confirm against the
 * firmware headers.
 */
int
qlw_load_firmware_words(struct qlw_softc *sc, const u_int16_t *src,
    u_int16_t dest)
{
	u_int16_t i;

	for (i = 0; i < src[3]; i++) {
		sc->sc_mbox[0] = QLW_MBOX_WRITE_RAM_WORD;
		sc->sc_mbox[1] = i + dest;
		sc->sc_mbox[2] = src[i];
		if (qlw_mbox(sc, 0x07, 0x01)) {
			printf("firmware load failed\n");
			return (1);
		}
	}

	/* have the chip checksum what we just loaded */
	sc->sc_mbox[0] = QLW_MBOX_VERIFY_CSUM;
	sc->sc_mbox[1] = dest;
	if (qlw_mbox(sc, 0x0003, 0x0003)) {
		printf("verification of chunk at %x failed: %x\n",
		    dest, sc->sc_mbox[1]);
		return (1);
	}

	return (0);
}
1434
/*
 * Load the firmware image attached to the softc at the standard RISC
 * code origin.
 */
int
qlw_load_firmware(struct qlw_softc *sc)
{
	return qlw_load_firmware_words(sc, sc->sc_firmware, QLW_CODE_ORG);
}
1440
1441#endif /* !ISP_NOFIRMWARE */
1442
/*
 * Bit-bang the serial (Microwire-style) NVRAM out through the NVRAM
 * register, verify its 8-bit checksum and "ISP " signature, and copy it
 * into sc_nvram.  Returns 0 on success, 1 if absent or corrupt.
 */
int
qlw_read_nvram(struct qlw_softc *sc)
{
	u_int16_t data[sizeof(sc->sc_nvram) >> 1];
	u_int16_t req, cmd, val;
	u_int8_t csum;
	int i, bit;
	int reqcmd;
	int nbits;

	if (sc->sc_nvram_size == 0)
		return (1);

	/* smaller parts take a shorter address field in the read opcode */
	if (sc->sc_nvram_size == 128) {
		reqcmd = (QLW_NVRAM_CMD_READ << 6);
		nbits = 8;
	} else {
		reqcmd = (QLW_NVRAM_CMD_READ << 8);
		nbits = 10;
	}

	/* select the chip and give it a clock edge to start */
	qlw_write(sc, QLW_NVRAM, QLW_NVRAM_CHIP_SEL);
	delay(10);
	qlw_write(sc, QLW_NVRAM, QLW_NVRAM_CHIP_SEL | QLW_NVRAM_CLOCK);
	delay(10);

	for (i = 0; i < (sc->sc_nvram_size >> 1); i++) {
		req = i | reqcmd;

		/* write each bit out through the nvram register */
		for (bit = nbits; bit >= 0; bit--) {
			cmd = QLW_NVRAM_CHIP_SEL;
			if ((req >> bit) & 1) {
				cmd |= QLW_NVRAM_DATA_OUT;
			}
			/* clock the bit: data, data+clock, data */
			qlw_write(sc, QLW_NVRAM, cmd);
			delay(10);
			qlw_read(sc, QLW_NVRAM);

			qlw_write(sc, QLW_NVRAM, cmd | QLW_NVRAM_CLOCK);
			delay(10);
			qlw_read(sc, QLW_NVRAM);

			qlw_write(sc, QLW_NVRAM, cmd);
			delay(10);
			qlw_read(sc, QLW_NVRAM);
		}

		/* read the result back */
		val = 0;
		for (bit = 0; bit < 16; bit++) {
			val <<= 1;
			qlw_write(sc, QLW_NVRAM, QLW_NVRAM_CHIP_SEL |
			    QLW_NVRAM_CLOCK);
			delay(10);
			if (qlw_read(sc, QLW_NVRAM) & QLW_NVRAM_DATA_IN)
				val |= 1;
			delay(10);

			qlw_write(sc, QLW_NVRAM, QLW_NVRAM_CHIP_SEL);
			delay(10);
			qlw_read(sc, QLW_NVRAM);
		}

		/* deselect between words */
		qlw_write(sc, QLW_NVRAM, 0);
		delay(10);
		qlw_read(sc, QLW_NVRAM);

		data[i] = letoh16(val);
	}

	/* checksum over all bytes must come to zero */
	csum = 0;
	for (i = 0; i < (sc->sc_nvram_size >> 1); i++) {
		csum += data[i] & 0xff;
		csum += data[i] >> 8;
	}

	memcpy(&sc->sc_nvram, data, sizeof(sc->sc_nvram));
	/* id field should be 'ISP ', version should high enough */
	if (sc->sc_nvram.id[0] != 'I' || sc->sc_nvram.id[1] != 'S' ||
	    sc->sc_nvram.id[2] != 'P' || sc->sc_nvram.id[3] != ' ' ||
	    sc->sc_nvram.nvram_version < sc->sc_nvram_minversion ||
	    (csum != 0)) {
		printf("%s: nvram corrupt\n", DEVNAME(sc));
		return (1);
	}
	return (0);
}
1531
/*
 * Populate the softc settings from an ISP1000/1040-layout NVRAM image.
 * These chips have a single bus, so everything lands in index 0.
 */
void
qlw_parse_nvram_1040(struct qlw_softc *sc, int bus)
{
	struct qlw_nvram_1040 *nv = (struct qlw_nvram_1040 *)&sc->sc_nvram;
	int target;

	KASSERT(bus == 0);

	/* keep any initiator id already forced on the softc */
	if (!ISSET(sc->sc_flags, QLW_FLAG_INITIATOR))
		sc->sc_initiator[0] = (nv->config1 >> 4);

	sc->sc_retry_count[0] = nv->retry_count;
	sc->sc_retry_delay[0] = nv->retry_delay;
	sc->sc_reset_delay[0] = nv->reset_delay;
	sc->sc_tag_age_limit[0] = nv->tag_age_limit;
	sc->sc_selection_timeout[0] = letoh16(nv->selection_timeout);
	sc->sc_max_queue_depth[0] = letoh16(nv->max_queue_depth);
	sc->sc_async_data_setup[0] = (nv->config2 & 0x0f);
	sc->sc_req_ack_active_neg[0] = ((nv->config2 & 0x10) >> 4);
	sc->sc_data_line_active_neg[0] = ((nv->config2 & 0x20) >> 5);

	for (target = 0; target < QLW_MAX_TARGETS; target++) {
		struct qlw_target *qt = &sc->sc_target[0][target];

		qt->qt_params = (nv->target[target].parameter << 8);
		qt->qt_exec_throttle = nv->target[target].execution_throttle;
		qt->qt_sync_period = nv->target[target].sync_period;
		qt->qt_sync_offset = nv->target[target].flags & 0x0f;
	}
}
1562
/*
 * Populate the per-bus softc settings from an ISP1080/12160-layout
 * NVRAM image for the given bus.
 */
void
qlw_parse_nvram_1080(struct qlw_softc *sc, int bus)
{
	struct qlw_nvram_1080 *nvram = (struct qlw_nvram_1080 *)&sc->sc_nvram;
	struct qlw_nvram_bus *nv = &nvram->bus[bus];
	int target;

	sc->sc_isp_config = nvram->isp_config;
	sc->sc_fw_features = nvram->fw_features;

	/* keep any initiator id already forced on the softc */
	if (!ISSET(sc->sc_flags, QLW_FLAG_INITIATOR))
		sc->sc_initiator[bus] = (nv->config1 & 0x0f);

	sc->sc_retry_count[bus] = nv->retry_count;
	sc->sc_retry_delay[bus] = nv->retry_delay;
	sc->sc_reset_delay[bus] = nv->reset_delay;
	sc->sc_selection_timeout[bus] = letoh16(nv->selection_timeout);
	sc->sc_max_queue_depth[bus] = letoh16(nv->max_queue_depth);
	sc->sc_async_data_setup[bus] = (nv->config2 & 0x0f);
	sc->sc_req_ack_active_neg[bus] = ((nv->config2 & 0x10) >> 4);
	sc->sc_data_line_active_neg[bus] = ((nv->config2 & 0x20) >> 5);

	for (target = 0; target < QLW_MAX_TARGETS; target++) {
		struct qlw_target *qt = &sc->sc_target[bus][target];

		qt->qt_params = (nv->target[target].parameter << 8);
		qt->qt_exec_throttle = nv->target[target].execution_throttle;
		qt->qt_sync_period = nv->target[target].sync_period;
		/* the 12160 supports a wider sync offset field */
		if (sc->sc_isp_gen == QLW_GEN_ISP12160)
			qt->qt_sync_offset = nv->target[target].flags & 0x1f;
		else
			qt->qt_sync_offset = nv->target[target].flags & 0x0f;
	}
}
1597
/*
 * Install sane per-bus defaults for use when the NVRAM is absent or
 * corrupt.
 */
void
qlw_init_defaults(struct qlw_softc *sc, int bus)
{
	int target;

	switch (sc->sc_isp_gen) {
	case QLW_GEN_ISP1000:
		break;
	case QLW_GEN_ISP1040:
		sc->sc_isp_config = QLW_BURST_ENABLE | QLW_PCI_FIFO_64;
		break;
	case QLW_GEN_ISP1080:
	case QLW_GEN_ISP12160:
		sc->sc_isp_config = QLW_BURST_ENABLE | QLW_PCI_FIFO_128;
		sc->sc_fw_features = QLW_FW_FEATURE_LVD_NOTIFY;
		break;
	}

	sc->sc_retry_count[bus] = 0;
	sc->sc_retry_delay[bus] = 0;
	sc->sc_reset_delay[bus] = 3;
	sc->sc_tag_age_limit[bus] = 8;
	sc->sc_selection_timeout[bus] = 250;
	sc->sc_max_queue_depth[bus] = 32;
	/* faster clocks need a longer async data setup count */
	if (sc->sc_clock > 40)
		sc->sc_async_data_setup[bus] = 9;
	else
		sc->sc_async_data_setup[bus] = 6;
	sc->sc_req_ack_active_neg[bus] = 1;
	sc->sc_data_line_active_neg[bus] = 1;

	for (target = 0; target < QLW_MAX_TARGETS; target++) {
		struct qlw_target *qt = &sc->sc_target[bus][target];

		qt->qt_params = QLW_TARGET_DEFAULT;
		qt->qt_exec_throttle = 16;
		qt->qt_sync_period = 10;
		qt->qt_sync_offset = 12;
	}
}
1638
/*
 * Allocate a DMA-safe memory region of `size' bytes in a single
 * segment, mapped into kernel VA and loaded into a DMA map.  Uses the
 * standard goto-unwind pattern so partial setup is torn down on
 * failure.  Returns NULL on any failure.
 */
struct qlw_dmamem *
qlw_dmamem_alloc(struct qlw_softc *sc, size_t size)
{
	struct qlw_dmamem *m;
	int nsegs;

	m = malloc(sizeof(*m), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (m == NULL)
		return (NULL);

	m->qdm_size = size;

	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &m->qdm_map) != 0)
		goto qdmfree;

	if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &m->qdm_seg, 1,
	    &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &m->qdm_seg, nsegs, size, &m->qdm_kva,
	    BUS_DMA_NOWAIT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, m->qdm_map, m->qdm_kva, size, NULL,
	    BUS_DMA_NOWAIT) != 0)
		goto unmap;

	return (m);

unmap:
	bus_dmamem_unmap(sc->sc_dmat, m->qdm_kva, m->qdm_size);
free:
	bus_dmamem_free(sc->sc_dmat, &m->qdm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, m->qdm_map);
qdmfree:
	free(m, M_DEVBUF, sizeof(*m));

	return (NULL);
}
1680
1681void
1682qlw_dmamem_free(struct qlw_softc *sc, struct qlw_dmamem *m)
1683{
1684 bus_dmamap_unload(sc->sc_dmat, m->qdm_map)(*(sc->sc_dmat)->_dmamap_unload)((sc->sc_dmat), (m->
qdm_map))
;
1685 bus_dmamem_unmap(sc->sc_dmat, m->qdm_kva, m->qdm_size)(*(sc->sc_dmat)->_dmamem_unmap)((sc->sc_dmat), (m->
qdm_kva), (m->qdm_size))
;
1686 bus_dmamem_free(sc->sc_dmat, &m->qdm_seg, 1)(*(sc->sc_dmat)->_dmamem_free)((sc->sc_dmat), (&
m->qdm_seg), (1))
;
1687 bus_dmamap_destroy(sc->sc_dmat, m->qdm_map)(*(sc->sc_dmat)->_dmamap_destroy)((sc->sc_dmat), (m->
qdm_map))
;
1688 free(m, M_DEVBUF2, sizeof(*m));
1689}
1690
1691int
1692qlw_alloc_ccbs(struct qlw_softc *sc)
1693{
1694 struct qlw_ccb *ccb;
1695 u_int8_t *cmd;
1696 int i;
1697
1698 SIMPLEQ_INIT(&sc->sc_ccb_free)do { (&sc->sc_ccb_free)->sqh_first = ((void *)0); (
&sc->sc_ccb_free)->sqh_last = &(&sc->sc_ccb_free
)->sqh_first; } while (0)
;
1699 mtx_init(&sc->sc_ccb_mtx, IPL_BIO)do { (void)(((void *)0)); (void)(0); __mtx_init((&sc->
sc_ccb_mtx), ((((0x6)) > 0x0 && ((0x6)) < 0x9) ?
0x9 : ((0x6)))); } while (0)
;
1700 mtx_init(&sc->sc_queue_mtx, IPL_BIO)do { (void)(((void *)0)); (void)(0); __mtx_init((&sc->
sc_queue_mtx), ((((0x6)) > 0x0 && ((0x6)) < 0x9
) ? 0x9 : ((0x6)))); } while (0)
;
1701
1702 sc->sc_ccbs = mallocarray(sc->sc_maxccbs, sizeof(struct qlw_ccb),
1703 M_DEVBUF2, M_WAITOK0x0001 | M_CANFAIL0x0004 | M_ZERO0x0008);
1704 if (sc->sc_ccbs == NULL((void *)0)) {
1705 printf("%s: unable to allocate ccbs\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
1706 return (1);
1707 }
1708
1709 sc->sc_requests = qlw_dmamem_alloc(sc, sc->sc_maxrequests *
1710 QLW_QUEUE_ENTRY_SIZE64);
1711 if (sc->sc_requests == NULL((void *)0)) {
1712 printf("%s: unable to allocate ccb dmamem\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
1713 goto free_ccbs;
1714 }
1715 sc->sc_responses = qlw_dmamem_alloc(sc, sc->sc_maxresponses *
1716 QLW_QUEUE_ENTRY_SIZE64);
1717 if (sc->sc_responses == NULL((void *)0)) {
1718 printf("%s: unable to allocate rcb dmamem\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
1719 goto free_req;
1720 }
1721
1722 cmd = QLW_DMA_KVA(sc->sc_requests)((void *)(sc->sc_requests)->qdm_kva);
1723 memset(cmd, 0, QLW_QUEUE_ENTRY_SIZE * sc->sc_maxccbs)__builtin_memset((cmd), (0), (64 * sc->sc_maxccbs));
1724 for (i = 0; i < sc->sc_maxccbs; i++) {
1725 ccb = &sc->sc_ccbs[i];
1726
1727 if (bus_dmamap_create(sc->sc_dmat, MAXPHYS,(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), ((64
* 1024)), (16), ((64 * 1024)), (0), (0x0001 | 0x0002), (&
ccb->ccb_dmamap))
1728 QLW_MAX_SEGS, MAXPHYS, 0,(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), ((64
* 1024)), (16), ((64 * 1024)), (0), (0x0001 | 0x0002), (&
ccb->ccb_dmamap))
1729 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), ((64
* 1024)), (16), ((64 * 1024)), (0), (0x0001 | 0x0002), (&
ccb->ccb_dmamap))
1730 &ccb->ccb_dmamap)(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), ((64
* 1024)), (16), ((64 * 1024)), (0), (0x0001 | 0x0002), (&
ccb->ccb_dmamap))
!= 0) {
1731 printf("%s: unable to create dma map\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
1732 goto free_maps;
1733 }
1734
1735 ccb->ccb_sc = sc;
1736 ccb->ccb_id = i;
1737
1738 qlw_put_ccb(sc, ccb);
1739 }
1740
1741 scsi_iopool_init(&sc->sc_iopool, sc, qlw_get_ccb, qlw_put_ccb);
1742 return (0);
1743
1744free_maps:
1745 while ((ccb = qlw_get_ccb(sc)) != NULL((void *)0))
1746 bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap)(*(sc->sc_dmat)->_dmamap_destroy)((sc->sc_dmat), (ccb
->ccb_dmamap))
;
1747
1748 qlw_dmamem_free(sc, sc->sc_responses);
1749free_req:
1750 qlw_dmamem_free(sc, sc->sc_requests);
1751free_ccbs:
1752 free(sc->sc_ccbs, M_DEVBUF2, 0);
1753
1754 return (1);
1755}
1756
1757void
1758qlw_free_ccbs(struct qlw_softc *sc)
1759{
1760 struct qlw_ccb *ccb;
1761
1762 scsi_iopool_destroy(&sc->sc_iopool);
1763 while ((ccb = qlw_get_ccb(sc)) != NULL((void *)0))
1764 bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap)(*(sc->sc_dmat)->_dmamap_destroy)((sc->sc_dmat), (ccb
->ccb_dmamap))
;
1765 qlw_dmamem_free(sc, sc->sc_responses);
1766 qlw_dmamem_free(sc, sc->sc_requests);
1767 free(sc->sc_ccbs, M_DEVBUF2, 0);
1768}
1769
1770void *
1771qlw_get_ccb(void *xsc)
1772{
1773 struct qlw_softc *sc = xsc;
1774 struct qlw_ccb *ccb;
1775
1776 mtx_enter(&sc->sc_ccb_mtx);
1777 ccb = SIMPLEQ_FIRST(&sc->sc_ccb_free)((&sc->sc_ccb_free)->sqh_first);
1778 if (ccb != NULL((void *)0)) {
1779 SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_free, ccb_link)do { if (((&sc->sc_ccb_free)->sqh_first = (&sc->
sc_ccb_free)->sqh_first->ccb_link.sqe_next) == ((void *
)0)) (&sc->sc_ccb_free)->sqh_last = &(&sc->
sc_ccb_free)->sqh_first; } while (0)
;
1780 }
1781 mtx_leave(&sc->sc_ccb_mtx);
1782 return (ccb);
1783}
1784
1785void
1786qlw_put_ccb(void *xsc, void *io)
1787{
1788 struct qlw_softc *sc = xsc;
1789 struct qlw_ccb *ccb = io;
1790
1791 ccb->ccb_xs = NULL((void *)0);
1792 mtx_enter(&sc->sc_ccb_mtx);
1793 SIMPLEQ_INSERT_HEAD(&sc->sc_ccb_free, ccb, ccb_link)do { if (((ccb)->ccb_link.sqe_next = (&sc->sc_ccb_free
)->sqh_first) == ((void *)0)) (&sc->sc_ccb_free)->
sqh_last = &(ccb)->ccb_link.sqe_next; (&sc->sc_ccb_free
)->sqh_first = (ccb); } while (0)
;
1794 mtx_leave(&sc->sc_ccb_mtx);
1795}