Bug Summary

File:     dev/ic/qla.c
Warning:  line 1576, column 14
          Assigned value is garbage or undefined
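This diagnostic comes from the analyzer's core uninitialized-value checks: it fires when the right-hand side of an assignment reads a variable (or memory reached through one) on a path where nothing has written to it yet, so the assignment copies an indeterminate value. The sketch below is hypothetical and not taken from qla.c (the names are invented); it only illustrates the pattern the checker reports:

    int
    widget_lookup(int key, int *out)
    {
            int value;              /* written on only one path */

            if (key == 42)
                    value = 1337;

            *out = value;           /* "Assigned value is garbage or
                                       undefined" when key != 42 */
            return (key == 42);
    }

The usual fix for this class of report is to initialize the variable at its declaration, or to make sure every path that reaches the assignment has set it.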

Annotated Source Code


clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name qla.c -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/lib/clang/13.0.0 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -D CONFIG_DRM_AMD_DC_DCN3_0 -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant -fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free 
-fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /usr/obj/sys/arch/amd64/compile/GENERIC.MP/scan-build/2022-01-12-131800-47421-1 -x c /usr/src/sys/dev/ic/qla.c
1/* $OpenBSD: qla.c,v 1.68 2020/09/22 19:32:52 krw Exp $ */
2
3/*
4 * Copyright (c) 2011 David Gwynne <dlg@openbsd.org>
5 * Copyright (c) 2013, 2014 Jonathan Matthew <jmatthew@openbsd.org>
6 *
7 * Permission to use, copy, modify, and distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
10 *
11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 */
19
20#include <sys/param.h>
21#include <sys/systm.h>
22#include <sys/atomic.h>
23#include <sys/device.h>
24#include <sys/ioctl.h>
25#include <sys/malloc.h>
26#include <sys/kernel.h>
27#include <sys/mutex.h>
28#include <sys/rwlock.h>
29#include <sys/sensors.h>
30#include <sys/queue.h>
31
32#include <machine/bus.h>
33
34#include <scsi/scsi_all.h>
35#include <scsi/scsiconf.h>
36
37#include <dev/ic/qlareg.h>
38#include <dev/ic/qlavar.h>
39
40#ifdef QLA_DEBUG
41#define DPRINTF(m, f...) do { if ((qladebug & (m)) == (m)) printf(f); } \
42 while (0)
43#define QLA_D_MBOX 0x01
44#define QLA_D_INTR 0x02
45#define QLA_D_PORT 0x04
46#define QLA_D_IO 0x08
47#define QLA_D_IOCB 0x10
48int qladebug = QLA_D_PORT;
49#else
50#define DPRINTF(m, f...)
51#endif
52
53
54#ifndef ISP_NOFIRMWARE
55#include <dev/microcode/isp/asm_2100.h>
56#include <dev/microcode/isp/asm_2200.h>
57#include <dev/microcode/isp/asm_2300.h>
58#endif
59
60struct cfdriver qla_cd = {
61 NULL((void *)0),
62 "qla",
63 DV_DULL
64};
65
66void qla_scsi_cmd(struct scsi_xfer *);
67int qla_scsi_probe(struct scsi_link *);
68
69u_int16_t qla_read(struct qla_softc *, bus_size_t);
70void qla_write(struct qla_softc *, bus_size_t, u_int16_t);
71void qla_host_cmd(struct qla_softc *sc, u_int16_t);
72
73u_int16_t qla_read_queue_2100(struct qla_softc *, bus_size_t);
74
75int qla_mbox(struct qla_softc *, int);
76int qla_sns_req(struct qla_softc *, struct qla_dmamem *, int);
77void qla_mbox_putaddr(u_int16_t *, struct qla_dmamem *);
78u_int16_t qla_read_mbox(struct qla_softc *, int);
79void qla_write_mbox(struct qla_softc *, int, u_int16_t);
80
81void qla_handle_intr(struct qla_softc *, u_int16_t, u_int16_t);
82void qla_set_ints(struct qla_softc *, int);
83int qla_read_isr_1G(struct qla_softc *, u_int16_t *, u_int16_t *);
84int qla_read_isr_2G(struct qla_softc *, u_int16_t *, u_int16_t *);
85void qla_clear_isr(struct qla_softc *, u_int16_t);
86
87void qla_update_start(struct qla_softc *, int);
88void qla_update_done(struct qla_softc *, int);
89void qla_do_update(void *);
90
91void qla_put_marker(struct qla_softc *, void *);
92void qla_put_cmd(struct qla_softc *, void *, struct scsi_xfer *,
93 struct qla_ccb *);
94struct qla_ccb *qla_handle_resp(struct qla_softc *, u_int16_t);
95
96int qla_get_port_name_list(struct qla_softc *, u_int32_t);
97struct qla_fc_port *qla_next_fabric_port(struct qla_softc *, u_int32_t *,
98 u_int32_t *);
99int qla_get_port_db(struct qla_softc *c, u_int16_t,
100 struct qla_dmamem *);
101int qla_add_loop_port(struct qla_softc *, struct qla_fc_port *);
102int qla_add_fabric_port(struct qla_softc *, struct qla_fc_port *);
103int qla_add_logged_in_port(struct qla_softc *, int, u_int32_t);
104int qla_classify_port(struct qla_softc *, u_int32_t, u_int64_t,
105 u_int64_t, struct qla_fc_port **);
106int qla_get_loop_id(struct qla_softc *sc, int);
107void qla_clear_port_lists(struct qla_softc *);
108int qla_softreset(struct qla_softc *);
109void qla_update_topology(struct qla_softc *);
110int qla_update_fabric(struct qla_softc *);
111int qla_fabric_plogi(struct qla_softc *, struct qla_fc_port *);
112void qla_fabric_plogo(struct qla_softc *, struct qla_fc_port *);
113
114void qla_update_start(struct qla_softc *, int);
115int qla_async(struct qla_softc *, u_int16_t);
116
117int qla_verify_firmware(struct qla_softc *sc, u_int16_t);
118int qla_load_firmware_words(struct qla_softc *, const u_int16_t *,
119 u_int16_t);
120int qla_load_firmware_2100(struct qla_softc *);
121int qla_load_firmware_2200(struct qla_softc *);
122int qla_load_fwchunk_2300(struct qla_softc *,
123 struct qla_dmamem *, const u_int16_t *, u_int32_t);
124int qla_load_firmware_2300(struct qla_softc *);
125int qla_load_firmware_2322(struct qla_softc *);
126int qla_read_nvram(struct qla_softc *);
127
128struct qla_dmamem *qla_dmamem_alloc(struct qla_softc *, size_t);
129void qla_dmamem_free(struct qla_softc *, struct qla_dmamem *);
130
131int qla_alloc_ccbs(struct qla_softc *);
132void qla_free_ccbs(struct qla_softc *);
133void *qla_get_ccb(void *);
134void qla_put_ccb(void *, void *);
135
136void qla_dump_iocb(struct qla_softc *, void *);
137void qla_dump_iocb_segs(struct qla_softc *, void *, int);
138
139static const struct qla_regs qla_regs_2100 = {
140 qla_read_queue_2100,
141 qla_read_isr_1G,
142 QLA_MBOX_BASE_2100 + 0x8,
143 QLA_MBOX_BASE_2100 + 0x8,
144 QLA_MBOX_BASE_2100 + 0xa,
145 QLA_MBOX_BASE_2100 + 0xa
146};
147
148static const struct qla_regs qla_regs_2200 = {
149 qla_read,
150 qla_read_isr_1G,
151 QLA_MBOX_BASE_2200 + 0x8,
152 QLA_MBOX_BASE_2200 + 0x8,
153 QLA_MBOX_BASE_2200 + 0xa,
154 QLA_MBOX_BASE_2200 + 0xa
155};
156
157static const struct qla_regs qla_regs_23XX = {
158 qla_read,
159 qla_read_isr_2G,
160 QLA_REQ_IN,
161 QLA_REQ_OUT,
162 QLA_RESP_IN,
163 QLA_RESP_OUT
164};
165
166#define qla_queue_read(_sc, _r) ((*(_sc)->sc_regs->read)((_sc), (_r)))
167#define qla_queue_write(_sc, _r, _v) qla_write((_sc), (_r), (_v))
168
169#define qla_read_isr(_sc, _isr, _info) \
170 ((*(_sc)->sc_regs->read_isr)((_sc), (_isr), (_info)))
171
172struct scsi_adapter qla_switch = {
173 qla_scsi_cmd, NULL, qla_scsi_probe, NULL, NULL
174};
175
176int
177qla_classify_port(struct qla_softc *sc, u_int32_t location,
178 u_int64_t port_name, u_int64_t node_name, struct qla_fc_port **prev)
179{
180 struct qla_fc_port *port, *locmatch, *wwnmatch;
181 locmatch = NULL((void *)0);
182 wwnmatch = NULL((void *)0);
183
184 /* make sure we don't try to add a port or location twice */
185 TAILQ_FOREACH(port, &sc->sc_ports_new, update) {
186 if ((port->port_name == port_name &&
187 port->node_name == node_name) ||
188 port->location == location) {
189 *prev = port;
190 return (QLA_PORT_DISP_DUP);
191 }
192 }
193
194 /* if we're attaching, everything is new */
195 if (sc->sc_scsibus == NULL((void *)0)) {
196 *prev = NULL((void *)0);
197 return (QLA_PORT_DISP_NEW);
198 }
199
200 TAILQ_FOREACH(port, &sc->sc_ports, ports) {
201 if (port->location == location)
202 locmatch = port;
203
204 if (port->port_name == port_name &&
205 port->node_name == node_name)
206 wwnmatch = port;
207 }
208
209 if (locmatch == NULL((void *)0) && wwnmatch == NULL((void *)0)) {
210 *prev = NULL((void *)0);
211 return (QLA_PORT_DISP_NEW);
212 } else if (locmatch == wwnmatch) {
213 *prev = locmatch;
214 return (QLA_PORT_DISP_SAME);
215 } else if (wwnmatch != NULL((void *)0)) {
216 *prev = wwnmatch;
217 return (QLA_PORT_DISP_MOVED);
218 } else {
219 *prev = locmatch;
220 return (QLA_PORT_DISP_CHANGED);
221 }
222}
223
224int
225qla_get_loop_id(struct qla_softc *sc, int start)
226{
227 int i, last;
228
229 if (sc->sc_2k_logins) {
230 i = QLA_2KL_MIN_HANDLE0x81;
231 last = QLA_2KL_MAX_HANDLE0x7EF;
232 } else {
233 /* if we're an F port, we can have two ranges, but meh */
234 i = QLA_MIN_HANDLE0x81;
235 last = QLA_MAX_HANDLE0xFE;
236 }
237 if (i < start)
238 i = start;
239
240 for (; i <= last; i++) {
241 if (sc->sc_targets[i] == NULL((void *)0))
242 return (i);
243 }
244
245 return (-1);
246}
247
248int
249qla_get_port_db(struct qla_softc *sc, u_int16_t loopid, struct qla_dmamem *mem)
250{
251 sc->sc_mbox[0] = QLA_MBOX_GET_PORT_DB0x0064;
252 if (sc->sc_2k_logins) {
253 sc->sc_mbox[1] = loopid;
254 } else {
255 sc->sc_mbox[1] = loopid << 8;
256 }
257
258 memset(QLA_DMA_KVA(mem), 0, sizeof(struct qla_get_port_db));
259 qla_mbox_putaddr(sc->sc_mbox, mem);
260 bus_dmamap_sync(sc->sc_dmat, QLA_DMA_MAP(mem), 0,
261 sizeof(struct qla_get_port_db), BUS_DMASYNC_PREREAD);
262 if (qla_mbox(sc, 0x00cf)) {
263 DPRINTF(QLA_D_PORT, "%s: get port db %d failed: %x\n",
264 DEVNAME(sc), loopid, sc->sc_mbox[0]);
265 return (1);
266 }
267
268 bus_dmamap_sync(sc->sc_dmat, QLA_DMA_MAP(mem), 0,
269 sizeof(struct qla_get_port_db), BUS_DMASYNC_POSTREAD);
270 return (0);
271}
272
273int
274qla_add_loop_port(struct qla_softc *sc, struct qla_fc_port *port)
275{
276 struct qla_get_port_db *pdb;
277 struct qla_fc_port *pport = NULL((void *)0);
278 int disp;
279
280 if (qla_get_port_db(sc, port->loopid, sc->sc_scratch)) {
281 return (1);
282 }
283 pdb = QLA_DMA_KVA(sc->sc_scratch);
284
285 if (letoh16(pdb->prli_svc_word3) & QLA_SVC3_TARGET_ROLE)
286 port->flags |= QLA_PORT_FLAG_IS_TARGET;
287
288 port->port_name = betoh64(pdb->port_name);
289 port->node_name = betoh64(pdb->node_name);
290 port->portid = (letoh16(pdb->port_id[0]) << 16) |
291 letoh16(pdb->port_id[1]);
292
293 mtx_enter(&sc->sc_port_mtx);
294 disp = qla_classify_port(sc, port->location, port->port_name,
295 port->node_name, &pport);
296 switch (disp) {
297 case QLA_PORT_DISP_CHANGED:
298 case QLA_PORT_DISP_MOVED:
299 case QLA_PORT_DISP_NEW:
300 TAILQ_INSERT_TAIL(&sc->sc_ports_new, port, update);
301 sc->sc_targets[port->loopid] = port;
302 break;
303 case QLA_PORT_DISP_DUP:
304 free(port, M_DEVBUF, sizeof *port);
305 break;
306 case QLA_PORT_DISP_SAME:
307 TAILQ_REMOVE(&sc->sc_ports_gone, pport, update);
308 free(port, M_DEVBUF, sizeof *port);
309 break;
310 }
311 mtx_leave(&sc->sc_port_mtx);
312
313 switch (disp) {
314 case QLA_PORT_DISP_CHANGED:
315 case QLA_PORT_DISP_MOVED:
316 case QLA_PORT_DISP_NEW:
317 DPRINTF(QLA_D_PORT, "%s: %s %d; name %llx, port %06x\n",
318 DEVNAME(sc), ISSET(port->flags, QLA_PORT_FLAG_IS_TARGET) ?
319 "target" : "non-target", port->loopid, port->port_name,
320 port->portid);
321 break;
322 }
323 return (0);
324}
325
326int
327qla_add_fabric_port(struct qla_softc *sc, struct qla_fc_port *port)
328{
329 struct qla_get_port_db *pdb;
330
331 if (qla_get_port_db(sc, port->loopid, sc->sc_scratch)) {
332 return (1);
333 }
334 pdb = QLA_DMA_KVA(sc->sc_scratch);
335
336 if (letoh16(pdb->prli_svc_word3) & QLA_SVC3_TARGET_ROLE)
337 port->flags |= QLA_PORT_FLAG_IS_TARGET;
338
339 /*
340 * if we only know about this port because qla_get_port_name_list
341 * returned it, we don't have its port id or node name, so fill
342 * those in and update its location.
343 */
344 if (port->location == QLA_LOCATION_FABRIC) {
345 port->node_name = betoh64(pdb->node_name);
346 port->port_name = betoh64(pdb->port_name);
347 port->portid = (letoh16(pdb->port_id[0]) << 16) |
348 letoh16(pdb->port_id[1]);
349 port->location = QLA_LOCATION_PORT_ID(port->portid);
350 }
351
352 mtx_enter(&sc->sc_port_mtx);
353 TAILQ_INSERT_TAIL(&sc->sc_ports_new, port, update);
354 sc->sc_targets[port->loopid] = port;
355 mtx_leave(&sc->sc_port_mtx);
356
357 DPRINTF(QLA_D_PORT, "%s: %s %d; name %llx\n",
358 DEVNAME(sc), ISSET(port->flags, QLA_PORT_FLAG_IS_TARGET) ?
359 "target" : "non-target", port->loopid, port->port_name);
360 return (0);
361}
362
363int
364qla_add_logged_in_port(struct qla_softc *sc, int loopid, u_int32_t portid)
365{
366 struct qla_fc_port *port;
367 struct qla_get_port_db *pdb;
368 u_int64_t node_name, port_name;
369 int flags, ret;
370
371 ret = qla_get_port_db(sc, loopid, sc->sc_scratch);
372 mtx_enter(&sc->sc_port_mtx);
373 if (ret != 0) {
374 /* put in a fake port to prevent use of this loop id */
375 printf("%s: loop id %d used, but can't see what's using it\n",
376 DEVNAME(sc)((sc)->sc_dev.dv_xname), loopid);
377 node_name = 0;
378 port_name = 0;
379 flags = 0;
380 } else {
381 pdb = QLA_DMA_KVA(sc->sc_scratch);
382 node_name = betoh64(pdb->node_name);
383 port_name = betoh64(pdb->port_name);
384 flags = 0;
385 if (letoh16(pdb->prli_svc_word3) & QLA_SVC3_TARGET_ROLE)
386 flags |= QLA_PORT_FLAG_IS_TARGET;
387
388 /* see if we've already found this port */
389 TAILQ_FOREACH(port, &sc->sc_ports_found, update) {
390 if ((port->node_name == node_name) &&
391 (port->port_name == port_name) &&
392 (port->portid == portid)) {
393 mtx_leave(&sc->sc_port_mtx);
394 DPRINTF(QLA_D_PORT, "%s: already found port "
395 "%06x\n", DEVNAME(sc), portid);
396 return (0);
397 }
398 }
399 }
400
401 port = malloc(sizeof(*port), M_DEVBUF, M_ZERO | M_NOWAIT);
402 if (port == NULL) {
403 mtx_leave(&sc->sc_port_mtx);
404 printf("%s: failed to allocate a port structure\n",
405 DEVNAME(sc));
406 return (1);
407 }
408 port->location = QLA_LOCATION_PORT_ID(portid);
409 port->port_name = port_name;
410 port->node_name = node_name;
411 port->loopid = loopid;
412 port->portid = portid;
413 port->flags = flags;
414
415 TAILQ_INSERT_TAIL(&sc->sc_ports, port, ports);
416 sc->sc_targets[port->loopid] = port;
417 mtx_leave(&sc->sc_port_mtx);
418
419 DPRINTF(QLA_D_PORT, "%s: added logged in port %06x at %d\n",
420 DEVNAME(sc), portid, loopid);
421 return (0);
422}
423
424int
425qla_attach(struct qla_softc *sc)
426{
427 struct scsibus_attach_args saa;
428 struct qla_init_cb *icb;
429#ifndef ISP_NOFIRMWARE
430 int (*loadfirmware)(struct qla_softc *) = NULL((void *)0);
431#endif
432 u_int16_t firmware_addr = 0;
433 u_int64_t dva;
434 int i, rv;
435
436 TAILQ_INIT(&sc->sc_ports);
437 TAILQ_INIT(&sc->sc_ports_new);
438 TAILQ_INIT(&sc->sc_ports_gone);
439 TAILQ_INIT(&sc->sc_ports_found);
440
441 switch (sc->sc_isp_gen) {
442 case QLA_GEN_ISP2100:
443 sc->sc_mbox_base = QLA_MBOX_BASE_21000x10;
444 sc->sc_regs = &qla_regs_2100;
445#ifndef ISP_NOFIRMWARE
446 loadfirmware = qla_load_firmware_2100;
447#endif
448 firmware_addr = QLA_2100_CODE_ORG0x1000;
449 break;
450
451 case QLA_GEN_ISP2200:
452 sc->sc_mbox_base = QLA_MBOX_BASE_22000x10;
453 sc->sc_regs = &qla_regs_2200;
454#ifndef ISP_NOFIRMWARE
455 loadfirmware = qla_load_firmware_2200;
456#endif
457 firmware_addr = QLA_2200_CODE_ORG0x1000;
458 break;
459
460 case QLA_GEN_ISP23XX:
461 sc->sc_mbox_base = QLA_MBOX_BASE_23XX0x40;
462 sc->sc_regs = &qla_regs_23XX;
463#ifndef ISP_NOFIRMWARE
464 if (sc->sc_isp_type != QLA_ISP2322)
465 loadfirmware = qla_load_firmware_2300;
466#endif
467 firmware_addr = QLA_2300_CODE_ORG0x0800;
468 break;
469
470 default:
471 printf("unknown isp type\n");
472 return (ENXIO6);
473 }
474
475 /* after reset, mbox registers 1-3 should contain the string "ISP " */
476 if (qla_read_mbox(sc, 1) != 0x4953 ||
477 qla_read_mbox(sc, 2) != 0x5020 ||
478 qla_read_mbox(sc, 3) != 0x2020) {
479 /* try releasing the risc processor */
480 qla_host_cmd(sc, QLA_HOST_CMD_RELEASE0x3);
481 }
482
483 qla_host_cmd(sc, QLA_HOST_CMD_PAUSE0x2);
484 if (qla_softreset(sc) != 0) {
485 printf("softreset failed\n");
486 return (ENXIO6);
487 }
488
489 if (qla_read_nvram(sc) == 0) {
490 sc->sc_nvram_valid = 1;
491 if (sc->sc_port_name == 0)
492 sc->sc_port_name = betoh64(sc->sc_nvram.port_name);
493 if (sc->sc_node_name == 0)
494 sc->sc_node_name = betoh64(sc->sc_nvram.node_name);
495 }
496
497 if (sc->sc_port_name == 0)
498 sc->sc_port_name = QLA_DEFAULT_PORT_NAME0x400000007F000003ULL;
499
500#ifdef ISP_NOFIRMWARE
501 if (qla_verify_firmware(sc, firmware_addr)) {
502 printf("%s: no firmware loaded\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
503 return (ENXIO6);
504 }
505#else
506 if (loadfirmware && (loadfirmware)(sc)) {
507 printf("%s: firmware load failed\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
508 return (ENXIO6);
509 }
510#endif
511
512 /* execute firmware */
513 sc->sc_mbox[0] = QLA_MBOX_EXEC_FIRMWARE0x0002;
514 sc->sc_mbox[1] = firmware_addr;
515#ifdef ISP_NOFIRMWARE
516 sc->sc_mbox[2] = 1;
517#else
518 if (loadfirmware)
519 sc->sc_mbox[2] = 0;
520 else
521 sc->sc_mbox[2] = 1;
522#endif
523 if (qla_mbox(sc, 0x0007)) {
524 printf("ISP couldn't exec firmware: %x\n", sc->sc_mbox[0]);
525 return (ENXIO6);
526 }
527
528 delay(250000)(*delay_func)(250000); /* from isp(4) */
529
530 sc->sc_mbox[0] = QLA_MBOX_ABOUT_FIRMWARE0x0008;
531 if (qla_mbox(sc, 0x0001)) {
532 printf("ISP not talking after firmware exec: %x\n",
533 sc->sc_mbox[0]);
534 return (ENXIO6);
535 }
536 printf("%s: firmware rev %d.%d.%d, attrs 0x%x\n", DEVNAME(sc)((sc)->sc_dev.dv_xname),
537 sc->sc_mbox[1], sc->sc_mbox[2], sc->sc_mbox[3], sc->sc_mbox[6]);
538
539 if (sc->sc_mbox[6] & QLA_FW_ATTR_EXPANDED_LUN0x0002)
540 sc->sc_expanded_lun = 1;
541 if (sc->sc_mbox[6] & QLA_FW_ATTR_FABRIC0x0004)
542 sc->sc_fabric = 1;
543 if (sc->sc_mbox[6] & QLA_FW_ATTR_2K_LOGINS0x0100)
544 sc->sc_2k_logins = 1;
545
546 /* work out how many ccbs to allocate */
547 sc->sc_mbox[0] = QLA_MBOX_GET_FIRMWARE_STATUS0x001F;
548 if (qla_mbox(sc, 0x0001)) {
549 printf("couldn't get firmware status: %x\n", sc->sc_mbox[0]);
550 return (ENXIO6);
551 }
552 sc->sc_maxcmds = sc->sc_mbox[2];
553
554 if (qla_alloc_ccbs(sc)) {
555 /* error already printed */
556 return (ENOMEM12);
557 }
558 sc->sc_scratch = qla_dmamem_alloc(sc, QLA_SCRATCH_SIZE0x1000);
559 if (sc->sc_scratch == NULL((void *)0)) {
560 printf("%s: unable to allocate scratch\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
561 goto free_ccbs;
562 }
563
564 /* build init buffer thing */
565 icb = (struct qla_init_cb *)QLA_DMA_KVA(sc->sc_scratch);
566 memset(icb, 0, sizeof(*icb));
567 icb->icb_version = QLA_ICB_VERSION;
568 /* port and node names are big-endian in the icb */
569 htobem32(&icb->icb_portname_hi, sc->sc_port_name >> 32);
570 htobem32(&icb->icb_portname_lo, sc->sc_port_name);
571 htobem32(&icb->icb_nodename_hi, sc->sc_node_name >> 32);
572 htobem32(&icb->icb_nodename_lo, sc->sc_node_name);
573 if (sc->sc_nvram_valid) {
574 icb->icb_fw_options = sc->sc_nvram.fw_options;
575 icb->icb_max_frame_len = sc->sc_nvram.frame_payload_size;
576 icb->icb_max_alloc = sc->sc_nvram.max_iocb_allocation;
577 icb->icb_exec_throttle = sc->sc_nvram.execution_throttle;
578 icb->icb_retry_count = sc->sc_nvram.retry_count;
579 icb->icb_retry_delay = sc->sc_nvram.retry_delay;
580 icb->icb_hardaddr = sc->sc_nvram.hard_address;
581 icb->icb_inquiry_data = sc->sc_nvram.inquiry_data;
582 icb->icb_login_timeout = sc->sc_nvram.login_timeout;
583 icb->icb_xfwoptions = sc->sc_nvram.add_fw_options;
584 icb->icb_zfwoptions = sc->sc_nvram.special_options;
585 } else {
586 /* defaults copied from isp(4) */
587 icb->icb_retry_count = 3;
588 icb->icb_retry_delay = 5;
589 icb->icb_exec_throttle = htole16(16);
590 icb->icb_max_alloc = htole16(256);
591 icb->icb_max_frame_len = htole16(1024);
592 icb->icb_fw_options = htole16(QLA_ICB_FW_FAIRNESS |
593 QLA_ICB_FW_ENABLE_PDB_CHANGED | QLA_ICB_FW_HARD_ADDR |
594 QLA_ICB_FW_FULL_DUPLEX);
595 }
596 /* target mode stuff that we don't care about */
597 icb->icb_lun_enables = 0;
598 icb->icb_cmd_count = 0;
599 icb->icb_notify_count = 0;
600 icb->icb_lun_timeout = 0;
601
602 /* "zero interrupt operation" */
603 icb->icb_int_delaytimer = 0;
604
605 icb->icb_req_out = 0;
606 icb->icb_resp_in = 0;
607 htolem16(&icb->icb_req_queue_len, sc->sc_maxcmds);
608 htolem16(&icb->icb_resp_queue_len, sc->sc_maxcmds);
609 dva = QLA_DMA_DVA(sc->sc_requests);
610 htolem32(&icb->icb_req_queue_addr_lo, dva);
611 htolem32(&icb->icb_req_queue_addr_hi, dva >> 32);
612 dva = QLA_DMA_DVA(sc->sc_responses);
613 htolem32(&icb->icb_resp_queue_addr_lo, dva);
614 htolem32(&icb->icb_resp_queue_addr_hi, dva >> 32);
615
616 /* adjust firmware options a bit */
617 icb->icb_fw_options |= htole16(QLA_ICB_FW_EXTENDED_INIT_CB);
618 icb->icb_fw_options &= ~htole16(QLA_ICB_FW_FAST_POST);
619
620 sc->sc_mbox[0] = QLA_MBOX_INIT_FIRMWARE;
621 sc->sc_mbox[4] = 0;
622 sc->sc_mbox[5] = 0;
623 qla_mbox_putaddr(sc->sc_mbox, sc->sc_scratch);
624 bus_dmamap_sync(sc->sc_dmat, QLA_DMA_MAP(sc->sc_scratch), 0,
625 sizeof(*icb), BUS_DMASYNC_PREWRITE);
626 rv = qla_mbox(sc, 0x00fd);
627 bus_dmamap_sync(sc->sc_dmat, QLA_DMA_MAP(sc->sc_scratch), 0,
628 sizeof(*icb), BUS_DMASYNC_POSTWRITE);
629
630 if (rv != 0) {
631 printf("%s: ISP firmware init failed: %x\n", DEVNAME(sc)((sc)->sc_dev.dv_xname),
632 sc->sc_mbox[0]);
633 goto free_scratch;
634 }
635
636 /* enable some more notifications */
637 sc->sc_mbox[0] = QLA_MBOX_SET_FIRMWARE_OPTIONS0x0038;
638 sc->sc_mbox[1] = QLA_FW_OPTION1_ASYNC_LIP_F80x0001 |
639 QLA_FW_OPTION1_ASYNC_LIP_RESET0x0002 |
640 QLA_FW_OPTION1_ASYNC_LIP_ERROR0x0080 |
641 QLA_FW_OPTION1_ASYNC_LOGIN_RJT0x0800;
642 sc->sc_mbox[2] = 0;
643 sc->sc_mbox[3] = 0;
644 if (qla_mbox(sc, 0x000f)) {
645 printf("%s: setting firmware options failed: %x\n",
646 DEVNAME(sc)((sc)->sc_dev.dv_xname), sc->sc_mbox[0]);
647 goto free_scratch;
648 }
649
650 sc->sc_update_taskq = taskq_create(DEVNAME(sc)((sc)->sc_dev.dv_xname), 1, IPL_BIO0x6, 0);
651 task_set(&sc->sc_update_task, qla_do_update, sc);
652
653 /* wait a bit for link to come up so we can scan and attach devices */
654 for (i = 0; i < QLA_WAIT_FOR_LOOP10 * 10000; i++) {
655 u_int16_t isr, info;
656
657 delay(100)(*delay_func)(100);
658
659 if (qla_read_isr(sc, &isr, &info) == 0)
660 continue;
661
662 qla_handle_intr(sc, isr, info);
663
664 if (sc->sc_loop_up)
665 break;
666 }
667
668 if (sc->sc_loop_up) {
669 qla_do_update(sc);
670 } else {
671 DPRINTF(QLA_D_PORT, "%s: loop still down, giving up\n",
672 DEVNAME(sc));
673 }
674
675 saa.saa_adapter = &qla_switch;
676 saa.saa_adapter_softc = sc;
677 if (sc->sc_2k_logins) {
678 saa.saa_adapter_buswidth = QLA_2KL_BUSWIDTH0x800;
679 } else {
680 saa.saa_adapter_buswidth = QLA_BUSWIDTH0x100;
681 }
682 saa.saa_adapter_target = SDEV_NO_ADAPTER_TARGET0xffff;
683 saa.saa_luns = 8;
684 saa.saa_openings = sc->sc_maxcmds;
685 saa.saa_pool = &sc->sc_iopool;
686 saa.saa_wwpn = sc->sc_port_name;
687 saa.saa_wwnn = sc->sc_node_name;
688 if (saa.saa_wwnn == 0) {
689 /*
690 * mask out the port number from the port name to get
691 * the node name.
692 */
693 saa.saa_wwnn = saa.saa_wwpn;
694 saa.saa_wwnn &= ~(0xfULL << 56);
695 }
696 saa.saa_quirks = saa.saa_flags = 0;
697
698 sc->sc_scsibus = (struct scsibus_softc *)config_found(&sc->sc_dev,
699 &saa, scsiprint);
700
701 return(0);
702
703free_scratch:
704 qla_dmamem_free(sc, sc->sc_scratch);
705free_ccbs:
706 qla_free_ccbs(sc);
707 return (ENXIO6);
708}
709
710int
711qla_detach(struct qla_softc *sc, int flags)
712{
713 return (0);
714}
715
716struct qla_ccb *
717qla_handle_resp(struct qla_softc *sc, u_int16_t id)
718{
719 struct qla_ccb *ccb;
720 struct qla_iocb_status *status;
721 struct scsi_xfer *xs;
722 u_int32_t handle;
723 u_int8_t *entry;
724
725 ccb = NULL((void *)0);
726 entry = QLA_DMA_KVA(sc->sc_responses) + (id * QLA_QUEUE_ENTRY_SIZE);
727
728 bus_dmamap_sync(sc->sc_dmat,
729 QLA_DMA_MAP(sc->sc_responses), id * QLA_QUEUE_ENTRY_SIZE,
730 QLA_QUEUE_ENTRY_SIZE, BUS_DMASYNC_POSTREAD);
731
732 qla_dump_iocb(sc, entry);
733 switch (entry[0]) {
734 case QLA_IOCB_STATUS0x03:
735 status = (struct qla_iocb_status *)entry;
736 handle = status->handle;
737 if (handle > sc->sc_maxcmds) {
738 panic("bad completed command handle: %d (> %d)",
739 handle, sc->sc_maxcmds);
740 }
741
742 ccb = &sc->sc_ccbs[handle];
743 xs = ccb->ccb_xs;
744 if (xs == NULL((void *)0)) {
745 DPRINTF(QLA_D_INTR, "%s: got status for inactive"
746 " ccb %d\n", DEVNAME(sc), handle);
747 ccb = NULL((void *)0);
748 break;
749 }
750 if (xs->io != ccb) {
751 panic("completed command handle doesn't match xs "
752 "(handle %d, ccb %p, xs->io %p)", handle, ccb,
753 xs->io);
754 }
755
756 if (xs->datalen > 0) {
757 if (ccb->ccb_dmamap->dm_nsegs >
758 QLA_IOCB_SEGS_PER_CMD) {
759 bus_dmamap_sync(sc->sc_dmat,
760 QLA_DMA_MAP(sc->sc_segments),
761 ccb->ccb_seg_offset,
762 sizeof(*ccb->ccb_t4segs) *
763 ccb->ccb_dmamap->dm_nsegs,
764 BUS_DMASYNC_POSTWRITE);
765 }
766
767 bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
768 ccb->ccb_dmamap->dm_mapsize,
769 (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_POSTREAD :
770 BUS_DMASYNC_POSTWRITE);
771 bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
772 }
773
774 xs->status = letoh16(status->scsi_status);
775 switch (letoh16(status->completion)) {
776 case QLA_IOCB_STATUS_COMPLETE:
777 if (letoh16(status->scsi_status) &
778 QLA_SCSI_STATUS_SENSE_VALID) {
779 memcpy(&xs->sense, status->sense_data,
780 sizeof(xs->sense));
781 xs->error = XS_SENSE1;
782 } else {
783 xs->error = XS_NOERROR0;
784 }
785 xs->resid = 0;
786 break;
787
788 case QLA_IOCB_STATUS_DMA_ERROR0x0002:
789 DPRINTF(QLA_D_INTR, "%s: dma error\n", DEVNAME(sc));
790 /* set resid apparently? */
791 break;
792
793 case QLA_IOCB_STATUS_RESET0x0004:
794 DPRINTF(QLA_D_IO, "%s: reset destroyed command\n",
795 DEVNAME(sc));
796 sc->sc_marker_required = 1;
797 xs->error = XS_RESET8;
798 break;
799
800 case QLA_IOCB_STATUS_ABORTED0x0005:
801 DPRINTF(QLA_D_IO, "%s: aborted\n", DEVNAME(sc));
802 sc->sc_marker_required = 1;
803 xs->error = XS_DRIVER_STUFFUP2;
804 break;
805
806 case QLA_IOCB_STATUS_TIMEOUT0x0006:
807 DPRINTF(QLA_D_IO, "%s: command timed out\n",
808 DEVNAME(sc));
809 xs->error = XS_TIMEOUT4;
810 break;
811
812 case QLA_IOCB_STATUS_DATA_OVERRUN0x0007:
813 case QLA_IOCB_STATUS_DATA_UNDERRUN0x0015:
814 xs->resid = letoh32(status->resid)((__uint32_t)(status->resid));
815 xs->error = XS_NOERROR0;
816 break;
817
818 case QLA_IOCB_STATUS_QUEUE_FULL0x001C:
819 DPRINTF(QLA_D_IO, "%s: queue full\n", DEVNAME(sc));
820 xs->error = XS_BUSY5;
821 break;
822
823 case QLA_IOCB_STATUS_PORT_UNAVAIL0x0028:
824 case QLA_IOCB_STATUS_PORT_LOGGED_OUT0x0029:
825 case QLA_IOCB_STATUS_PORT_CHANGED0x002A:
826 DPRINTF(QLA_D_IO, "%s: dev gone\n", DEVNAME(sc));
827 xs->error = XS_SELTIMEOUT3;
828 break;
829
830 default:
831 DPRINTF(QLA_D_INTR, "%s: unexpected completion"
832 " status %x\n", DEVNAME(sc), status->completion);
833 xs->error = XS_DRIVER_STUFFUP2;
834 break;
835 }
836 break;
837
838 case QLA_IOCB_STATUS_CONT0x10:
839 DPRINTF(QLA_D_INTR, "%s: ignoring status continuation iocb\n",
840 DEVNAME(sc));
841 break;
842
843 /* check for requests that bounce back? */
844 default:
845 DPRINTF(QLA_D_INTR, "%s: unexpected response entry type %x\n",
846 DEVNAME(sc), entry[0]);
847 break;
848 }
849
850 return (ccb);
851}
852
853void
854qla_handle_intr(struct qla_softc *sc, u_int16_t isr, u_int16_t info)
855{
856 int i;
857 u_int16_t rspin;
858 struct qla_ccb *ccb;
859
860 switch (isr) {
861 case QLA_INT_TYPE_ASYNC2:
862 qla_async(sc, info);
863 break;
864
865 case QLA_INT_TYPE_IO3:
866 rspin = qla_queue_read(sc, sc->sc_regs->res_in);
867 if (rspin == sc->sc_last_resp_id) {
868 /* seems to happen a lot on 2200s when mbox commands
869 * complete but it doesn't want to give us the register
870 * semaphore, or something.
871 *
872 * if we're waiting on a mailbox command, don't ack
873 * the interrupt yet.
874 */
875 if (sc->sc_mbox_pending) {
876 DPRINTF(QLA_D_MBOX, "%s: ignoring premature"
877 " mbox int\n", DEVNAME(sc));
878 return;
879 }
880
881 break;
882 }
883
884 if (sc->sc_responses == NULL((void *)0))
885 break;
886
887 DPRINTF(QLA_D_IO, "%s: response queue %x=>%x\n",
888 DEVNAME(sc), sc->sc_last_resp_id, rspin);
889
890 do {
891 ccb = qla_handle_resp(sc, sc->sc_last_resp_id);
892 if (ccb)
893 scsi_done(ccb->ccb_xs);
894
895 sc->sc_last_resp_id++;
896 sc->sc_last_resp_id %= sc->sc_maxcmds;
897 } while (sc->sc_last_resp_id != rspin);
898
899 qla_queue_write(sc, sc->sc_regs->res_out, rspin);
900 break;
901
902 case QLA_INT_TYPE_MBOX1:
903 mtx_enter(&sc->sc_mbox_mtx);
904 if (sc->sc_mbox_pending) {
905 DPRINTF(QLA_D_MBOX, "%s: mbox response %x\n",
906 DEVNAME(sc), info);
907 for (i = 0; i < nitems(sc->sc_mbox)(sizeof((sc->sc_mbox)) / sizeof((sc->sc_mbox)[0])); i++) {
908 sc->sc_mbox[i] = qla_read_mbox(sc, i);
909 }
910 sc->sc_mbox_pending = 2;
911 wakeup(sc->sc_mbox);
912 mtx_leave(&sc->sc_mbox_mtx);
913 } else {
914 mtx_leave(&sc->sc_mbox_mtx);
915 DPRINTF(QLA_D_MBOX, "%s: unexpected mbox interrupt:"
916 " %x\n", DEVNAME(sc), info);
917 }
918 break;
919
920 default:
921 /* maybe log something? */
922 break;
923 }
924
925 qla_clear_isr(sc, isr);
926}
927
928int
929qla_intr(void *xsc)
930{
931 struct qla_softc *sc = xsc;
932 u_int16_t isr;
933 u_int16_t info;
934
935 if (qla_read_isr(sc, &isr, &info) == 0)
936 return (0);
937
938 qla_handle_intr(sc, isr, info);
939 return (1);
940}
941
942int
943qla_scsi_probe(struct scsi_link *link)
944{
945 struct qla_softc *sc = link->bus->sb_adapter_softc;
946 int rv = 0;
947
948 mtx_enter(&sc->sc_port_mtx);
949 if (sc->sc_targets[link->target] == NULL((void *)0))
950 rv = ENXIO6;
951 else if (!ISSET(sc->sc_targets[link->target]->flags,
952 QLA_PORT_FLAG_IS_TARGET))
953 rv = ENXIO;
954 else {
955 link->port_wwn = sc->sc_targets[link->target]->port_name;
956 link->node_wwn = sc->sc_targets[link->target]->node_name;
957 }
958 mtx_leave(&sc->sc_port_mtx);
959
960 return (rv);
961}
962
963void
964qla_scsi_cmd(struct scsi_xfer *xs)
965{
966 struct scsi_link *link = xs->sc_link;
967 struct qla_softc *sc = link->bus->sb_adapter_softc;
968 struct qla_ccb *ccb;
969 struct qla_iocb_req34 *iocb;
970 struct qla_ccb_list list;
971 u_int16_t req, rspin;
972 int offset, error, done;
973 bus_dmamap_t dmap;
974
975 if (xs->cmdlen > sizeof(iocb->req_cdb)) {
976 DPRINTF(QLA_D_IO, "%s: cdb too big (%d)\n", DEVNAME(sc),
977 xs->cmdlen);
978 memset(&xs->sense, 0, sizeof(xs->sense));
979 xs->sense.error_code = SSD_ERRCODE_VALID0x80 | SSD_ERRCODE_CURRENT0x70;
980 xs->sense.flags = SKEY_ILLEGAL_REQUEST0x05;
981 xs->sense.add_sense_code = 0x20;
982 xs->error = XS_SENSE1;
983 scsi_done(xs);
984 return;
985 }
986
987 ccb = xs->io;
988 dmap = ccb->ccb_dmamap;
989 if (xs->datalen > 0) {
990 error = bus_dmamap_load(sc->sc_dmat, dmap, xs->data,
991 xs->datalen, NULL, (xs->flags & SCSI_NOSLEEP) ?
992 BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
993 if (error) {
994 xs->error = XS_DRIVER_STUFFUP2;
995 scsi_done(xs);
996 return;
997 }
998
999 bus_dmamap_sync(sc->sc_dmat, dmap, 0,
1000 dmap->dm_mapsize,
1001 (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_PREREAD :
1002 BUS_DMASYNC_PREWRITE);
1003 }
1004
1005 mtx_enter(&sc->sc_queue_mtx);
1006
1007 /* put in a sync marker if required */
1008 if (sc->sc_marker_required) {
1009 req = sc->sc_next_req_id++;
1010 if (sc->sc_next_req_id == sc->sc_maxcmds)
1011 sc->sc_next_req_id = 0;
1012
1013 DPRINTF(QLA_D_IO, "%s: writing marker at request %d\n",
1014 DEVNAME(sc), req);
1015 offset = (req * QLA_QUEUE_ENTRY_SIZE);
1016 iocb = QLA_DMA_KVA(sc->sc_requests) + offset;
1017 bus_dmamap_sync(sc->sc_dmat, QLA_DMA_MAP(sc->sc_requests),
1018 offset, QLA_QUEUE_ENTRY_SIZE, BUS_DMASYNC_POSTWRITE);
1019 qla_put_marker(sc, iocb);
1020 qla_queue_write(sc, sc->sc_regs->req_in, sc->sc_next_req_id);
1021 sc->sc_marker_required = 0;
1022 }
1023
1024 req = sc->sc_next_req_id++;
1025 if (sc->sc_next_req_id == sc->sc_maxcmds)
1026 sc->sc_next_req_id = 0;
1027
1028 offset = (req * QLA_QUEUE_ENTRY_SIZE);
1029 iocb = QLA_DMA_KVA(sc->sc_requests) + offset;
1030 bus_dmamap_sync(sc->sc_dmat, QLA_DMA_MAP(sc->sc_requests), offset,
1031 QLA_QUEUE_ENTRY_SIZE, BUS_DMASYNC_POSTWRITE);
1032
1033 ccb->ccb_xs = xs;
1034
1035 DPRINTF(QLA_D_IO, "%s: writing cmd at request %d\n", DEVNAME(sc), req);
1036 qla_put_cmd(sc, iocb, xs, ccb);
1037
1038 qla_queue_write(sc, sc->sc_regs->req_in, sc->sc_next_req_id);
1039
1040 if (!ISSET(xs->flags, SCSI_POLL)) {
1041 mtx_leave(&sc->sc_queue_mtx);
1042 return;
1043 }
1044
1045 done = 0;
1046 SIMPLEQ_INIT(&list);
1047 do {
1048 u_int16_t isr, info;
1049
1050 delay(100)(*delay_func)(100);
1051
1052 if (qla_read_isr(sc, &isr, &info) == 0) {
1053 continue;
1054 }
1055
1056 if (isr != QLA_INT_TYPE_IO3) {
1057 qla_handle_intr(sc, isr, info);
1058 continue;
1059 }
1060
1061 rspin = qla_queue_read(sc, sc->sc_regs->res_in);
1062 while (rspin != sc->sc_last_resp_id) {
1063 ccb = qla_handle_resp(sc, sc->sc_last_resp_id);
1064
1065 sc->sc_last_resp_id++;
1066 if (sc->sc_last_resp_id == sc->sc_maxcmds)
1067 sc->sc_last_resp_id = 0;
1068
1069 if (ccb != NULL)
1070 SIMPLEQ_INSERT_TAIL(&list, ccb, ccb_link);
1071 if (ccb == xs->io)
1072 done = 1;
1073 }
1074 qla_queue_write(sc, sc->sc_regs->res_out, rspin);
1075 qla_clear_isr(sc, isr);
1076 } while (done == 0);
1077
1078 mtx_leave(&sc->sc_queue_mtx);
1079
1080 while ((ccb = SIMPLEQ_FIRST(&list)) != NULL) {
1081 SIMPLEQ_REMOVE_HEAD(&list, ccb_link);
1082 scsi_done(ccb->ccb_xs);
1083 }
1084}
1085
1086u_int16_t
1087qla_read(struct qla_softc *sc, bus_size_t offset)
1088{
1089 u_int16_t v;
1090 v = bus_space_read_2(sc->sc_iot, sc->sc_ioh, offset);
1091 bus_space_barrier(sc->sc_iot, sc->sc_ioh, offset, 2,
1092 BUS_SPACE_BARRIER_READ0x01 | BUS_SPACE_BARRIER_WRITE0x02);
1093 return (v);
1094}
1095
1096void
1097qla_write(struct qla_softc *sc, bus_size_t offset, u_int16_t value)
1098{
1099 bus_space_write_2(sc->sc_iot, sc->sc_ioh, offset, value);
1100 bus_space_barrier(sc->sc_iot, sc->sc_ioh, offset, 2,
1101 BUS_SPACE_BARRIER_READ0x01 | BUS_SPACE_BARRIER_WRITE0x02);
1102}
1103
1104u_int16_t
1105qla_read_mbox(struct qla_softc *sc, int mbox)
1106{
1107 /* could range-check mboxes according to chip type? */
1108 return (qla_read(sc, sc->sc_mbox_base + (mbox * 2)));
1109}
1110
1111void
1112qla_write_mbox(struct qla_softc *sc, int mbox, u_int16_t value)
1113{
1114 qla_write(sc, sc->sc_mbox_base + (mbox * 2), value);
1115}
1116
1117void
1118qla_host_cmd(struct qla_softc *sc, u_int16_t cmd)
1119{
1120 qla_write(sc, QLA_HOST_CMD_CTRL0xC0, cmd << QLA_HOST_CMD_SHIFT12);
1121}
1122
1123#define MBOX_COMMAND_TIMEOUT4000 4000
1124
1125int
1126qla_mbox(struct qla_softc *sc, int maskin)
1127{
1128 int i;
1129 int result = 0;
1130 int rv;
1131
1132 sc->sc_mbox_pending = 1;
1133 for (i = 0; i < nitems(sc->sc_mbox)(sizeof((sc->sc_mbox)) / sizeof((sc->sc_mbox)[0])); i++) {
1134 if (maskin & (1 << i)) {
1135 qla_write_mbox(sc, i, sc->sc_mbox[i]);
1136 }
1137 }
1138 qla_host_cmd(sc, QLA_HOST_CMD_SET_HOST_INT0x5);
1139
1140 if (sc->sc_scsibus != NULL((void *)0)) {
1141 mtx_enter(&sc->sc_mbox_mtx);
1142 sc->sc_mbox_pending = 1;
1143 while (sc->sc_mbox_pending == 1) {
1144 msleep_nsec(sc->sc_mbox, &sc->sc_mbox_mtx, PRIBIO16,
1145 "qlambox", INFSLP0xffffffffffffffffULL);
1146 }
1147 result = sc->sc_mbox[0];
1148 sc->sc_mbox_pending = 0;
1149 mtx_leave(&sc->sc_mbox_mtx);
1150 return (result == QLA_MBOX_COMPLETE0x4000 ? 0 : result);
1151 }
1152
1153 for (i = 0; i < MBOX_COMMAND_TIMEOUT4000 && result == 0; i++) {
1154 u_int16_t isr, info;
1155
1156 delay(100)(*delay_func)(100);
1157
1158 if (qla_read_isr(sc, &isr, &info) == 0)
1159 continue;
1160
1161 switch (isr) {
1162 case QLA_INT_TYPE_MBOX1:
1163 result = info;
1164 break;
1165
1166 default:
1167 qla_handle_intr(sc, isr, info);
1168 break;
1169 }
1170 }
1171
1172 if (result == 0) {
1173 /* timed out; do something? */
1174 DPRINTF(QLA_D_MBOX, "%s: mbox timed out\n", DEVNAME(sc));
1175 rv = 1;
1176 } else {
1177 for (i = 0; i < nitems(sc->sc_mbox)(sizeof((sc->sc_mbox)) / sizeof((sc->sc_mbox)[0])); i++) {
1178 sc->sc_mbox[i] = qla_read_mbox(sc, i);
1179 }
1180 rv = (result == QLA_MBOX_COMPLETE0x4000 ? 0 : result);
1181 }
1182
1183 qla_clear_isr(sc, QLA_INT_TYPE_MBOX1);
1184 sc->sc_mbox_pending = 0;
1185 return (rv);
1186}
1187
1188void
1189qla_mbox_putaddr(u_int16_t *mbox, struct qla_dmamem *mem)
1190{
1191 mbox[2] = (QLA_DMA_DVA(mem) >> 16) & 0xffff;
1192 mbox[3] = (QLA_DMA_DVA(mem) >> 0) & 0xffff;
1193 mbox[6] = (QLA_DMA_DVA(mem) >> 48) & 0xffff;
1194 mbox[7] = (QLA_DMA_DVA(mem) >> 32) & 0xffff;
1195}
1196
1197int
1198qla_sns_req(struct qla_softc *sc, struct qla_dmamem *mem, int reqsize)
1199{
1200 struct qla_sns_req_hdr *header;
1201 uint64_t dva;
1202 int rv;
1203
1204 memset(&sc->sc_mbox, 0, sizeof(sc->sc_mbox));
1205 sc->sc_mbox[0] = QLA_MBOX_SEND_SNS;
1206 sc->sc_mbox[1] = reqsize / 2;
1207 qla_mbox_putaddr(sc->sc_mbox, mem);
1208
1209 header = QLA_DMA_KVA(mem);
1210 htolem16(&header->resp_len, (QLA_DMA_LEN(mem) - reqsize) / 2);
1211 dva = QLA_DMA_DVA(mem) + reqsize;
1212 htolem32(&header->resp_addr_lo, dva);
1213 htolem32(&header->resp_addr_hi, dva >> 32);
1214 header->subcmd_len = htole16((reqsize - sizeof(*header)) / 2);
1215
1216 bus_dmamap_sync(sc->sc_dmat, QLA_DMA_MAP(mem), 0, QLA_DMA_LEN(mem),
1217 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1218 rv = qla_mbox(sc, 0x00cf);
1219 bus_dmamap_sync(sc->sc_dmat, QLA_DMA_MAP(mem), 0, QLA_DMA_LEN(mem),
1220 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1221
1222 return (rv);
1223}
1224
1225void
1226qla_set_ints(struct qla_softc *sc, int enabled)
1227{
1228 u_int16_t v = enabled ? (QLA_INT_REQ0x8000 | QLA_RISC_INT_REQ0x0008) : 0;
1229 qla_write(sc, QLA_INT_CTRL0x08, v);
1230}
1231
1232int
1233qla_read_isr_1G(struct qla_softc *sc, u_int16_t *isr, u_int16_t *info)
1234{
1235 u_int16_t int_status;
1236
1237 if (qla_read(sc, QLA_SEMA) & QLA_SEMA_LOCK) {
1238 *info = qla_read_mbox(sc, 0);
1239 if (*info & QLA_MBOX_HAS_STATUS)
1240 *isr = QLA_INT_TYPE_MBOX;
1241 else
1242 *isr = QLA_INT_TYPE_ASYNC;
1243 } else {
1244 int_status = qla_read(sc, QLA_INT_STATUS);
1245 if ((int_status & QLA_INT_REQ) == 0)
1246 return (0);
1247
1248 *isr = QLA_INT_TYPE_IO;
1249 }
1250
1251 return (1);
1252}
1253
1254int
1255qla_read_isr_2G(struct qla_softc *sc, u_int16_t *isr, u_int16_t *info)
1256{
1257 u_int32_t v;
1258
1259 if ((qla_read(sc, QLA_INT_STATUS) & QLA_INT_REQ) == 0)
1260 return (0);
1261
1262 v = bus_space_read_4(sc->sc_iot, sc->sc_ioh, QLA_RISC_STATUS_LOW);
1263 bus_space_barrier(sc->sc_iot, sc->sc_ioh, QLA_RISC_STATUS_LOW,
1264 4, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
1265
1266 switch (v & QLA_INT_STATUS_MASK) {
1267 case QLA_23XX_INT_ROM_MBOX:
1268 case QLA_23XX_INT_ROM_MBOX_FAIL:
1269 case QLA_23XX_INT_MBOX:
1270 case QLA_23XX_INT_MBOX_FAIL:
1271 *isr = QLA_INT_TYPE_MBOX;
1272 break;
1273
1274 case QLA_23XX_INT_ASYNC:
1275 *isr = QLA_INT_TYPE_ASYNC;
1276 break;
1277
1278 case QLA_23XX_INT_RSPQ:
1279 *isr = QLA_INT_TYPE_IO;
1280 break;
1281
1282 default:
1283 *isr = QLA_INT_TYPE_OTHER;
1284 break;
1285 }
1286
1287 *info = (v >> QLA_INT_INFO_SHIFT);
1288
1289 return (1);
1290}
1291
1292void
1293qla_clear_isr(struct qla_softc *sc, u_int16_t isr)
1294{
1295 qla_host_cmd(sc, QLA_HOST_CMD_CLR_RISC_INT);
1296 switch (isr) {
1297 case QLA_INT_TYPE_MBOX:
1298 case QLA_INT_TYPE_ASYNC:
1299 qla_write(sc, QLA_SEMA, 0);
1300 break;
1301 default:
1302 break;
1303 }
1304}
1305
1306u_int16_t
1307qla_read_queue_2100(struct qla_softc *sc, bus_size_t queue)
1308{
1309 u_int16_t a, b, i;
1310
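The queue pointer register is read twice and a value is only accepted once two consecutive reads agree, presumably to avoid catching the register mid-update; after 1000 unstable attempts the last value read is returned anyway.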
1311 for (i = 0; i < 1000; i++) {
1312 a = qla_read(sc, queue);
1313 b = qla_read(sc, queue);
1314
1315 if (a == b)
1316 return (a);
1317 }
1318
1319 DPRINTF(QLA_D_INTR, "%s: queue ptr unstable\n", DEVNAME(sc));
1320
1321 return (a);
1322}
1323
1324int
1325qla_softreset(struct qla_softc *sc)
1326{
1327 int i;
1328 qla_set_ints(sc, 0);
1329
1330 /* reset */
1331 qla_write(sc, QLA_CTRL_STATUS, QLA_CTRL_RESET);
1332 delay(100);
1333 /* clear data and control dma engines? */
1334
1335 /* wait for soft reset to clear */
1336 for (i = 0; i < 1000; i++) {
1337 if ((qla_read(sc, QLA_CTRL_STATUS) & QLA_CTRL_RESET) == 0)
1338 break;
1339
1340 delay(100);
1341 }
1342
1343 if (i == 1000) {
1344 DPRINTF(QLA_D_INTR, "%s: reset didn't clear\n", DEVNAME(sc));
1345 qla_set_ints(sc, 0);
1346 return (ENXIO);
1347 }
1348
1349 /* reset FPM */
1350 qla_write(sc, QLA_CTRL_STATUS, QLA_CTRL_FPM0_REGS);
1351 qla_write(sc, QLA_FPM_DIAG, QLA_FPM_RESET);
1352 qla_write(sc, QLA_FPM_DIAG, 0); /* isp(4) doesn't do this? */
1353 qla_write(sc, QLA_CTRL_STATUS, QLA_CTRL_RISC_REGS);
1354
1355 /* reset risc processor */
1356 qla_host_cmd(sc, QLA_HOST_CMD_RESET);
1357 delay(100);
1358 qla_write(sc, QLA_SEMA, 0);
1359 qla_host_cmd(sc, QLA_HOST_CMD_MASK_PARITY); /* from isp(4) */
1360 qla_host_cmd(sc, QLA_HOST_CMD_RELEASE);
1361
1362 /* reset queue pointers */
1363 qla_queue_write(sc, sc->sc_regs->req_in, 0);
1364 qla_queue_write(sc, sc->sc_regs->req_out, 0);
1365 qla_queue_write(sc, sc->sc_regs->res_in, 0);
1366 qla_queue_write(sc, sc->sc_regs->res_out, 0);
1367
1368 qla_set_ints(sc, 1);
1369 /* isp(4) sends QLA_HOST_CMD_BIOS here.. not documented? */
1370
1371 /* do a basic mailbox operation to check we're alive */
1372 sc->sc_mbox[0] = QLA_MBOX_NOP;
1373 if (qla_mbox(sc, 0x0001)) {
1374 DPRINTF(QLA_D_INTR, "%s: ISP not responding after reset\n",
1375 DEVNAME(sc));
1376 return (ENXIO);
1377 }
1378
1379 return (0);
1380}
1381
1382void
1383qla_update_topology(struct qla_softc *sc)
1384{
1385 sc->sc_mbox[0] = QLA_MBOX_GET_LOOP_ID;
1386 if (qla_mbox(sc, 0x0001)) {
1387 DPRINTF(QLA_D_PORT, "%s: unable to get loop id\n", DEVNAME(sc));
1388 sc->sc_topology = QLA_TOPO_N_PORT_NO_TARGET;
1389 } else {
1390 sc->sc_topology = sc->sc_mbox[6];
1391 sc->sc_loop_id = sc->sc_mbox[1];
1392
1393 switch (sc->sc_topology) {
1394 case QLA_TOPO_NL_PORT:
1395 case QLA_TOPO_N_PORT:
1396 DPRINTF(QLA_D_PORT, "%s: loop id %d\n", DEVNAME(sc),
1397 sc->sc_loop_id);
1398 break;
1399
1400 case QLA_TOPO_FL_PORT:
1401 case QLA_TOPO_F_PORT:
1402 sc->sc_port_id = sc->sc_mbox[2] |
1403 (sc->sc_mbox[3] << 16);
1404 DPRINTF(QLA_D_PORT, "%s: fabric port id %06x\n",
1405 DEVNAME(sc), sc->sc_port_id);
1406 break;
1407
1408 case QLA_TOPO_N_PORT_NO_TARGET:
1409 default:
1410 DPRINTF(QLA_D_PORT, "%s: not connected\n", DEVNAME(sc));
1411 break;
1412 }
1413
1414 switch (sc->sc_topology) {
1415 case QLA_TOPO_NL_PORT:
1416 case QLA_TOPO_FL_PORT:
1417 sc->sc_loop_max_id = 126;
1418 break;
1419
1420 case QLA_TOPO_N_PORT:
1421 sc->sc_loop_max_id = 2;
1422 break;
1423
1424 default:
1425 sc->sc_loop_max_id = 0;
1426 break;
1427 }
1428 }
1429}
1430
1431int
1432qla_update_fabric(struct qla_softc *sc)
1433{
1434 struct qla_sns_rft_id *rft;
1435
1436 if (sc->sc_fabric == 0)
1437 return (0);
1438
1439 switch (sc->sc_topology) {
1440 case QLA_TOPO_F_PORT:
1441 case QLA_TOPO_FL_PORT:
1442 break;
1443
1444 default:
1445 return (0);
1446 }
1447
1448 /* get the name server's port db entry */
1449 sc->sc_mbox[0] = QLA_MBOX_GET_PORT_DB;
1450 if (sc->sc_2k_logins) {
1451 sc->sc_mbox[1] = QLA_F_PORT_HANDLE;
1452 } else {
1453 sc->sc_mbox[1] = QLA_F_PORT_HANDLE << 8;
1454 }
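With the 2K-login firmware (sc_2k_logins) a handle occupies all 16 bits of mbox[1]; older firmware expects it in the high byte, which is why the handle is shifted left by 8 in the non-2K case here and in the fabric login/logout paths further down.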
1455 qla_mbox_putaddr(sc->sc_mbox, sc->sc_scratch);
1456 bus_dmamap_sync(sc->sc_dmat, QLA_DMA_MAP(sc->sc_scratch), 0,
1457 sizeof(struct qla_get_port_db), BUS_DMASYNC_PREREAD);
1458 if (qla_mbox(sc, 0x00cf)) {
1459 DPRINTF(QLA_D_PORT, "%s: get port db for SNS failed: %x\n",
1460 DEVNAME(sc), sc->sc_mbox[0]);
1461 sc->sc_sns_port_name = 0;
1462 } else {
1463 struct qla_get_port_db *pdb;
1464 bus_dmamap_sync(sc->sc_dmat, QLA_DMA_MAP(sc->sc_scratch), 0,
1465 sizeof(struct qla_get_port_db), BUS_DMASYNC_POSTREAD);
1466 pdb = QLA_DMA_KVA(sc->sc_scratch);
1467 DPRINTF(QLA_D_PORT, "%s: SNS port name %llx\n", DEVNAME(sc),
1468 betoh64(pdb->port_name));
1469 sc->sc_sns_port_name = betoh64(pdb->port_name);
1470 }
1471
1472 /*
1473 * register fc4 types with the fabric
1474 * some switches do this automatically, but apparently
1475 * some don't.
1476 */
1477 rft = QLA_DMA_KVA(sc->sc_scratch);
1478 memset(rft, 0, sizeof(*rft) + sizeof(struct qla_sns_req_hdr));
1479 rft->subcmd = htole16(QLA_SNS_RFT_ID);
1480 rft->max_word = htole16(sizeof(struct qla_sns_req_hdr) / 4);
1481 rft->port_id = htole32(sc->sc_port_id);
1482 rft->fc4_types[0] = htole32(1 << QLA_FC4_SCSI);
1483 if (qla_sns_req(sc, sc->sc_scratch, sizeof(*rft))) {
1484 DPRINTF(QLA_D_PORT, "%s: RFT_ID failed\n", DEVNAME(sc));
1485 /* we might be able to continue after this fails */
1486 }
1487
1488 return (1);
1489}
1490
1491int
1492qla_get_port_name_list(struct qla_softc *sc, u_int32_t match)
1493{
1494 int i;
1495 struct qla_port_name_list *l;
1496 struct qla_fc_port *port;
1497
1498 sc->sc_mbox[0] = QLA_MBOX_GET_PORT_NAME_LIST;
1499 sc->sc_mbox[1] = 0x08; /* include initiators */
1500 if (match & QLA_LOCATION_FABRIC)
1501 sc->sc_mbox[1] |= 0x02; /* return all loop ids */
1502 qla_mbox_putaddr(sc->sc_mbox, sc->sc_scratch);
1503 bus_dmamap_sync(sc->sc_dmat, QLA_DMA_MAP(sc->sc_scratch), 0,
1504 QLA_DMA_LEN(sc->sc_scratch), BUS_DMASYNC_PREREAD);
1505 if (qla_mbox(sc, 0x04f)) {
1506 DPRINTF(QLA_D_PORT, "%s: get port name list failed: %x\n",
1507 DEVNAME(sc), sc->sc_mbox[0]);
1508 return (1);
1509 }
1510 bus_dmamap_sync(sc->sc_dmat, QLA_DMA_MAP(sc->sc_scratch), 0,
1511 QLA_DMA_LEN(sc->sc_scratch), BUS_DMASYNC_PREREAD);
1512
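On completion the firmware has filled the scratch buffer with qla_port_name_list entries; the loop below treats sc_mbox[1] as the length of that list in bytes and walks it one entry at a time.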
1513 i = 0;
1514 l = QLA_DMA_KVA(sc->sc_scratch);
1515 mtx_enter(&sc->sc_port_mtx);
1516 while (i * sizeof(*l) < sc->sc_mbox[1]) {
1517 u_int16_t loopid;
1518 u_int32_t loc;
1519
1520 loopid = letoh16(l[i].loop_id);
1521 /* skip special ports */
1522 switch (loopid) {
1523 case QLA_F_PORT_HANDLE:
1524 case QLA_SNS_HANDLE:
1525 case QLA_FABRIC_CTRL_HANDLE:
1526 loc = 0;
1527 break;
1528 default:
1529 if (loopid <= sc->sc_loop_max_id) {
1530 loc = QLA_LOCATION_LOOP_ID(loopid);
1531 } else {
1532 /*
1533 * we don't have the port id here, so just
1534 * indicate it's a fabric port.
1535 */
1536 loc = QLA_LOCATION_FABRIC;
1537 }
1538 }
1539
1540 if (match & loc) {
1541 port = malloc(sizeof(*port), M_DEVBUF, M_ZERO |
1542 M_NOWAIT);
1543 if (port == NULL) {
1544 printf("%s: failed to allocate port struct\n",
1545 DEVNAME(sc));
1546 break;
1547 }
1548 port->location = loc;
1549 port->loopid = loopid;
1550 port->port_name = letoh64(l[i].port_name);
1551 DPRINTF(QLA_D_PORT, "%s: loop id %d, port name %llx\n",
1552 DEVNAME(sc), port->loopid, port->port_name);
1553 TAILQ_INSERT_TAIL(&sc->sc_ports_found, port, update);
1554 }
1555 i++;
1556 }
1557 mtx_leave(&sc->sc_port_mtx);
1558
1559 return (0);
1560}
1561
1562struct qla_fc_port *
1563qla_next_fabric_port(struct qla_softc *sc, u_int32_t *firstport,
1564 u_int32_t *lastport)
1565{
1566 struct qla_sns_ga_nxt *ga;
1567 struct qla_sns_ga_nxt_resp *gar;
1568 struct qla_fc_port *fport;
1569 int result;
1570
1571 /* get the next port from the fabric nameserver */
1572 ga = QLA_DMA_KVA(sc->sc_scratch);
1573 memset(ga, 0, sizeof(*ga) + sizeof(*gar));
1574 ga->subcmd = htole16(QLA_SNS_GA_NXT);
1575 ga->max_word = htole16(sizeof(*gar) / 4);
1576 ga->port_id = htole32(*lastport);
18
Assigned value is garbage or undefined
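The flagged value is *lastport, which comes from qla_do_update(): its local 'lastport' is only assigned in the QLA_UPDATE_TASK_SCAN_FABRIC branch, and on the path traced in that function (steps 1-17), every earlier task bit is assumed clear while QLA_UPDATE_TASK_SCANNING_FABRIC is set, so qla_next_fabric_port() is reached before 'lastport' has ever been written. A minimal sketch of one way to make the value defined on every path, assuming no other initialization is intended, is to initialize both locals at their declaration, e.g. 'int firstport = 0xffffffff, lastport = 0xffffffff;'. Whether that, or only scheduling the SCANNING_FABRIC task once a scan has actually been started, is the right fix is a separate question.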
1577 result = qla_sns_req(sc, sc->sc_scratch, sizeof(*ga));
1578 if (result) {
1579 DPRINTF(QLA_D_PORT, "%s: GA_NXT %06x failed: %x\n", DEVNAME(sc),
1580 *lastport, result);
1581 *lastport = 0xffffffff;
1582 return (NULL);
1583 }
1584
1585 gar = (struct qla_sns_ga_nxt_resp *)(ga + 1);
1586 /* if the response is all zeroes, try again */
1587 if (gar->port_type_id == 0 && gar->port_name == 0 &&
1588 gar->node_name == 0) {
1589 DPRINTF(QLA_D_PORT, "%s: GA_NXT returned junk\n", DEVNAME(sc));
1590 return (NULL);
1591 }
1592
1593 /* are we back at the start? */
1594 *lastport = betoh32(gar->port_type_id) & 0xffffff;
1595 if (*lastport == *firstport) {
1596 *lastport = 0xffffffff;
1597 return (NULL);
1598 }
1599 if (*firstport == 0xffffffff)
1600 *firstport = *lastport;
1601
1602 DPRINTF(QLA_D_PORT, "%s: GA_NXT: port id: %06x, wwpn %llx, wwnn %llx\n",
1603 DEVNAME(sc), *lastport, betoh64(gar->port_name),
1604 betoh64(gar->node_name));
1605
1606 /* don't try to log in to ourselves */
1607 if (*lastport == sc->sc_port_id) {
1608 return (NULL);
1609 }
1610
1611 fport = malloc(sizeof(*fport), M_DEVBUF, M_ZERO | M_NOWAIT);
1612 if (fport == NULL) {
1613 printf("%s: failed to allocate a port struct\n",
1614 DEVNAME(sc));
1615 *lastport = 0xffffffff;
1616 return (NULL);
1617 }
1618 fport->port_name = betoh64(gar->port_name);
1619 fport->node_name = betoh64(gar->node_name);
1620 fport->location = QLA_LOCATION_PORT_ID(*lastport);
1621 fport->portid = *lastport;
1622 return (fport);
1623}
1624
1625int
1626qla_fabric_plogi(struct qla_softc *sc, struct qla_fc_port *port)
1627{
1628 int loopid, mboxin, err;
1629 u_int32_t id;
1630
1631 loopid = 0;
1632retry:
1633 if (port->loopid == 0) {
1634 mtx_enter(&sc->sc_port_mtx);
1635 loopid = qla_get_loop_id(sc, loopid);
1636 mtx_leave(&sc->sc_port_mtx);
1637 if (loopid == -1) {
1638 DPRINTF(QLA_D_PORT, "%s: ran out of loop ids\n",
1639 DEVNAME(sc));
1640 return (1);
1641 }
1642 }
1643
1644 mboxin = 0x000f;
1645 sc->sc_mbox[0] = QLA_MBOX_FABRIC_PLOGI;
1646 sc->sc_mbox[2] = (port->portid >> 16) & 0xff;
1647 sc->sc_mbox[3] = port->portid & 0xffff;
1648 if (sc->sc_2k_logins) {
1649 sc->sc_mbox[1] = loopid;
1650 sc->sc_mbox[10] = 0;
1651 mboxin |= (1 << 10);
1652 } else {
1653 sc->sc_mbox[1] = loopid << 8;
1654 }
1655
1656 err = qla_mbox(sc, mboxin);
1657 switch (err) {
1658 case 0:
1659 DPRINTF(QLA_D_PORT, "%s: logged in to %06x as %d\n",
1660 DEVNAME(sc), port->portid, loopid);
1661 port->flags &= ~QLA_PORT_FLAG_NEEDS_LOGIN;
1662 port->loopid = loopid;
1663 return (0);
1664
1665 case QLA_MBOX_PORT_USED:
1666 DPRINTF(QLA_D_PORT, "%s: already logged in to %06x as %d\n",
1667 DEVNAME(sc), port->portid, sc->sc_mbox[1]);
1668 port->flags &= ~QLA_PORT_FLAG_NEEDS_LOGIN;
1669 port->loopid = sc->sc_mbox[1];
1670 return (0);
1671
1672 case QLA_MBOX_LOOP_USED:
1673 id = (sc->sc_mbox[1] << 16) | sc->sc_mbox[2];
1674 if (qla_add_logged_in_port(sc, loopid, id)) {
1675 return (1);
1676 }
1677 port->loopid = 0;
1678 loopid++;
1679 goto retry;
1680
1681 default:
1682 DPRINTF(QLA_D_PORT, "%s: error %x logging in to port %06x\n",
1683 DEVNAME(sc), err, port->portid);
1684 port->loopid = 0;
1685 return (1);
1686 }
1687}
1688
1689void
1690qla_fabric_plogo(struct qla_softc *sc, struct qla_fc_port *port)
1691{
1692 int mboxin = 0x0003;
1693 sc->sc_mbox[0] = QLA_MBOX_FABRIC_PLOGO;
1694 if (sc->sc_2k_logins) {
1695 sc->sc_mbox[1] = port->loopid;
1696 sc->sc_mbox[10] = 0;
1697 mboxin |= (1 << 10);
1698 } else {
1699 sc->sc_mbox[1] = port->loopid << 8;
1700 }
1701
1702 if (qla_mbox(sc, mboxin))
1703 DPRINTF(QLA_D_PORT, "%s: loop id %d logout failed\n",
1704 DEVNAME(sc), port->loopid);
1705}
1706
1707void
1708qla_update_done(struct qla_softc *sc, int task)
1709{
1710 atomic_clearbits_int(&sc->sc_update_tasks, task);
1711}
1712
1713void
1714qla_update_start(struct qla_softc *sc, int task)
1715{
1716 atomic_setbits_int(&sc->sc_update_tasks, task);
1717 task_add(sc->sc_update_taskq, &sc->sc_update_task);
1718}
1719
1720void
1721qla_clear_port_lists(struct qla_softc *sc)
1722{
1723 struct qla_fc_port *p;
1724
1725 while (!TAILQ_EMPTY(&sc->sc_ports_found)) {
1726 p = TAILQ_FIRST(&sc->sc_ports_found);
1727 TAILQ_REMOVE(&sc->sc_ports_found, p, update);
1728 free(p, M_DEVBUF, sizeof *p);
1729 }
1730
1731 while (!TAILQ_EMPTY(&sc->sc_ports_new)) {
1732 p = TAILQ_FIRST(&sc->sc_ports_new);
1733 TAILQ_REMOVE(&sc->sc_ports_new, p, update);
1734 free(p, M_DEVBUF, sizeof *p);
1735 }
1736
1737 while (!TAILQ_EMPTY(&sc->sc_ports_gone)) {
1738 p = TAILQ_FIRST(&sc->sc_ports_gone);
1739 TAILQ_REMOVE(&sc->sc_ports_gone, p, update);
1740 }
1741}
1742
1743void
1744qla_do_update(void *xsc)
1745{
1746 struct qla_softc *sc = xsc;
1747 int firstport, lastport;
1748 struct qla_fc_port *port, *fport;
1749
1750 DPRINTF(QLA_D_PORT, "%s: updating\n", DEVNAME(sc));
1751 while (sc->sc_update_tasks != 0) {
1
Assuming field 'sc_update_tasks' is not equal to 0
2
Loop condition is true. Entering loop body
1752 if (sc->sc_update_tasks & QLA_UPDATE_TASK_CLEAR_ALL) {
3
Assuming the condition is false
4
Taking false branch
1753 TAILQ_HEAD(, qla_fc_port) detach;
1754 DPRINTF(QLA_D_PORT, "%s: detaching everything\n",
1755 DEVNAME(sc));
1756
1757 mtx_enter(&sc->sc_port_mtx);
1758 qla_clear_port_lists(sc);
1759 TAILQ_INIT(&detach);
1760 TAILQ_CONCAT(&detach, &sc->sc_ports, ports);
1761 mtx_leave(&sc->sc_port_mtx);
1762
1763 while (!TAILQ_EMPTY(&detach)) {
1764 port = TAILQ_FIRST(&detach);
1765 TAILQ_REMOVE(&detach, port, ports);
1766 if (port->flags & QLA_PORT_FLAG_IS_TARGET) {
1767 scsi_detach_target(sc->sc_scsibus,
1768 port->loopid, DETACH_FORCE |
1769 DETACH_QUIET);
1770 }
1771 sc->sc_targets[port->loopid] = NULL;
1772 if (port->location & QLA_LOCATION_FABRIC)
1773 qla_fabric_plogo(sc, port);
1774
1775 free(port, M_DEVBUF, sizeof *port);
1776 }
1777
1778 qla_update_done(sc, QLA_UPDATE_TASK_CLEAR_ALL);
1779 continue;
1780 }
1781
1782 if (sc->sc_update_tasks & QLA_UPDATE_TASK_SOFTRESET) {
5
Assuming the condition is false
6
Taking false branch
1783 /* what no */
1784 qla_update_done(sc, QLA_UPDATE_TASK_SOFTRESET);
1785 continue;
1786 }
1787
1788 if (sc->sc_update_tasks & QLA_UPDATE_TASK_UPDATE_TOPO) {
7
Assuming the condition is false
8
Taking false branch
1789 DPRINTF(QLA_D_PORT, "%s: updating topology\n",
1790 DEVNAME(sc));
1791 qla_update_topology(sc);
1792 qla_update_done(sc, QLA_UPDATE_TASK_UPDATE_TOPO);
1793 continue;
1794 }
1795
1796 if (sc->sc_update_tasks & QLA_UPDATE_TASK_GET_PORT_LIST) {
9
Assuming the condition is false
10
Taking false branch
1797 DPRINTF(QLA_D_PORT, "%s: getting port name list\n",
1798 DEVNAME(sc));
1799 mtx_enter(&sc->sc_port_mtx);
1800 qla_clear_port_lists(sc);
1801 mtx_leave(&sc->sc_port_mtx);
1802
1803 qla_get_port_name_list(sc, QLA_LOCATION_LOOP |
1804 QLA_LOCATION_FABRIC);
1805 mtx_enter(&sc->sc_port_mtx);
1806 TAILQ_FOREACH(port, &sc->sc_ports, ports) {
1807 TAILQ_INSERT_TAIL(&sc->sc_ports_gone, port,
1808 update);
1809 if (port->location & QLA_LOCATION_FABRIC) {
1810 port->flags |=
1811 QLA_PORT_FLAG_NEEDS_LOGIN;
1812 }
1813 }
1814
1815 /* take care of ports that haven't changed first */
1816 TAILQ_FOREACH(fport, &sc->sc_ports_found, update) {
1817 port = sc->sc_targets[fport->loopid];
1818 if (port == NULL || fport->port_name !=
1819 port->port_name) {
1820 /* new or changed port, handled later */
1821 continue;
1822 }
1823
1824 /*
1825 * the port hasn't been logged out, which
1826 * means we don't need to log in again, and,
1827 * for loop ports, that the port still exists.
1828 */
1829 port->flags &= ~QLA_PORT_FLAG_NEEDS_LOGIN;
1830 if (port->location & QLA_LOCATION_LOOP)
1831 TAILQ_REMOVE(&sc->sc_ports_gone,
1832 port, update);
1833
1834 fport->location = 0;
1835 }
1836 mtx_leave(&sc->sc_port_mtx);
1837 qla_update_start(sc, QLA_UPDATE_TASK_PORT_LIST);
1838 qla_update_done(sc, QLA_UPDATE_TASK_GET_PORT_LIST);
1839 continue;
1840 }
1841
1842 if (sc->sc_update_tasks & QLA_UPDATE_TASK_PORT_LIST) {
11
Assuming the condition is false
12
Taking false branch
1843 mtx_enter(&sc->sc_port_mtx);
1844 fport = TAILQ_FIRST(&sc->sc_ports_found);
1845 if (fport != NULL) {
1846 TAILQ_REMOVE(&sc->sc_ports_found, fport,
1847 update);
1848 }
1849 mtx_leave(&sc->sc_port_mtx);
1850
1851 if (fport == NULL) {
1852 DPRINTF(QLA_D_PORT, "%s: done with ports\n",
1853 DEVNAME(sc));
1854 qla_update_done(sc,
1855 QLA_UPDATE_TASK_PORT_LIST);
1856 qla_update_start(sc,
1857 QLA_UPDATE_TASK_SCAN_FABRIC);
1858 } else if (fport->location & QLA_LOCATION_LOOP) {
1859 DPRINTF(QLA_D_PORT, "%s: loop port %d\n",
1860 DEVNAME(sc), fport->loopid);
1861 if (qla_add_loop_port(sc, fport) != 0)
1862 free(fport, M_DEVBUF, sizeof *fport);
1863 } else if (fport->location & QLA_LOCATION_FABRIC) {
1864 qla_add_fabric_port(sc, fport);
1865 } else {
1866 /* already processed */
1867 free(fport, M_DEVBUF, sizeof *fport);
1868 }
1869 continue;
1870 }
1871
1872 if (sc->sc_update_tasks & QLA_UPDATE_TASK_SCAN_FABRIC) {
13
Assuming the condition is false
14
Taking false branch
1873 DPRINTF(QLA_D_PORT, "%s: starting fabric scan\n",
1874 DEVNAME(sc));
1875 lastport = sc->sc_port_id;
1876 firstport = 0xffffffff;
1877 if (qla_update_fabric(sc))
1878 qla_update_start(sc,
1879 QLA_UPDATE_TASK_SCANNING_FABRIC);
1880 qla_update_done(sc, QLA_UPDATE_TASK_SCAN_FABRIC);
1881 continue;
1882 }
1883
1884 if (sc->sc_update_tasks & QLA_UPDATE_TASK_SCANNING_FABRIC) {
15
Assuming the condition is true
16
Taking true branch
1885 fport = qla_next_fabric_port(sc, &firstport, &lastport);
17
Calling 'qla_next_fabric_port'
1886 if (fport != NULL) {
1887 int disp;
1888
1889 mtx_enter(&sc->sc_port_mtx);
1890 disp = qla_classify_port(sc, fport->location,
1891 fport->port_name, fport->node_name, &port);
1892 switch (disp) {
1893 case QLA_PORT_DISP_CHANGED:
1894 case QLA_PORT_DISP_MOVED:
1895 /* we'll log out the old port later */
1896 case QLA_PORT_DISP_NEW:
1897 DPRINTF(QLA_D_PORT, "%s: new port "
1898 "%06x\n", DEVNAME(sc),
1899 fport->portid);
1900 TAILQ_INSERT_TAIL(&sc->sc_ports_found,
1901 fport, update);
1902 break;
1903 case QLA_PORT_DISP_DUP:
1904 free(fport, M_DEVBUF, sizeof *fport);
1905 break;
1906 case QLA_PORT_DISP_SAME:
1907 DPRINTF(QLA_D_PORT, "%s: existing port"
1908 " %06x\n", DEVNAME(sc),
1909 fport->portid);
1910 TAILQ_REMOVE(&sc->sc_ports_gone, port,
1911 update);
1912 free(fport, M_DEVBUF, sizeof *fport);
1913 break;
1914 }
1915 mtx_leave(&sc->sc_port_mtx);
1916 }
1917 if (lastport == 0xffffffff) {
1918 DPRINTF(QLA_D_PORT, "%s: finished\n",
1919 DEVNAME(sc));
1920 qla_update_done(sc,
1921 QLA_UPDATE_TASK_SCANNING_FABRIC);
1922 qla_update_start(sc,
1923 QLA_UPDATE_TASK_FABRIC_LOGIN);
1924 }
1925 continue;
1926 }
1927
1928 if (sc->sc_update_tasks & QLA_UPDATE_TASK_FABRIC_LOGIN) {
1929 mtx_enter(&sc->sc_port_mtx);
1930 port = TAILQ_FIRST(&sc->sc_ports_found);
1931 if (port != NULL) {
1932 TAILQ_REMOVE(&sc->sc_ports_found, port, update);
1933 }
1934 mtx_leave(&sc->sc_port_mtx);
1935
1936 if (port != NULL) {
1937 DPRINTF(QLA_D_PORT, "%s: found port %06x\n",
1938 DEVNAME(sc), port->portid);
1939 if (qla_fabric_plogi(sc, port) == 0) {
1940 qla_add_fabric_port(sc, port);
1941 } else {
1942 free(port, M_DEVBUF, sizeof *port);
1943 }
1944 } else {
1945 DPRINTF(QLA_D_PORT, "%s: done with logins\n",
1946 DEVNAME(sc));
1947 qla_update_done(sc,
1948 QLA_UPDATE_TASK_FABRIC_LOGIN);
1949 qla_update_start(sc,
1950 QLA_UPDATE_TASK_ATTACH_TARGET |
1951 QLA_UPDATE_TASK_DETACH_TARGET);
1952 }
1953 continue;
1954 }
1955
1956 if (sc->sc_update_tasks & QLA_UPDATE_TASK_FABRIC_RELOGIN) {
1957 TAILQ_FOREACH(port, &sc->sc_ports, ports) {
1958 if (port->flags & QLA_PORT_FLAG_NEEDS_LOGIN) {
1959 qla_fabric_plogi(sc, port);
1960 break;
1961 }
1962 }
1963
1964 if (port == NULL)
1965 qla_update_done(sc,
1966 QLA_UPDATE_TASK_FABRIC_RELOGIN);
1967 continue;
1968 }
1969
1970 if (sc->sc_update_tasks & QLA_UPDATE_TASK_DETACH_TARGET) {
1971 mtx_enter(&sc->sc_port_mtx);
1972 port = TAILQ_FIRST(&sc->sc_ports_gone);
1973 if (port != NULL) {
1974 sc->sc_targets[port->loopid] = NULL;
1975 TAILQ_REMOVE(&sc->sc_ports_gone, port, update);
1976 TAILQ_REMOVE(&sc->sc_ports, port, ports);
1977 }
1978 mtx_leave(&sc->sc_port_mtx);
1979
1980 if (port != NULL) {
1981 DPRINTF(QLA_D_PORT, "%s: detaching target %d\n",
1982 DEVNAME(sc), port->loopid);
1983 if (sc->sc_scsibus != NULL)
1984 scsi_detach_target(sc->sc_scsibus,
1985 port->loopid, DETACH_FORCE |
1986 DETACH_QUIET);
1987
1988 if (port->location & QLA_LOCATION_FABRIC)
1989 qla_fabric_plogo(sc, port);
1990
1991 free(port, M_DEVBUF, sizeof *port);
1992 } else {
1993 qla_update_done(sc,
1994 QLA_UPDATE_TASK_DETACH_TARGET);
1995 }
1996 continue;
1997 }
1998
1999 if (sc->sc_update_tasks & QLA_UPDATE_TASK_ATTACH_TARGET) {
2000 mtx_enter(&sc->sc_port_mtx);
2001 port = TAILQ_FIRST(&sc->sc_ports_new);
2002 if (port != NULL) {
2003 TAILQ_REMOVE(&sc->sc_ports_new, port, update);
2004 TAILQ_INSERT_TAIL(&sc->sc_ports, port, ports);
2005 }
2006 mtx_leave(&sc->sc_port_mtx);
2007
2008 if (port != NULL) {
2009 if (sc->sc_scsibus != NULL)
2010 scsi_probe_target(sc->sc_scsibus,
2011 port->loopid);
2012 } else {
2013 qla_update_done(sc,
2014 QLA_UPDATE_TASK_ATTACH_TARGET);
2015 }
2016 continue;
2017 }
2018
2019 }
2020
2021 DPRINTF(QLA_D_PORT, "%s: done updating\n", DEVNAME(sc));
2022}
2023
2024int
2025qla_async(struct qla_softc *sc, u_int16_t info)
2026{
2027 u_int16_t id, exp;
2028
2029 switch (info) {
2030 case QLA_ASYNC_SYSTEM_ERROR:
2031 qla_update_start(sc, QLA_UPDATE_TASK_SOFTRESET);
2032 break;
2033
2034 case QLA_ASYNC_REQ_XFER_ERROR:
2035 qla_update_start(sc, QLA_UPDATE_TASK_SOFTRESET);
2036 break;
2037
2038 case QLA_ASYNC_RSP_XFER_ERROR:
2039 qla_update_start(sc, QLA_UPDATE_TASK_SOFTRESET);
2040 break;
2041
2042 case QLA_ASYNC_LIP_OCCURRED:
2043 DPRINTF(QLA_D_PORT, "%s: lip occurred\n", DEVNAME(sc));
2044 break;
2045
2046 case QLA_ASYNC_LOOP_UP:
2047 DPRINTF(QLA_D_PORT, "%s: loop up\n", DEVNAME(sc));
2048 sc->sc_loop_up = 1;
2049 sc->sc_marker_required = 1;
2050 qla_update_start(sc, QLA_UPDATE_TASK_UPDATE_TOPO |
2051 QLA_UPDATE_TASK_GET_PORT_LIST);
2052 break;
2053
2054 case QLA_ASYNC_LOOP_DOWN:
2055 DPRINTF(QLA_D_PORT, "%s: loop down\n", DEVNAME(sc));
2056 sc->sc_loop_up = 0;
2057 qla_update_start(sc, QLA_UPDATE_TASK_CLEAR_ALL);
2058 break;
2059
2060 case QLA_ASYNC_LIP_RESET:
2061 DPRINTF(QLA_D_PORT, "%s: lip reset\n", DEVNAME(sc));
2062 sc->sc_marker_required = 1;
2063 qla_update_start(sc, QLA_UPDATE_TASK_FABRIC_RELOGIN);
2064 break;
2065
2066 case QLA_ASYNC_PORT_DB_CHANGE:
2067 DPRINTF(QLA_D_PORT, "%s: port db changed %x\n", DEVNAME(sc),
2068 qla_read_mbox(sc, 1));
2069 qla_update_start(sc, QLA_UPDATE_TASK_GET_PORT_LIST);
2070 break;
2071
2072 case QLA_ASYNC_CHANGE_NOTIFY:
2073 DPRINTF(QLA_D_PORT, "%s: name server change (%02x:%02x)\n",
2074 DEVNAME(sc), qla_read_mbox(sc, 1), qla_read_mbox(sc, 2));
2075 qla_update_start(sc, QLA_UPDATE_TASK_GET_PORT_LIST);
2076 break;
2077
2078 case QLA_ASYNC_LIP_F8:
2079 DPRINTF(QLA_D_PORT, "%s: lip f8\n", DEVNAME(sc));
2080 break;
2081
2082 case QLA_ASYNC_LOOP_INIT_ERROR:
2083 DPRINTF(QLA_D_PORT, "%s: loop initialization error: %x\n",
2084 DEVNAME(sc), qla_read_mbox(sc, 1));
2085 break;
2086
2087 case QLA_ASYNC_LOGIN_REJECT:
2088 id = qla_read_mbox(sc, 1);
2089 exp = qla_read_mbox(sc, 2);
2090 DPRINTF(QLA_D_PORT, "%s: login reject from %x (reason %d,"
2091 " explanation %x)\n", DEVNAME(sc), id >> 8, id & 0xff, exp);
2092 break;
2093
2094 case QLA_ASYNC_SCSI_CMD_COMPLETE:
2095 /* shouldn't happen, we disable fast posting */
2096 break;
2097
2098 case QLA_ASYNC_CTIO_COMPLETE:
2099 /* definitely shouldn't happen, we don't do target mode */
2100 break;
2101
2102 case QLA_ASYNC_POINT_TO_POINT:
2103 DPRINTF(QLA_D_PORT, "%s: connected in point-to-point mode\n",
2104 DEVNAME(sc));
2105 /* we get stuck handling these if we have the wrong loop
2106 * topology; should somehow reinit with different things
2107 * somehow.
2108 */
2109 break;
2110
2111 case QLA_ASYNC_ZIO_RESP_UPDATE:
2112 /* shouldn't happen, we don't do zio */
2113 break;
2114
2115 case QLA_ASYNC_RND_ERROR:
2116 /* do nothing? */
2117 break;
2118
2119 case QLA_ASYNC_QUEUE_FULL:
2120 break;
2121
2122 default:
2123 DPRINTF(QLA_D_INTR, "%s: unknown async %x\n", DEVNAME(sc),
2124 info);
2125 break;
2126 }
2127 return (1);
2128}
2129
2130void
2131qla_dump_iocb(struct qla_softc *sc, void *buf)
2132{
2133#ifdef QLA_DEBUG
2134 u_int8_t *iocb = buf;
2135 int l;
2136 int b;
2137
2138 if ((qladebug & QLA_D_IOCB) == 0)
2139 return;
2140
2141 printf("%s: iocb:\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
2142 for (l = 0; l < 4; l++) {
2143 for (b = 0; b < 16; b++) {
2144 printf(" %2.2x", iocb[(l*16)+b]);
2145 }
2146 printf("\n");
2147 }
2148#endif
2149}
2150
2151void
2152qla_dump_iocb_segs(struct qla_softc *sc, void *segs, int n)
2153{
2154#ifdef QLA_DEBUG
2155 u_int8_t *buf = segs;
2156 int s, b;
2157 if ((qladebug & QLA_D_IOCB) == 0)
2158 return;
2159
2160 printf("%s: iocb segs:\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
2161 for (s = 0; s < n; s++) {
2162 for (b = 0; b < sizeof(struct qla_iocb_seg); b++) {
2163 printf(" %2.2x", buf[(s*(sizeof(struct qla_iocb_seg)))
2164 + b]);
2165 }
2166 printf("\n");
2167 }
2168#endif
2169}
2170
2171void
2172qla_put_marker(struct qla_softc *sc, void *buf)
2173{
2174 struct qla_iocb_marker *marker = buf;
2175
2176 marker->entry_type = QLA_IOCB_MARKER;
2177 marker->entry_count = 1;
2178 marker->seqno = 0;
2179 marker->flags = 0;
2180
2181 /* could be more specific here; isp(4) isn't */
2182 marker->target = 0;
2183 marker->modifier = QLA_IOCB_MARKER_SYNC_ALL;
2184 qla_dump_iocb(sc, buf);
2185}
2186
2187static inline void
2188qla_put_data_seg(struct qla_iocb_seg *seg, bus_dmamap_t dmap, int num)
2189{
2190 uint64_t addr = dmap->dm_segs[num].ds_addr;
2191
2192 htolem32(&seg->seg_addr_lo, addr);
2193 htolem32(&seg->seg_addr_hi, addr >> 32);
2194 htolem32(&seg->seg_len, dmap->dm_segs[num].ds_len);
2195}
2196
2197void
2198qla_put_cmd(struct qla_softc *sc, void *buf, struct scsi_xfer *xs,
2199 struct qla_ccb *ccb)
2200{
2201 struct qla_iocb_req34 *req = buf;
2202 u_int16_t dir;
2203 int seg;
2204 int target = xs->sc_link->target;
2205
2206 req->seqno = 0;
2207 req->flags = 0;
2208 req->entry_count = 1;
2209
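If a transfer needs more data segments than fit inline in a type 3 command IOCB (QLA_IOCB_SEGS_PER_CMD), the code below switches to a type 4 IOCB: the segment descriptors are written into this CCB's slice of sc_segments and the IOCB carries only their DMA address (ccb_seg_dva), so the request still fits in a single queue entry.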
2210 if (xs->datalen == 0) {
2211 dir = QLA_IOCB_CMD_NO_DATA;
2212 req->req_seg_count = 0;
2213 req->entry_type = QLA_IOCB_CMD_TYPE_3;
2214 } else {
2215 dir = xs->flags & SCSI_DATA_IN ? QLA_IOCB_CMD_READ_DATA :
2216 QLA_IOCB_CMD_WRITE_DATA;
2217 htolem16(&req->req_seg_count, ccb->ccb_dmamap->dm_nsegs);
2218 if (ccb->ccb_dmamap->dm_nsegs > QLA_IOCB_SEGS_PER_CMD) {
2219 req->entry_type = QLA_IOCB_CMD_TYPE_4;
2220 for (seg = 0; seg < ccb->ccb_dmamap->dm_nsegs; seg++) {
2221 qla_put_data_seg(&ccb->ccb_t4segs[seg],
2222 ccb->ccb_dmamap, seg);
2223 }
2224 req->req_type.req4.req4_seg_type = htole16(1);
2225 req->req_type.req4.req4_seg_base = 0;
2226 req->req_type.req4.req4_seg_addr = ccb->ccb_seg_dva;
2227 memset(req->req_type.req4.req4_reserved, 0,
2228 sizeof(req->req_type.req4.req4_reserved));
2229 bus_dmamap_sync(sc->sc_dmat,
2230 QLA_DMA_MAP(sc->sc_segments), ccb->ccb_seg_offset,
2231 sizeof(*ccb->ccb_t4segs) * ccb->ccb_dmamap->dm_nsegs,
2232 BUS_DMASYNC_PREWRITE);
2233 } else {
2234 req->entry_type = QLA_IOCB_CMD_TYPE_3;
2235 for (seg = 0; seg < ccb->ccb_dmamap->dm_nsegs; seg++) {
2236 qla_put_data_seg(&req->req_type.req3_segs[seg],
2237 ccb->ccb_dmamap, seg);
2238 }
2239 }
2240 }
2241
2242 /* isp(4) uses head of queue for 'request sense' commands */
2243 htolem16(&req->req_flags, QLA_IOCB_CMD_SIMPLE_QUEUE | dir);
2244
2245 /*
2246 * timeout is in seconds. make sure it's at least 1 if a timeout
2247 * was specified in xs
2248 */
2249 if (xs->timeout != 0)
2250 htolem16(&req->req_time, MAX(1, xs->timeout/1000));
2251
2252 /* lun and target layout vary with firmware attributes */
2253 if (sc->sc_expanded_lun) {
2254 if (sc->sc_2k_logins) {
2255 req->req_target = htole16(target);
2256 } else {
2257 req->req_target = htole16(target << 8);
2258 }
2259 req->req_scclun = htole16(xs->sc_link->lun);
2260 } else {
2261 req->req_target = htole16(target << 8 | xs->sc_link->lun);
2262 }
2263 memcpy(req->req_cdb, &xs->cmd, xs->cmdlen);
2264 req->req_totalcnt = htole32(xs->datalen);
2265
2266 req->req_handle = ccb->ccb_id;
2267
2268 qla_dump_iocb(sc, buf);
2269}
2270
2271int
2272qla_verify_firmware(struct qla_softc *sc, u_int16_t addr)
2273{
2274 sc->sc_mbox[0] = QLA_MBOX_VERIFY_CSUM;
2275 sc->sc_mbox[1] = addr;
2276 return (qla_mbox(sc, 0x0003));
2277}
2278
2279#ifndef ISP_NOFIRMWARE
2280int
2281qla_load_firmware_words(struct qla_softc *sc, const u_int16_t *src,
2282 u_int16_t dest)
2283{
2284 u_int16_t i;
2285
2286 for (i = 0; i < src[3]; i++) {
2287 sc->sc_mbox[0] = QLA_MBOX_WRITE_RAM_WORD;
2288 sc->sc_mbox[1] = i + dest;
2289 sc->sc_mbox[2] = src[i];
2290 if (qla_mbox(sc, 0x07)) {
2291 printf("firmware load failed\n");
2292 return (1);
2293 }
2294 }
2295
2296 return (qla_verify_firmware(sc, dest));
2297}
2298
2299int
2300qla_load_firmware_2100(struct qla_softc *sc)
2301{
2302 return qla_load_firmware_words(sc, isp_2100_risc_code,
2303 QLA_2100_CODE_ORG);
2304}
2305
2306int
2307qla_load_firmware_2200(struct qla_softc *sc)
2308{
2309 return qla_load_firmware_words(sc, isp_2200_risc_code,
2310 QLA_2200_CODE_ORG);
2311}
2312
2313int
2314qla_load_fwchunk_2300(struct qla_softc *sc, struct qla_dmamem *mem,
2315 const u_int16_t *src, u_int32_t dest)
2316{
2317 u_int16_t origin, done, total;
2318 int i;
2319
2320 origin = dest;
2321 done = 0;
2322 total = src[3];
2323
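The image is loaded in chunks: src[3] gives its length in 16-bit words, each pass copies at most 1024 words into the DMA buffer as little-endian values, and a QLA_MBOX_LOAD_RAM_EXT mailbox command writes them to RISC memory at 'dest' (split across mbox[1] and mbox[8]); the firmware checksum is only verified once the whole chunk sequence is in place.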
2324 while (done < total) {
2325 u_int16_t *copy;
2326 u_int32_t words;
2327
2328 /* limit transfer size otherwise it just doesn't work */
2329 words = MIN(total - done, 1 << 10);
2330 copy = QLA_DMA_KVA(mem);
2331 for (i = 0; i < words; i++) {
2332 copy[i] = htole16(src[done++]);
2333 }
2334 bus_dmamap_sync(sc->sc_dmat, QLA_DMA_MAP(mem), 0, words * 2,
2335 BUS_DMASYNC_PREWRITE);
2336
2337 sc->sc_mbox[0] = QLA_MBOX_LOAD_RAM_EXT;
2338 sc->sc_mbox[1] = dest;
2339 sc->sc_mbox[4] = words;
2340 sc->sc_mbox[8] = dest >> 16;
2341 qla_mbox_putaddr(sc->sc_mbox, mem);
2342 if (qla_mbox(sc, 0x01ff)) {
2343 printf("firmware load failed\n");
2344 return (1);
2345 }
2346 bus_dmamap_sync(sc->sc_dmat, QLA_DMA_MAP(mem), 0, words * 2,
2347 BUS_DMASYNC_POSTWRITE);
2348
2349 dest += words;
2350 }
2351
2352 return (qla_verify_firmware(sc, origin));
2353}
2354
2355int
2356qla_load_firmware_2300(struct qla_softc *sc)
2357{
2358 struct qla_dmamem *mem;
2359 const u_int16_t *fw = isp_2300_risc_code;
2360 int rv;
2361
2362 mem = qla_dmamem_alloc(sc, 65536);
2363 rv = qla_load_fwchunk_2300(sc, mem, fw, QLA_2300_CODE_ORG);
2364 qla_dmamem_free(sc, mem);
2365
2366 return (rv);
2367}
2368
2369int
2370qla_load_firmware_2322(struct qla_softc *sc)
2371{
2372 /* we don't have the 2322 firmware image yet */
2373#if 0
2374 struct qla_dmamem *mem;
2375 const u_int16_t *fw = isp_2322_risc_code;
2376 u_int32_t addr;
2377 int i;
2378
2379 mem = qla_dmamem_alloc(sc, 65536);
2380 if (qla_load_fwchunk_2300(sc, mem, fw, QLA_2300_CODE_ORG)) {
2381 qla_dmamem_free(sc, mem);
2382 return (1);
2383 }
2384
2385 for (i = 0; i < 2; i++) {
2386 fw += fw[3];
2387 addr = fw[5] | ((fw[4] & 0x3f) << 16);
2388 if (qla_load_fwchunk_2300(sc, mem, fw, addr)) {
2389 qla_dmamem_free(sc, mem);
2390 return (1);
2391 }
2392 }
2393
2394 qla_dmamem_free(sc, mem);
2395#endif
2396 return (0);
2397}
2398
2399#endif /* !ISP_NOFIRMWARE */
2400
2401int
2402qla_read_nvram(struct qla_softc *sc)
2403{
2404 u_int16_t data[sizeof(sc->sc_nvram) >> 1];
2405 u_int16_t req, cmd, val;
2406 u_int8_t csum;
2407 int i, base, bit;
2408
2409 base = sc->sc_port * 0x80;
2410
2411 qla_write(sc, QLA_NVRAM, QLA_NVRAM_CHIP_SEL);
2412 delay(10);
2413 qla_write(sc, QLA_NVRAM, QLA_NVRAM_CHIP_SEL | QLA_NVRAM_CLOCK);
2414 delay(10);
2415
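The NVRAM is read by bit-banging a serial EEPROM through the QLA_NVRAM register: for each 16-bit word an 11-bit read command (opcode plus address, offset by 0x80 per port) is clocked out one bit at a time and 16 data bits are clocked back in; a byte-wise sum over the whole image is then required to come to zero.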
2416 for (i = 0; i < nitems(data); i++) {
2417 req = (i + base) | (QLA_NVRAM_CMD_READ << 8);
2418
2419 /* write each bit out through the nvram register */
2420 for (bit = 10; bit >= 0; bit--) {
2421 cmd = QLA_NVRAM_CHIP_SEL;
2422 if ((req >> bit) & 1) {
2423 cmd |= QLA_NVRAM_DATA_OUT;
2424 }
2425 qla_write(sc, QLA_NVRAM, cmd);
2426 delay(10);
2427 qla_read(sc, QLA_NVRAM);
2428
2429 qla_write(sc, QLA_NVRAM, cmd | QLA_NVRAM_CLOCK);
2430 delay(10);
2431 qla_read(sc, QLA_NVRAM);
2432
2433 qla_write(sc, QLA_NVRAM, cmd);
2434 delay(10);
2435 qla_read(sc, QLA_NVRAM);
2436 }
2437
2438 /* read the result back */
2439 val = 0;
2440 for (bit = 0; bit < 16; bit++) {
2441 val <<= 1;
2442 qla_write(sc, QLA_NVRAM, QLA_NVRAM_CHIP_SEL |
2443 QLA_NVRAM_CLOCK);
2444 delay(10);
2445 if (qla_read(sc, QLA_NVRAM) & QLA_NVRAM_DATA_IN)
2446 val |= 1;
2447 delay(10);
2448
2449 qla_write(sc, QLA_NVRAM, QLA_NVRAM_CHIP_SEL);
2450 delay(10);
2451 qla_read(sc, QLA_NVRAM);
2452 }
2453
2454 qla_write(sc, QLA_NVRAM, 0);
2455 delay(10);
2456 qla_read(sc, QLA_NVRAM);
2457
2458 data[i] = letoh16(val);
2459 }
2460
2461 csum = 0;
2462 for (i = 0; i < nitems(data); i++) {
2463 csum += data[i] & 0xff;
2464 csum += data[i] >> 8;
2465 }
2466
2467 memcpy(&sc->sc_nvram, data, sizeof(sc->sc_nvram));
2468 /* id field should be 'ISP ', version should be at least 1 */
2469 if (sc->sc_nvram.id[0] != 'I' || sc->sc_nvram.id[1] != 'S' ||
2470 sc->sc_nvram.id[2] != 'P' || sc->sc_nvram.id[3] != ' ' ||
2471 sc->sc_nvram.nvram_version < 1 || (csum != 0)) {
2472 /*
2473 * onboard 2200s on Sun hardware don't have an nvram
2474 * fitted, but will provide us with node and port name
2475 * through Open Firmware; don't complain in that case.
2476 */
2477 if (sc->sc_node_name == 0 || sc->sc_port_name == 0)
2478 printf("%s: nvram corrupt\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
2479 return (1);
2480 }
2481 return (0);
2482}
2483
2484struct qla_dmamem *
2485qla_dmamem_alloc(struct qla_softc *sc, size_t size)
2486{
2487 struct qla_dmamem *m;
2488 int nsegs;
2489
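qla_dmamem_alloc() follows the usual bus_dma chain (create a map, allocate and map the memory, then load the map) and unwinds through the labels at the bottom if any step fails; qla_dmamem_free() below undoes the same steps in reverse order.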
2490 m = malloc(sizeof(*m), M_DEVBUF, M_NOWAIT | M_ZERO);
2491 if (m == NULL)
2492 return (NULL);
2493
2494 m->qdm_size = size;
2495
2496 if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
2497 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &m->qdm_map) != 0)
2498 goto qdmfree;
2499
2500 if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &m->qdm_seg, 1,
2501 &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0)
2502 goto destroy;
2503
2504 if (bus_dmamem_map(sc->sc_dmat, &m->qdm_seg, nsegs, size, &m->qdm_kva,
2505 BUS_DMA_NOWAIT) != 0)
2506 goto free;
2507
2508 if (bus_dmamap_load(sc->sc_dmat, m->qdm_map, m->qdm_kva, size, NULL,
2509 BUS_DMA_NOWAIT) != 0)
2510 goto unmap;
2511
2512 return (m);
2513
2514unmap:
2515 bus_dmamem_unmap(sc->sc_dmat, m->qdm_kva, m->qdm_size);
2516free:
2517 bus_dmamem_free(sc->sc_dmat, &m->qdm_seg, 1);
2518destroy:
2519 bus_dmamap_destroy(sc->sc_dmat, m->qdm_map);
2520qdmfree:
2521 free(m, M_DEVBUF, sizeof(*m));
2522
2523 return (NULL);
2524}
2525
2526void
2527qla_dmamem_free(struct qla_softc *sc, struct qla_dmamem *m)
2528{
2529 bus_dmamap_unload(sc->sc_dmat, m->qdm_map);
2530 bus_dmamem_unmap(sc->sc_dmat, m->qdm_kva, m->qdm_size);
2531 bus_dmamem_free(sc->sc_dmat, &m->qdm_seg, 1);
2532 bus_dmamap_destroy(sc->sc_dmat, m->qdm_map);
2533 free(m, M_DEVBUF, sizeof(*m));
2534}
2535
2536int
2537qla_alloc_ccbs(struct qla_softc *sc)
2538{
2539 struct qla_ccb *ccb;
2540 u_int8_t *cmd;
2541 int i;
2542
2543 SIMPLEQ_INIT(&sc->sc_ccb_free);
2544 mtx_init(&sc->sc_ccb_mtx, IPL_BIO);
2545 mtx_init(&sc->sc_queue_mtx, IPL_BIO);
2546 mtx_init(&sc->sc_port_mtx, IPL_BIO);
2547 mtx_init(&sc->sc_mbox_mtx, IPL_BIO);
2548
2549 sc->sc_ccbs = mallocarray(sc->sc_maxcmds, sizeof(struct qla_ccb),
2550 M_DEVBUF, M_WAITOK | M_CANFAIL | M_ZERO);
2551 if (sc->sc_ccbs == NULL) {
2552 printf("%s: unable to allocate ccbs\n", DEVNAME(sc));
2553 return (1);
2554 }
2555
2556 sc->sc_requests = qla_dmamem_alloc(sc, sc->sc_maxcmds *
2557 QLA_QUEUE_ENTRY_SIZE);
2558 if (sc->sc_requests == NULL) {
2559 printf("%s: unable to allocate ccb dmamem\n", DEVNAME(sc));
2560 goto free_ccbs;
2561 }
2562 sc->sc_responses = qla_dmamem_alloc(sc, sc->sc_maxcmds *
2563 QLA_QUEUE_ENTRY_SIZE);
2564 if (sc->sc_responses == NULL) {
2565 printf("%s: unable to allocate rcb dmamem\n", DEVNAME(sc));
2566 goto free_req;
2567 }
2568 sc->sc_segments = qla_dmamem_alloc(sc, sc->sc_maxcmds * QLA_MAX_SEGS *
2569 sizeof(struct qla_iocb_seg));
2570 if (sc->sc_segments == NULL) {
2571 printf("%s: unable to allocate iocb segments\n", DEVNAME(sc));
2572 goto free_res;
2573 }
2574
2575 cmd = QLA_DMA_KVA(sc->sc_requests);
2576 memset(cmd, 0, QLA_QUEUE_ENTRY_SIZE * sc->sc_maxcmds);
2577 for (i = 0; i < sc->sc_maxcmds; i++) {
2578 ccb = &sc->sc_ccbs[i];
2579
2580 if (bus_dmamap_create(sc->sc_dmat, MAXPHYS,
2581 QLA_MAX_SEGS, MAXPHYS, 0,
2582 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
2583 &ccb->ccb_dmamap) != 0) {
2584 printf("%s: unable to create dma map\n", DEVNAME(sc));
2585 goto free_maps;
2586 }
2587
2588 ccb->ccb_sc = sc;
2589 ccb->ccb_id = i;
2590
2591 ccb->ccb_seg_offset = i * QLA_MAX_SEGS *
2592 sizeof(struct qla_iocb_seg);
2593 htolem64(&ccb->ccb_seg_dva,
2594 QLA_DMA_DVA(sc->sc_segments) + ccb->ccb_seg_offset);
2595 ccb->ccb_t4segs = QLA_DMA_KVA(sc->sc_segments) +
2596 ccb->ccb_seg_offset;
2597
2598 qla_put_ccb(sc, ccb);
2599 }
2600
2601 scsi_iopool_init(&sc->sc_iopool, sc, qla_get_ccb, qla_put_ccb);
2602 return (0);
2603
2604free_maps:
2605 while ((ccb = qla_get_ccb(sc)) != NULL)
2606 bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
2607
2608 qla_dmamem_free(sc, sc->sc_segments);
2609free_res:
2610 qla_dmamem_free(sc, sc->sc_responses);
2611free_req:
2612 qla_dmamem_free(sc, sc->sc_requests);
2613free_ccbs:
2614 free(sc->sc_ccbs, M_DEVBUF, 0);
2615
2616 return (1);
2617}
2618
2619void
2620qla_free_ccbs(struct qla_softc *sc)
2621{
2622 struct qla_ccb *ccb;
2623
2624 scsi_iopool_destroy(&sc->sc_iopool);
2625 while ((ccb = qla_get_ccb(sc)) != NULL)
2626 bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
2627 qla_dmamem_free(sc, sc->sc_segments);
2628 qla_dmamem_free(sc, sc->sc_responses);
2629 qla_dmamem_free(sc, sc->sc_requests);
2630 free(sc->sc_ccbs, M_DEVBUF, 0);
2631}
2632
2633void *
2634qla_get_ccb(void *xsc)
2635{
2636 struct qla_softc *sc = xsc;
2637 struct qla_ccb *ccb;
2638
2639 mtx_enter(&sc->sc_ccb_mtx);
2640 ccb = SIMPLEQ_FIRST(&sc->sc_ccb_free);
2641 if (ccb != NULL) {
2642 SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_free, ccb_link);
2643 }
2644 mtx_leave(&sc->sc_ccb_mtx);
2645 return (ccb);
2646}
2647
2648void
2649qla_put_ccb(void *xsc, void *io)
2650{
2651 struct qla_softc *sc = xsc;
2652 struct qla_ccb *ccb = io;
2653
2654 ccb->ccb_xs = NULL;
2655 mtx_enter(&sc->sc_ccb_mtx);
2656 SIMPLEQ_INSERT_HEAD(&sc->sc_ccb_free, ccb, ccb_link);
2657 mtx_leave(&sc->sc_ccb_mtx);
2658}